xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision cee50c2a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 /* When "every_nth" > 0 then modulo "every_nth" commands:
191  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
192  *   - a RECOVERED_ERROR is simulated on successful read and write
193  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
194  *   - a TRANSPORT_ERROR is simulated on successful read and write
195  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
196  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
197  *     CMD_ABORT
198  *
199  * When "every_nth" < 0 then after "- every_nth" commands the selected
200  * error will be injected. The error will be injected on every subsequent
201  * command until some other action occurs; for example, the user writing
202  * a new value (other than -1 or 1) to every_nth:
203  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
204  */
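/* For example (illustrative usage; opts and every_nth are both module
 * parameters of this driver): simulate a missing response on every 100th
 * command by combining SDEBUG_OPT_TIMEOUT (4) with every_nth, either at
 * load time:
 *      modprobe scsi_debug opts=4 every_nth=100
 * or at run time:
 *      echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *      echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */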
205 
206 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
207  * priority order. In the subset implemented here lower numbers have higher
208  * priority. The UA numbers should be a sequence starting from 0 with
209  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
210 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
211 #define SDEBUG_UA_BUS_RESET 1
212 #define SDEBUG_UA_MODE_CHANGED 2
213 #define SDEBUG_UA_CAPACITY_CHANGED 3
214 #define SDEBUG_UA_LUNS_CHANGED 4
215 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
216 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
217 #define SDEBUG_NUM_UAS 7
218 
219 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
220  * simulated at this sector on read commands: */
221 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
222 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
223 
224 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
225  * or "peripheral device" addressing (value 0) */
226 #define SAM2_LUN_ADDRESS_METHOD 0
227 
228 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
229  * (for response) per submit queue at one time. Can be reduced by max_queue
230  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
231  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
232  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
233  * but cannot exceed SDEBUG_CANQUEUE.
234  */
235 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
236 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
237 #define DEF_CMD_PER_LUN  255
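/* Worked example: with SDEBUG_CANQUEUE_WORDS == 3, SDEBUG_CANQUEUE is
 * 3 * BITS_PER_LONG, i.e. 192 queued commands per submit queue on a
 * 64-bit build (96 on 32-bit), tracked by the in_use_bm[] bitmap below.
 */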
238 
239 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
240 #define F_D_IN			1	/* Data-in command (e.g. READ) */
241 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
242 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
243 #define F_D_UNKN		8
244 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
245 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
246 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
247 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
248 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
249 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
250 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
251 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
252 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
253 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
254 
255 /* Useful combinations of the above flags */
256 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
257 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
258 #define FF_SA (F_SA_HIGH | F_SA_LOW)
259 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
260 
261 #define SDEBUG_MAX_PARTS 4
262 
263 #define SDEBUG_MAX_CMD_LEN 32
264 
265 #define SDEB_XA_NOT_IN_USE XA_MARK_1
266 
267 /* Zone types (zbcr05 table 25) */
268 enum sdebug_z_type {
269 	ZBC_ZONE_TYPE_CNV	= 0x1,
270 	ZBC_ZONE_TYPE_SWR	= 0x2,
271 	ZBC_ZONE_TYPE_SWP	= 0x3,
272 };
273 
274 /* enumeration names taken from table 26, zbcr05 */
275 enum sdebug_z_cond {
276 	ZBC_NOT_WRITE_POINTER	= 0x0,
277 	ZC1_EMPTY		= 0x1,
278 	ZC2_IMPLICIT_OPEN	= 0x2,
279 	ZC3_EXPLICIT_OPEN	= 0x3,
280 	ZC4_CLOSED		= 0x4,
281 	ZC6_READ_ONLY		= 0xd,
282 	ZC5_FULL		= 0xe,
283 	ZC7_OFFLINE		= 0xf,
284 };
285 
286 struct sdeb_zone_state {	/* ZBC: per zone state */
287 	enum sdebug_z_type z_type;
288 	enum sdebug_z_cond z_cond;
289 	bool z_non_seq_resource;
290 	unsigned int z_size;
291 	sector_t z_start;
292 	sector_t z_wp;
293 };
294 
295 struct sdebug_dev_info {
296 	struct list_head dev_list;
297 	unsigned int channel;
298 	unsigned int target;
299 	u64 lun;
300 	uuid_t lu_name;
301 	struct sdebug_host_info *sdbg_host;
302 	unsigned long uas_bm[1];
303 	atomic_t num_in_q;
304 	atomic_t stopped;
305 	bool used;
306 
307 	/* For ZBC devices */
308 	enum blk_zoned_model zmodel;
309 	unsigned int zsize;
310 	unsigned int zsize_shift;
311 	unsigned int nr_zones;
312 	unsigned int nr_conv_zones;
313 	unsigned int nr_imp_open;
314 	unsigned int nr_exp_open;
315 	unsigned int nr_closed;
316 	unsigned int max_open;
317 	struct sdeb_zone_state *zstate;
318 };
319 
320 struct sdebug_host_info {
321 	struct list_head host_list;
322 	int si_idx;	/* sdeb_store_info (per host) xarray index */
323 	struct Scsi_Host *shost;
324 	struct device dev;
325 	struct list_head dev_info_list;
326 };
327 
328 /* There is an xarray of pointers to this struct's objects, one per host */
329 struct sdeb_store_info {
330 	rwlock_t macc_lck;	/* for atomic media access on this store */
331 	u8 *storep;		/* user data storage (ram) */
332 	struct t10_pi_tuple *dif_storep; /* protection info */
333 	void *map_storep;	/* provisioning map */
334 };
335 
336 #define to_sdebug_host(d)	\
337 	container_of(d, struct sdebug_host_info, dev)
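/* Example use (e.g. in this driver's device release callback): recover
 * the containing host from the struct device embedded in sdebug_host_info:
 *
 *	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);
 */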
338 
339 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
340 		      SDEB_DEFER_WQ = 2};
341 
342 struct sdebug_defer {
343 	struct hrtimer hrt;
344 	struct execute_work ew;
345 	int sqa_idx;	/* index of sdebug_queue array */
346 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
347 	int issuing_cpu;
348 	bool init_hrt;
349 	bool init_wq;
350 	bool aborted;	/* true when blk_abort_request() already called */
351 	enum sdeb_defer_type defer_t;
352 };
353 
354 struct sdebug_queued_cmd {
355 	/* The corresponding bit in in_use_bm[] in the owning struct
356 	 * sdebug_queue instance, when set, indicates this slot is in use.
357 	 */
358 	struct sdebug_defer *sd_dp;
359 	struct scsi_cmnd *a_cmnd;
360 	unsigned int inj_recovered:1;
361 	unsigned int inj_transport:1;
362 	unsigned int inj_dif:1;
363 	unsigned int inj_dix:1;
364 	unsigned int inj_short:1;
365 	unsigned int inj_host_busy:1;
366 	unsigned int inj_cmd_abort:1;
367 };
368 
369 struct sdebug_queue {
370 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
371 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
372 	spinlock_t qc_lock;
373 	atomic_t blocked;	/* to temporarily stop more being queued */
374 };
375 
376 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
377 static atomic_t sdebug_completions;  /* count of deferred completions */
378 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
379 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
380 
381 struct opcode_info_t {
382 	u8 num_attached;	/* number of elements in arrp[]; 0 if this */
383 				/* is a leaf; 0xff marks the terminator */
384 	u8 opcode;		/* if num_attached > 0, preferred */
385 	u16 sa;			/* service action */
386 	u32 flags;		/* OR-ed set of SDEB_F_* */
387 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
388 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
389 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
390 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
391 };
392 
393 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
394 enum sdeb_opcode_index {
395 	SDEB_I_INVALID_OPCODE =	0,
396 	SDEB_I_INQUIRY = 1,
397 	SDEB_I_REPORT_LUNS = 2,
398 	SDEB_I_REQUEST_SENSE = 3,
399 	SDEB_I_TEST_UNIT_READY = 4,
400 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
401 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
402 	SDEB_I_LOG_SENSE = 7,
403 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
404 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
405 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
406 	SDEB_I_START_STOP = 11,
407 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
408 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
409 	SDEB_I_MAINT_IN = 14,
410 	SDEB_I_MAINT_OUT = 15,
411 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
412 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
413 	SDEB_I_RESERVE = 18,		/* 6, 10 */
414 	SDEB_I_RELEASE = 19,		/* 6, 10 */
415 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
416 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
417 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
418 	SDEB_I_SEND_DIAG = 23,
419 	SDEB_I_UNMAP = 24,
420 	SDEB_I_WRITE_BUFFER = 25,
421 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
422 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
423 	SDEB_I_COMP_WRITE = 28,
424 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
425 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
426 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
427 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
428 };
429 
430 
431 static const unsigned char opcode_ind_arr[256] = {
432 /* 0x0; 0x0->0x1f: 6 byte cdbs */
433 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
434 	    0, 0, 0, 0,
435 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
436 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
437 	    SDEB_I_RELEASE,
438 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
439 	    SDEB_I_ALLOW_REMOVAL, 0,
440 /* 0x20; 0x20->0x3f: 10 byte cdbs */
441 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
442 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
443 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
444 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
445 /* 0x40; 0x40->0x5f: 10 byte cdbs */
446 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
447 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
448 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
449 	    SDEB_I_RELEASE,
450 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
451 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
452 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, SDEB_I_VARIABLE_LEN,
455 /* 0x80; 0x80->0x9f: 16 byte cdbs */
456 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
457 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
458 	0, 0, 0, SDEB_I_VERIFY,
459 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
460 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
461 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
462 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
463 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
464 	     SDEB_I_MAINT_OUT, 0, 0, 0,
465 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
466 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
467 	0, 0, 0, 0, 0, 0, 0, 0,
468 	0, 0, 0, 0, 0, 0, 0, 0,
469 /* 0xc0; 0xc0->0xff: vendor specific */
470 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
471 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
472 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
473 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
474 };
475 
476 /*
477  * The following "response" functions return the SCSI mid-level's 4 byte
478  * tuple-in-an-int. To handle commands with an IMMED bit, which request
479  * faster command completion, they can OR SDEG_RES_IMMED_MASK into
480  * their return value.
481  */
482 #define SDEG_RES_IMMED_MASK 0x40000000
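/* Sketch of the convention described above: a handler for a command whose
 * cdb has the IMMED bit set (e.g. SYNCHRONIZE CACHE carries it in cdb[1]
 * bit 1) can report good status with immediate completion via:
 *
 *	return SDEG_RES_IMMED_MASK;
 *
 * The queuing code is then expected to strip the mask off and complete
 * the command without the configured jdelay/ndelay.
 */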
483 
484 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
505 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
506 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
507 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
508 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
509 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
510 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
511 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
512 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
513 
514 static int sdebug_do_add_host(bool mk_new_store);
515 static int sdebug_add_host_helper(int per_host_idx);
516 static void sdebug_do_remove_host(bool the_end);
517 static int sdebug_add_store(void);
518 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
519 static void sdebug_erase_all_stores(bool apart_from_first);
520 
521 /*
522  * The following are overflow arrays for cdbs that "hit" the same index in
523  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
524  * should be placed in opcode_info_arr[], the others should be placed here.
525  */
526 static const struct opcode_info_t msense_iarr[] = {
527 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
528 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
529 };
530 
531 static const struct opcode_info_t mselect_iarr[] = {
532 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
533 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t read_iarr[] = {
537 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
538 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
539 	     0, 0, 0, 0} },
540 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
541 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
542 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
543 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
544 	     0xc7, 0, 0, 0, 0} },
545 };
546 
547 static const struct opcode_info_t write_iarr[] = {
548 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
549 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
550 		   0, 0, 0, 0, 0, 0} },
551 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
552 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
553 		   0, 0, 0} },
554 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
555 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
556 		   0xbf, 0xc7, 0, 0, 0, 0} },
557 };
558 
559 static const struct opcode_info_t verify_iarr[] = {
560 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
561 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
562 		   0, 0, 0, 0, 0, 0} },
563 };
564 
565 static const struct opcode_info_t sa_in_16_iarr[] = {
566 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
567 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
568 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
569 };
570 
571 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
572 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
573 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
574 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
575 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
576 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
577 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
578 };
579 
580 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
581 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
582 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
583 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
584 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
585 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
586 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
587 };
588 
589 static const struct opcode_info_t write_same_iarr[] = {
590 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
591 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
592 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
593 };
594 
595 static const struct opcode_info_t reserve_iarr[] = {
596 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
597 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
598 };
599 
600 static const struct opcode_info_t release_iarr[] = {
601 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
602 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
603 };
604 
605 static const struct opcode_info_t sync_cache_iarr[] = {
606 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
607 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
609 };
610 
611 static const struct opcode_info_t pre_fetch_iarr[] = {
612 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
613 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
614 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
615 };
616 
617 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
618 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
619 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
620 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
621 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
622 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
623 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
624 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
625 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
626 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
627 };
628 
629 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
630 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
631 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
632 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
633 };
634 
635 
636 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
637  * plus the terminating elements for logic that scans this table such as
638  * REPORT SUPPORTED OPERATION CODES. */
639 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
640 /* 0 */
641 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
642 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
644 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
645 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
646 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
647 	     0, 0} },					/* REPORT LUNS */
648 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
649 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
650 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
651 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
652 /* 5 */
653 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
654 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
655 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
656 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
657 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
658 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
659 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
660 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
661 	     0, 0, 0} },
662 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
663 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
664 	     0, 0} },
665 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
666 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
667 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
668 /* 10 */
669 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
670 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
671 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
672 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
673 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
674 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
675 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
676 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
677 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
678 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
679 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
680 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
681 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
682 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
683 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
684 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
685 				0xff, 0, 0xc7, 0, 0, 0, 0} },
686 /* 15 */
687 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
688 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
689 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
690 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
691 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
692 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
693 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
694 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
695 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
696 	     0xff, 0xff} },
697 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
698 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
699 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
700 	     0} },
701 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
702 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
703 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
704 	     0} },
705 /* 20 */
706 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
707 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
708 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
709 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
710 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
711 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
712 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
713 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
714 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
715 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
716 /* 25 */
717 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
718 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
719 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
720 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
721 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
722 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
723 		 0, 0, 0, 0, 0} },
724 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
725 	    resp_sync_cache, sync_cache_iarr,
726 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
727 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
728 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
729 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
730 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
731 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
732 	    resp_pre_fetch, pre_fetch_iarr,
733 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
734 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
735 
736 /* 30 */
737 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
738 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
739 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
740 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
741 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
742 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
743 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
744 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
745 /* sentinel */
746 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
747 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
748 };
749 
750 static int sdebug_num_hosts;
751 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
752 static int sdebug_ato = DEF_ATO;
753 static int sdebug_cdb_len = DEF_CDB_LEN;
754 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
755 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
756 static int sdebug_dif = DEF_DIF;
757 static int sdebug_dix = DEF_DIX;
758 static int sdebug_dsense = DEF_D_SENSE;
759 static int sdebug_every_nth = DEF_EVERY_NTH;
760 static int sdebug_fake_rw = DEF_FAKE_RW;
761 static unsigned int sdebug_guard = DEF_GUARD;
762 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
763 static int sdebug_max_luns = DEF_MAX_LUNS;
764 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
765 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
766 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
767 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
768 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
769 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
770 static int sdebug_no_uld;
771 static int sdebug_num_parts = DEF_NUM_PARTS;
772 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
773 static int sdebug_opt_blks = DEF_OPT_BLKS;
774 static int sdebug_opts = DEF_OPTS;
775 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
776 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
777 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
778 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
779 static int sdebug_sector_size = DEF_SECTOR_SIZE;
780 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
781 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
782 static unsigned int sdebug_lbpu = DEF_LBPU;
783 static unsigned int sdebug_lbpws = DEF_LBPWS;
784 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
785 static unsigned int sdebug_lbprz = DEF_LBPRZ;
786 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
787 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
788 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
789 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
790 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
791 static int sdebug_uuid_ctl = DEF_UUID_CTL;
792 static bool sdebug_random = DEF_RANDOM;
793 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
794 static bool sdebug_removable = DEF_REMOVABLE;
795 static bool sdebug_clustering;
796 static bool sdebug_host_lock = DEF_HOST_LOCK;
797 static bool sdebug_strict = DEF_STRICT;
798 static bool sdebug_any_injecting_opt;
799 static bool sdebug_verbose;
800 static bool have_dif_prot;
801 static bool write_since_sync;
802 static bool sdebug_statistics = DEF_STATISTICS;
803 static bool sdebug_wp;
804 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
805 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
806 static char *sdeb_zbc_model_s;
807 
808 static unsigned int sdebug_store_sectors;
809 static sector_t sdebug_capacity;	/* in sectors */
810 
811 /* Legacy BIOS disk geometry values; the kernel may eventually drop them
812    but some mode sense pages may still need them */
813 static int sdebug_heads;		/* heads per disk */
814 static int sdebug_cylinders_per;	/* cylinders per surface */
815 static int sdebug_sectors_per;		/* sectors per cylinder */
816 
817 static LIST_HEAD(sdebug_host_list);
818 static DEFINE_SPINLOCK(sdebug_host_list_lock);
819 
820 static struct xarray per_store_arr;
821 static struct xarray *per_store_ap = &per_store_arr;
822 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
823 static int sdeb_most_recent_idx = -1;
824 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
825 
826 static unsigned long map_size;
827 static int num_aborts;
828 static int num_dev_resets;
829 static int num_target_resets;
830 static int num_bus_resets;
831 static int num_host_resets;
832 static int dix_writes;
833 static int dix_reads;
834 static int dif_errors;
835 
836 /* ZBC global data */
837 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 
842 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
843 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
844 
845 static DEFINE_RWLOCK(atomic_rw);
846 static DEFINE_RWLOCK(atomic_rw2);
847 
848 static rwlock_t *ramdisk_lck_a[2];
849 
850 static char sdebug_proc_name[] = MY_NAME;
851 static const char *my_name = MY_NAME;
852 
853 static struct bus_type pseudo_lld_bus;
854 
855 static struct device_driver sdebug_driverfs_driver = {
856 	.name 		= sdebug_proc_name,
857 	.bus		= &pseudo_lld_bus,
858 };
859 
860 static const int check_condition_result =
861 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
862 
863 static const int illegal_condition_result =
864 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
865 
866 static const int device_qfull_result =
867 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
868 
869 static const int condition_met_result = SAM_STAT_CONDITION_MET;
870 
871 
872 /* Only do the extra work involved in logical block provisioning if one or
873  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
874  * real reads and writes (i.e. not skipping them for speed).
875  */
876 static inline bool scsi_debug_lbp(void)
877 {
878 	return 0 == sdebug_fake_rw &&
879 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
880 }
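/* Illustrative module load enabling the provisioning path checked above
 * (lbpu, lbpws and fake_rw are existing parameters of this driver):
 *      modprobe scsi_debug lbpu=1 lbpws=1 fake_rw=0
 */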
881 
882 static void *lba2fake_store(struct sdeb_store_info *sip,
883 			    unsigned long long lba)
884 {
885 	struct sdeb_store_info *lsip = sip;
886 
887 	lba = do_div(lba, sdebug_store_sectors);
888 	if (!sip || !sip->storep) {
889 		WARN_ON_ONCE(true);
890 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
891 	}
892 	return lsip->storep + lba * sdebug_sector_size;
893 }
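/* Worked example of the wrap-around above: with an 8 MiB store and
 * 512-byte sectors, sdebug_store_sectors == 16384, so an LBA of 20000
 * wraps to 20000 % 16384 == 3616 and maps to byte offset 3616 * 512
 * within storep.
 */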
894 
895 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
896 				      sector_t sector)
897 {
898 	sector = sector_div(sector, sdebug_store_sectors);
899 
900 	return sip->dif_storep + sector;
901 }
902 
903 static void sdebug_max_tgts_luns(void)
904 {
905 	struct sdebug_host_info *sdbg_host;
906 	struct Scsi_Host *hpnt;
907 
908 	spin_lock(&sdebug_host_list_lock);
909 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
910 		hpnt = sdbg_host->shost;
911 		if ((hpnt->this_id >= 0) &&
912 		    (sdebug_num_tgts > hpnt->this_id))
913 			hpnt->max_id = sdebug_num_tgts + 1;
914 		else
915 			hpnt->max_id = sdebug_num_tgts;
916 		/* sdebug_max_luns; */
917 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
918 	}
919 	spin_unlock(&sdebug_host_list_lock);
920 }
921 
922 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
923 
924 /* Set in_bit to -1 to indicate no bit position of invalid field */
925 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
926 				 enum sdeb_cmd_data c_d,
927 				 int in_byte, int in_bit)
928 {
929 	unsigned char *sbuff;
930 	u8 sks[4];
931 	int sl, asc;
932 
933 	sbuff = scp->sense_buffer;
934 	if (!sbuff) {
935 		sdev_printk(KERN_ERR, scp->device,
936 			    "%s: sense_buffer is NULL\n", __func__);
937 		return;
938 	}
939 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
940 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
941 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
942 	memset(sks, 0, sizeof(sks));
943 	sks[0] = 0x80;
944 	if (c_d)
945 		sks[0] |= 0x40;
946 	if (in_bit >= 0) {
947 		sks[0] |= 0x8;
948 		sks[0] |= 0x7 & in_bit;
949 	}
950 	put_unaligned_be16(in_byte, sks + 1);
951 	if (sdebug_dsense) {
952 		sl = sbuff[7] + 8;
953 		sbuff[7] = sl;
954 		sbuff[sl] = 0x2;
955 		sbuff[sl + 1] = 0x6;
956 		memcpy(sbuff + sl + 4, sks, 3);
957 	} else
958 		memcpy(sbuff + 15, sks, 3);
959 	if (sdebug_verbose)
960 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
961 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
962 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
963 }
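/* Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) reports
 * ILLEGAL REQUEST with asc INVALID_FIELD_IN_CDB (0x24) and builds the
 * SENSE KEY SPECIFIC bytes as sks[0] = 0x80 | 0x40 | 0x8 | 4 = 0xcc
 * (SKSV, C/D, BPV, bit 4) with a field pointer of cdb byte 1.
 */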
964 
965 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
966 {
967 	unsigned char *sbuff;
968 
969 	sbuff = scp->sense_buffer;
970 	if (!sbuff) {
971 		sdev_printk(KERN_ERR, scp->device,
972 			    "%s: sense_buffer is NULL\n", __func__);
973 		return;
974 	}
975 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
976 
977 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
978 
979 	if (sdebug_verbose)
980 		sdev_printk(KERN_INFO, scp->device,
981 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
982 			    my_name, key, asc, asq);
983 }
984 
985 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
986 {
987 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
988 }
989 
990 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
991 			    void __user *arg)
992 {
993 	if (sdebug_verbose) {
994 		if (0x1261 == cmd)
995 			sdev_printk(KERN_INFO, dev,
996 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
997 		else if (0x5331 == cmd)
998 			sdev_printk(KERN_INFO, dev,
999 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1000 				    __func__);
1001 		else
1002 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1003 				    __func__, cmd);
1004 	}
1005 	return -EINVAL;
1006 	/* return -ENOTTY; // correct return but upsets fdisk */
1007 }
1008 
1009 static void config_cdb_len(struct scsi_device *sdev)
1010 {
1011 	switch (sdebug_cdb_len) {
1012 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1013 		sdev->use_10_for_rw = false;
1014 		sdev->use_16_for_rw = false;
1015 		sdev->use_10_for_ms = false;
1016 		break;
1017 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1018 		sdev->use_10_for_rw = true;
1019 		sdev->use_16_for_rw = false;
1020 		sdev->use_10_for_ms = false;
1021 		break;
1022 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1023 		sdev->use_10_for_rw = true;
1024 		sdev->use_16_for_rw = false;
1025 		sdev->use_10_for_ms = true;
1026 		break;
1027 	case 16:
1028 		sdev->use_10_for_rw = false;
1029 		sdev->use_16_for_rw = true;
1030 		sdev->use_10_for_ms = true;
1031 		break;
1032 	case 32: /* No knobs to suggest this so same as 16 for now */
1033 		sdev->use_10_for_rw = false;
1034 		sdev->use_16_for_rw = true;
1035 		sdev->use_10_for_ms = true;
1036 		break;
1037 	default:
1038 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1039 			sdebug_cdb_len);
1040 		sdev->use_10_for_rw = true;
1041 		sdev->use_16_for_rw = false;
1042 		sdev->use_10_for_ms = false;
1043 		sdebug_cdb_len = 10;
1044 		break;
1045 	}
1046 }
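/* For example, loading with the existing cdb_len parameter:
 *      modprobe scsi_debug cdb_len=16
 * sets use_16_for_rw, so the mid-level issues READ(16)/WRITE(16) even
 * for LBAs that would fit in a 10 byte cdb.
 */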
1047 
1048 static void all_config_cdb_len(void)
1049 {
1050 	struct sdebug_host_info *sdbg_host;
1051 	struct Scsi_Host *shost;
1052 	struct scsi_device *sdev;
1053 
1054 	spin_lock(&sdebug_host_list_lock);
1055 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1056 		shost = sdbg_host->shost;
1057 		shost_for_each_device(sdev, shost) {
1058 			config_cdb_len(sdev);
1059 		}
1060 	}
1061 	spin_unlock(&sdebug_host_list_lock);
1062 }
1063 
1064 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1065 {
1066 	struct sdebug_host_info *sdhp;
1067 	struct sdebug_dev_info *dp;
1068 
1069 	spin_lock(&sdebug_host_list_lock);
1070 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1071 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1072 			if ((devip->sdbg_host == dp->sdbg_host) &&
1073 			    (devip->target == dp->target))
1074 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1075 		}
1076 	}
1077 	spin_unlock(&sdebug_host_list_lock);
1078 }
1079 
1080 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1081 {
1082 	int k;
1083 
1084 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1085 	if (k != SDEBUG_NUM_UAS) {
1086 		const char *cp = NULL;
1087 
1088 		switch (k) {
1089 		case SDEBUG_UA_POR:
1090 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1091 					POWER_ON_RESET_ASCQ);
1092 			if (sdebug_verbose)
1093 				cp = "power on reset";
1094 			break;
1095 		case SDEBUG_UA_BUS_RESET:
1096 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1097 					BUS_RESET_ASCQ);
1098 			if (sdebug_verbose)
1099 				cp = "bus reset";
1100 			break;
1101 		case SDEBUG_UA_MODE_CHANGED:
1102 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1103 					MODE_CHANGED_ASCQ);
1104 			if (sdebug_verbose)
1105 				cp = "mode parameters changed";
1106 			break;
1107 		case SDEBUG_UA_CAPACITY_CHANGED:
1108 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1109 					CAPACITY_CHANGED_ASCQ);
1110 			if (sdebug_verbose)
1111 				cp = "capacity data changed";
1112 			break;
1113 		case SDEBUG_UA_MICROCODE_CHANGED:
1114 			mk_sense_buffer(scp, UNIT_ATTENTION,
1115 					TARGET_CHANGED_ASC,
1116 					MICROCODE_CHANGED_ASCQ);
1117 			if (sdebug_verbose)
1118 				cp = "microcode has been changed";
1119 			break;
1120 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1121 			mk_sense_buffer(scp, UNIT_ATTENTION,
1122 					TARGET_CHANGED_ASC,
1123 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1124 			if (sdebug_verbose)
1125 				cp = "microcode has been changed without reset";
1126 			break;
1127 		case SDEBUG_UA_LUNS_CHANGED:
1128 			/*
1129 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1130 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1131 			 * on the target, until a REPORT LUNS command is
1132 			 * received.  SPC-4 behavior is to report it only once.
1133 			 * NOTE:  sdebug_scsi_level does not use the same
1134 			 * values as struct scsi_device->scsi_level.
1135 			 */
1136 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1137 				clear_luns_changed_on_target(devip);
1138 			mk_sense_buffer(scp, UNIT_ATTENTION,
1139 					TARGET_CHANGED_ASC,
1140 					LUNS_CHANGED_ASCQ);
1141 			if (sdebug_verbose)
1142 				cp = "reported luns data has changed";
1143 			break;
1144 		default:
1145 			pr_warn("unexpected unit attention code=%d\n", k);
1146 			if (sdebug_verbose)
1147 				cp = "unknown";
1148 			break;
1149 		}
1150 		clear_bit(k, devip->uas_bm);
1151 		if (sdebug_verbose)
1152 			sdev_printk(KERN_INFO, scp->device,
1153 				   "%s reports: Unit attention: %s\n",
1154 				   my_name, cp);
1155 		return check_condition_result;
1156 	}
1157 	return 0;
1158 }
1159 
1160 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1161 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1162 				int arr_len)
1163 {
1164 	int act_len;
1165 	struct scsi_data_buffer *sdb = &scp->sdb;
1166 
1167 	if (!sdb->length)
1168 		return 0;
1169 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1170 		return DID_ERROR << 16;
1171 
1172 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1173 				      arr, arr_len);
1174 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1175 
1176 	return 0;
1177 }
1178 
1179 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1180  * (DID_ERROR << 16). Can write at an offset into the data-in buffer; if
1181  * called multiple times, writes need not be in ascending offset order.
1182  * Assumes resid is set to scsi_bufflen() prior to any calls.
1183  */
1184 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1185 				  int arr_len, unsigned int off_dst)
1186 {
1187 	unsigned int act_len, n;
1188 	struct scsi_data_buffer *sdb = &scp->sdb;
1189 	off_t skip = off_dst;
1190 
1191 	if (sdb->length <= off_dst)
1192 		return 0;
1193 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1194 		return DID_ERROR << 16;
1195 
1196 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1197 				       arr, arr_len, skip);
1198 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1199 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1200 		 scsi_get_resid(scp));
1201 	n = scsi_bufflen(scp) - (off_dst + act_len);
1202 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1203 	return 0;
1204 }
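/* Worked example of the resid update above: with scsi_bufflen() == 512,
 * off_dst == 64 and act_len == 64, n = 512 - 128 = 384, so resid is
 * lowered to at most 384 regardless of the order of partial fills.
 */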
1205 
1206 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1207  * 'arr' or -1 if error.
1208  */
1209 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1210 			       int arr_len)
1211 {
1212 	if (!scsi_bufflen(scp))
1213 		return 0;
1214 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1215 		return -1;
1216 
1217 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1218 }
1219 
1220 
1221 static char sdebug_inq_vendor_id[9] = "Linux   ";
1222 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1223 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1224 /* Use some locally assigned NAAs for SAS addresses. */
1225 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1226 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1227 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1228 
1229 /* Device identification VPD page. Returns number of bytes placed in arr */
1230 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1231 			  int target_dev_id, int dev_id_num,
1232 			  const char *dev_id_str, int dev_id_str_len,
1233 			  const uuid_t *lu_name)
1234 {
1235 	int num, port_a;
1236 	char b[32];
1237 
1238 	port_a = target_dev_id + 1;
1239 	/* T10 vendor identifier field format (faked) */
1240 	arr[0] = 0x2;	/* ASCII */
1241 	arr[1] = 0x1;
1242 	arr[2] = 0x0;
1243 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1244 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1245 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1246 	num = 8 + 16 + dev_id_str_len;
1247 	arr[3] = num;
1248 	num += 4;
1249 	if (dev_id_num >= 0) {
1250 		if (sdebug_uuid_ctl) {
1251 			/* Locally assigned UUID */
1252 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1253 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1254 			arr[num++] = 0x0;
1255 			arr[num++] = 0x12;
1256 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1257 			arr[num++] = 0x0;
1258 			memcpy(arr + num, lu_name, 16);
1259 			num += 16;
1260 		} else {
1261 			/* NAA-3, Logical unit identifier (binary) */
1262 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1263 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1264 			arr[num++] = 0x0;
1265 			arr[num++] = 0x8;
1266 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1267 			num += 8;
1268 		}
1269 		/* Target relative port number */
1270 		arr[num++] = 0x61;	/* proto=sas, binary */
1271 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1272 		arr[num++] = 0x0;	/* reserved */
1273 		arr[num++] = 0x4;	/* length */
1274 		arr[num++] = 0x0;	/* reserved */
1275 		arr[num++] = 0x0;	/* reserved */
1276 		arr[num++] = 0x0;
1277 		arr[num++] = 0x1;	/* relative port A */
1278 	}
1279 	/* NAA-3, Target port identifier */
1280 	arr[num++] = 0x61;	/* proto=sas, binary */
1281 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1282 	arr[num++] = 0x0;
1283 	arr[num++] = 0x8;
1284 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1285 	num += 8;
1286 	/* NAA-3, Target port group identifier */
1287 	arr[num++] = 0x61;	/* proto=sas, binary */
1288 	arr[num++] = 0x95;	/* piv=1, target port group id */
1289 	arr[num++] = 0x0;
1290 	arr[num++] = 0x4;
1291 	arr[num++] = 0;
1292 	arr[num++] = 0;
1293 	put_unaligned_be16(port_group_id, arr + num);
1294 	num += 2;
1295 	/* NAA-3, Target device identifier */
1296 	arr[num++] = 0x61;	/* proto=sas, binary */
1297 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1298 	arr[num++] = 0x0;
1299 	arr[num++] = 0x8;
1300 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1301 	num += 8;
1302 	/* SCSI name string: Target device identifier */
1303 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1304 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1305 	arr[num++] = 0x0;
1306 	arr[num++] = 24;
1307 	memcpy(arr + num, "naa.32222220", 12);
1308 	num += 12;
1309 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1310 	memcpy(arr + num, b, 8);
1311 	num += 8;
1312 	memset(arr + num, 0, 4);
1313 	num += 4;
1314 	return num;
1315 }
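/* Worked example of the SCSI name string built above: "naa.32222220"
 * (12 bytes) + 8 hex digits of target_dev_id + 4 bytes of zero padding
 * add up to the 24 byte designator length stored just before the copy.
 */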
1316 
1317 static unsigned char vpd84_data[] = {
1318 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1319     0x22,0x22,0x22,0x0,0xbb,0x1,
1320     0x22,0x22,0x22,0x0,0xbb,0x2,
1321 };
1322 
1323 /*  Software interface identification VPD page */
1324 static int inquiry_vpd_84(unsigned char *arr)
1325 {
1326 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1327 	return sizeof(vpd84_data);
1328 }
1329 
1330 /* Management network addresses VPD page */
1331 static int inquiry_vpd_85(unsigned char *arr)
1332 {
1333 	int num = 0;
1334 	const char *na1 = "https://www.kernel.org/config";
1335 	const char *na2 = "http://www.kernel.org/log";
1336 	int plen, olen;
1337 
1338 	arr[num++] = 0x1;	/* lu, storage config */
1339 	arr[num++] = 0x0;	/* reserved */
1340 	arr[num++] = 0x0;
1341 	olen = strlen(na1);
1342 	plen = olen + 1;
1343 	if (plen % 4)
1344 		plen = ((plen / 4) + 1) * 4;
1345 	arr[num++] = plen;	/* length, null terminated, padded */
1346 	memcpy(arr + num, na1, olen);
1347 	memset(arr + num + olen, 0, plen - olen);
1348 	num += plen;
1349 
1350 	arr[num++] = 0x4;	/* lu, logging */
1351 	arr[num++] = 0x0;	/* reserved */
1352 	arr[num++] = 0x0;
1353 	olen = strlen(na2);
1354 	plen = olen + 1;
1355 	if (plen % 4)
1356 		plen = ((plen / 4) + 1) * 4;
1357 	arr[num++] = plen;	/* length, null terminated, padded */
1358 	memcpy(arr + num, na2, olen);
1359 	memset(arr + num + olen, 0, plen - olen);
1360 	num += plen;
1361 
1362 	return num;
1363 }
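/* Worked example of the padding above: na1 is 29 characters, so
 * plen = 30 is rounded up to 32; memcpy() places the 29 byte URL and
 * memset() zero-fills the remaining 3 bytes (NUL terminator plus pad)
 * to reach a 4 byte multiple.
 */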
1364 
1365 /* SCSI ports VPD page */
1366 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1367 {
1368 	int num = 0;
1369 	int port_a, port_b;
1370 
1371 	port_a = target_dev_id + 1;
1372 	port_b = port_a + 1;
1373 	arr[num++] = 0x0;	/* reserved */
1374 	arr[num++] = 0x0;	/* reserved */
1375 	arr[num++] = 0x0;
1376 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1377 	memset(arr + num, 0, 6);
1378 	num += 6;
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 12;	/* length tp descriptor */
1381 	/* naa-5 target port identifier (A) */
1382 	arr[num++] = 0x61;	/* proto=sas, binary */
1383 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1384 	arr[num++] = 0x0;	/* reserved */
1385 	arr[num++] = 0x8;	/* length */
1386 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1387 	num += 8;
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x0;	/* reserved */
1390 	arr[num++] = 0x0;
1391 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1392 	memset(arr + num, 0, 6);
1393 	num += 6;
1394 	arr[num++] = 0x0;
1395 	arr[num++] = 12;	/* length tp descriptor */
1396 	/* naa-5 target port identifier (B) */
1397 	arr[num++] = 0x61;	/* proto=sas, binary */
1398 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1399 	arr[num++] = 0x0;	/* reserved */
1400 	arr[num++] = 0x8;	/* length */
1401 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1402 	num += 8;
1403 
1404 	return num;
1405 }
1406 
1407 
1408 static unsigned char vpd89_data[] = {
1409 /* from 4th byte */ 0,0,0,0,
1410 'l','i','n','u','x',' ',' ',' ',
1411 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1412 '1','2','3','4',
1413 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1414 0xec,0,0,0,
1415 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1416 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1417 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1418 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1419 0x53,0x41,
1420 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1421 0x20,0x20,
1422 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1423 0x10,0x80,
1424 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1425 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1426 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1428 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1429 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1430 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1435 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1436 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1437 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1450 };
1451 
1452 /* ATA Information VPD page */
1453 static int inquiry_vpd_89(unsigned char *arr)
1454 {
1455 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1456 	return sizeof(vpd89_data);
1457 }
1458 
1459 
1460 static unsigned char vpdb0_data[] = {
1461 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1462 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1463 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1464 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1465 };
1466 
1467 /* Block limits VPD page (SBC-3) */
1468 static int inquiry_vpd_b0(unsigned char *arr)
1469 {
1470 	unsigned int gran;
1471 
1472 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1473 
1474 	/* Optimal transfer length granularity */
1475 	if (sdebug_opt_xferlen_exp != 0 &&
1476 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1477 		gran = 1 << sdebug_opt_xferlen_exp;
1478 	else
1479 		gran = 1 << sdebug_physblk_exp;
1480 	put_unaligned_be16(gran, arr + 2);
1481 
1482 	/* Maximum Transfer Length */
1483 	if (sdebug_store_sectors > 0x400)
1484 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1485 
1486 	/* Optimal Transfer Length */
1487 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1488 
1489 	if (sdebug_lbpu) {
1490 		/* Maximum Unmap LBA Count */
1491 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1492 
1493 		/* Maximum Unmap Block Descriptor Count */
1494 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1495 	}
1496 
1497 	/* Unmap Granularity Alignment */
1498 	if (sdebug_unmap_alignment) {
1499 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1500 		arr[28] |= 0x80; /* UGAVALID */
1501 	}
1502 
1503 	/* Optimal Unmap Granularity */
1504 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1505 
1506 	/* Maximum WRITE SAME Length */
1507 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1508 
1509 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1510 
1511 
1512 }
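
/*
 * Example of the granularity selection above: with the module parameters
 * physblk_exp=3 and opt_xferlen_exp=0 (the default), gran = 1 << 3 = 8
 * logical blocks; setting opt_xferlen_exp=4 would instead report
 * 1 << 4 = 16, since it exceeds the physical block exponent.
 */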
1513 
1514 /* Block device characteristics VPD page (SBC-3) */
1515 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1516 {
1517 	memset(arr, 0, 0x3c);
1518 	arr[0] = 0;
1519 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1520 	arr[2] = 0;
1521 	arr[3] = 5;	/* less than 1.8" */
1522 	if (devip->zmodel == BLK_ZONED_HA)
1523 		arr[4] = 1 << 4;	/* zoned field = 01b */
1524 
1525 	return 0x3c;
1526 }
1527 
1528 /* Logical block provisioning VPD page (SBC-4) */
1529 static int inquiry_vpd_b2(unsigned char *arr)
1530 {
1531 	memset(arr, 0, 0x4);
1532 	arr[0] = 0;			/* threshold exponent */
1533 	if (sdebug_lbpu)
1534 		arr[1] = 1 << 7;
1535 	if (sdebug_lbpws)
1536 		arr[1] |= 1 << 6;
1537 	if (sdebug_lbpws10)
1538 		arr[1] |= 1 << 5;
1539 	if (sdebug_lbprz && scsi_debug_lbp())
1540 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1541 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1542 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1543 	/* threshold_percentage=0 */
1544 	return 0x4;
1545 }
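
/*
 * Byte 1 of this page packs LBPU into bit 7, LBPWS into bit 6, LBPWS10
 * into bit 5 and LBPRZ into bits 4:2. For example, loading the module
 * with lbpu=1, lbpws=1 and lbprz=1 yields
 * arr[1] = 0x80 | 0x40 | (1 << 2) = 0xc4.
 */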
1546 
1547 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1548 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1549 {
1550 	memset(arr, 0, 0x3c);
1551 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1552 	/*
1553 	 * Set Optimal number of open sequential write preferred zones and
1554 	 * Optimal number of non-sequentially written sequential write
1555 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1556 	 * fields set to zero, apart from Max. number of open swrz_s field.
1557 	 */
1558 	put_unaligned_be32(0xffffffff, &arr[4]);
1559 	put_unaligned_be32(0xffffffff, &arr[8]);
1560 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1561 		put_unaligned_be32(devip->max_open, &arr[12]);
1562 	else
1563 		put_unaligned_be32(0xffffffff, &arr[12]);
1564 	return 0x3c;
1565 }
1566 
1567 #define SDEBUG_LONG_INQ_SZ 96
1568 #define SDEBUG_MAX_INQ_ARR_SZ 584
1569 
1570 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1571 {
1572 	unsigned char pq_pdt;
1573 	unsigned char *arr;
1574 	unsigned char *cmd = scp->cmnd;
1575 	int alloc_len, n, ret;
1576 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1577 
1578 	alloc_len = get_unaligned_be16(cmd + 3);
1579 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1580 	if (!arr)
1581 		return DID_REQUEUE << 16;
1582 	is_disk = (sdebug_ptype == TYPE_DISK);
1583 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1584 	is_disk_zbc = (is_disk || is_zbc);
1585 	have_wlun = scsi_is_wlun(scp->device->lun);
1586 	if (have_wlun)
1587 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1588 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1589 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1590 	else
1591 		pq_pdt = (sdebug_ptype & 0x1f);
1592 	arr[0] = pq_pdt;
1593 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1594 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1595 		kfree(arr);
1596 		return check_condition_result;
1597 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1598 		int lu_id_num, port_group_id, target_dev_id, len;
1599 		char lu_id_str[6];
1600 		int host_no = devip->sdbg_host->shost->host_no;
1601 
1602 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1603 		    (devip->channel & 0x7f);
1604 		if (sdebug_vpd_use_hostno == 0)
1605 			host_no = 0;
1606 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1607 			    (devip->target * 1000) + devip->lun);
1608 		target_dev_id = ((host_no + 1) * 2000) +
1609 				 (devip->target * 1000) - 3;
1610 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1611 		if (0 == cmd[2]) { /* supported vital product data pages */
1612 			arr[1] = cmd[2];	/*sanity */
1613 			n = 4;
1614 			arr[n++] = 0x0;   /* this page */
1615 			arr[n++] = 0x80;  /* unit serial number */
1616 			arr[n++] = 0x83;  /* device identification */
1617 			arr[n++] = 0x84;  /* software interface ident. */
1618 			arr[n++] = 0x85;  /* management network addresses */
1619 			arr[n++] = 0x86;  /* extended inquiry */
1620 			arr[n++] = 0x87;  /* mode page policy */
1621 			arr[n++] = 0x88;  /* SCSI ports */
1622 			if (is_disk_zbc) {	  /* SBC or ZBC */
1623 				arr[n++] = 0x89;  /* ATA information */
1624 				arr[n++] = 0xb0;  /* Block limits */
1625 				arr[n++] = 0xb1;  /* Block characteristics */
1626 				if (is_disk)
1627 					arr[n++] = 0xb2;  /* LB Provisioning */
1628 				if (is_zbc)
1629 					arr[n++] = 0xb6;  /* ZB dev. char. */
1630 			}
1631 			arr[3] = n - 4;	  /* number of supported VPD pages */
1632 		} else if (0x80 == cmd[2]) { /* unit serial number */
1633 			arr[1] = cmd[2];	/*sanity */
1634 			arr[3] = len;
1635 			memcpy(&arr[4], lu_id_str, len);
1636 		} else if (0x83 == cmd[2]) { /* device identification */
1637 			arr[1] = cmd[2];	/*sanity */
1638 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1639 						target_dev_id, lu_id_num,
1640 						lu_id_str, len,
1641 						&devip->lu_name);
1642 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1643 			arr[1] = cmd[2];	/*sanity */
1644 			arr[3] = inquiry_vpd_84(&arr[4]);
1645 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1646 			arr[1] = cmd[2];	/*sanity */
1647 			arr[3] = inquiry_vpd_85(&arr[4]);
1648 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = 0x3c;	/* number of following entries */
1651 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1652 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1653 			else if (have_dif_prot)
1654 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1655 			else
1656 				arr[4] = 0x0;   /* no protection stuff */
1657 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1658 		} else if (0x87 == cmd[2]) { /* mode page policy */
1659 			arr[1] = cmd[2];	/*sanity */
1660 			arr[3] = 0x8;	/* number of following entries */
1661 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1662 			arr[6] = 0x80;	/* mlus, shared */
1663 			arr[8] = 0x18;	 /* protocol specific lu */
1664 			arr[10] = 0x82;	 /* mlus, per initiator port */
1665 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1666 			arr[1] = cmd[2];	/*sanity */
1667 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1668 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1669 			arr[1] = cmd[2];        /*sanity */
1670 			n = inquiry_vpd_89(&arr[4]);
1671 			put_unaligned_be16(n, arr + 2);
1672 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1673 			arr[1] = cmd[2];        /*sanity */
1674 			arr[3] = inquiry_vpd_b0(&arr[4]);
1675 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1676 			arr[1] = cmd[2];        /*sanity */
1677 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1678 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1679 			arr[1] = cmd[2];        /*sanity */
1680 			arr[3] = inquiry_vpd_b2(&arr[4]);
1681 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1682 			arr[1] = cmd[2];        /*sanity */
1683 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1684 		} else {
1685 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1686 			kfree(arr);
1687 			return check_condition_result;
1688 		}
1689 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1690 		ret = fill_from_dev_buffer(scp, arr,
1691 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1692 		kfree(arr);
1693 		return ret;
1694 	}
1695 	/* drops through here for a standard inquiry */
1696 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1697 	arr[2] = sdebug_scsi_level;
1698 	arr[3] = 2;    /* response_data_format==2 */
1699 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1700 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1701 	if (sdebug_vpd_use_hostno == 0)
1702 		arr[5] |= 0x10; /* claim: implicit TPGS */
1703 	arr[6] = 0x10; /* claim: MultiP */
1704 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1705 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1706 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1707 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1708 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1709 	/* Use Vendor Specific area to place driver date in ASCII */
1710 	memcpy(&arr[36], sdebug_version_date, 8);
1711 	/* version descriptors (2 bytes each) follow */
1712 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1713 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1714 	n = 62;
1715 	if (is_disk) {		/* SBC-4 no version claimed */
1716 		put_unaligned_be16(0x600, arr + n);
1717 		n += 2;
1718 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1719 		put_unaligned_be16(0x525, arr + n);
1720 		n += 2;
1721 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1722 		put_unaligned_be16(0x624, arr + n);
1723 		n += 2;
1724 	}
1725 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1726 	ret = fill_from_dev_buffer(scp, arr,
1727 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1728 	kfree(arr);
1729 	return ret;
1730 }
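
/*
 * These responses are easy to inspect from user space, for example with
 * the sg_inq utility from sg3_utils (assuming the scsi_debug device shows
 * up as /dev/sg0):
 *
 *   sg_inq /dev/sg0               standard INQUIRY (96 byte response)
 *   sg_inq --page=0x80 /dev/sg0   unit serial number VPD page
 *   sg_inq --page=0xb0 /dev/sg0   block limits VPD page
 */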
1731 
1732 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1733 				   0, 0, 0x0, 0x0};
1734 
1735 static int resp_requests(struct scsi_cmnd *scp,
1736 			 struct sdebug_dev_info *devip)
1737 {
1738 	unsigned char *sbuff;
1739 	unsigned char *cmd = scp->cmnd;
1740 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1741 	bool dsense;
1742 	int len = 18;
1743 
1744 	memset(arr, 0, sizeof(arr));
1745 	dsense = !!(cmd[1] & 1);
1746 	sbuff = scp->sense_buffer;
1747 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1748 		if (dsense) {
1749 			arr[0] = 0x72;
1750 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1751 			arr[2] = THRESHOLD_EXCEEDED;
1752 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1753 			len = 8;
1754 		} else {
1755 			arr[0] = 0x70;
1756 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1757 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1758 			arr[12] = THRESHOLD_EXCEEDED;
1759 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1760 		}
1761 	} else {
1762 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1763 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1764 			;	/* have sense and formats match */
1765 		else if (arr[0] <= 0x70) {
1766 			if (dsense) {
1767 				memset(arr, 0, 8);
1768 				arr[0] = 0x72;
1769 				len = 8;
1770 			} else {
1771 				memset(arr, 0, 18);
1772 				arr[0] = 0x70;
1773 				arr[7] = 0xa;
1774 			}
1775 		} else if (dsense) {
1776 			memset(arr, 0, 8);
1777 			arr[0] = 0x72;
1778 			arr[1] = sbuff[2];     /* sense key */
1779 			arr[2] = sbuff[12];    /* asc */
1780 			arr[3] = sbuff[13];    /* ascq */
1781 			len = 8;
1782 		} else {
1783 			memset(arr, 0, 18);
1784 			arr[0] = 0x70;
1785 			arr[2] = sbuff[1];     /* sense key */
1786 			arr[7] = 0xa;
1787 			arr[12] = sbuff[2];    /* asc */
1788 			arr[13] = sbuff[3];    /* ascq */
1789 		}
1790 
1791 	}
1792 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1793 	return fill_from_dev_buffer(scp, arr, len);
1794 }
1795 
1796 static int resp_start_stop(struct scsi_cmnd *scp,
1797 			   struct sdebug_dev_info *devip)
1798 {
1799 	unsigned char *cmd = scp->cmnd;
1800 	int power_cond, stop;
1801 	bool changing;
1802 
1803 	power_cond = (cmd[4] & 0xf0) >> 4;
1804 	if (power_cond) {
1805 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1806 		return check_condition_result;
1807 	}
1808 	stop = !(cmd[4] & 1);
1809 	changing = atomic_read(&devip->stopped) == !stop;
1810 	atomic_xchg(&devip->stopped, stop);
1811 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1812 		return SDEG_RES_IMMED_MASK;
1813 	else
1814 		return 0;
1815 }
1816 
1817 static sector_t get_sdebug_capacity(void)
1818 {
1819 	static const unsigned int gibibyte = 1073741824;
1820 
1821 	if (sdebug_virtual_gb > 0)
1822 		return (sector_t)sdebug_virtual_gb *
1823 			(gibibyte / sdebug_sector_size);
1824 	else
1825 		return sdebug_store_sectors;
1826 }
1827 
1828 #define SDEBUG_READCAP_ARR_SZ 8
1829 static int resp_readcap(struct scsi_cmnd *scp,
1830 			struct sdebug_dev_info *devip)
1831 {
1832 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1833 	unsigned int capac;
1834 
1835 	/* following just in case virtual_gb changed */
1836 	sdebug_capacity = get_sdebug_capacity();
1837 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1838 	if (sdebug_capacity < 0xffffffff) {
1839 		capac = (unsigned int)sdebug_capacity - 1;
1840 		put_unaligned_be32(capac, arr + 0);
1841 	} else
1842 		put_unaligned_be32(0xffffffff, arr + 0);
1843 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1844 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1845 }
1846 
1847 #define SDEBUG_READCAP16_ARR_SZ 32
1848 static int resp_readcap16(struct scsi_cmnd *scp,
1849 			  struct sdebug_dev_info *devip)
1850 {
1851 	unsigned char *cmd = scp->cmnd;
1852 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1853 	int alloc_len;
1854 
1855 	alloc_len = get_unaligned_be32(cmd + 10);
1856 	/* following just in case virtual_gb changed */
1857 	sdebug_capacity = get_sdebug_capacity();
1858 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1859 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1860 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1861 	arr[13] = sdebug_physblk_exp & 0xf;
1862 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1863 
1864 	if (scsi_debug_lbp()) {
1865 		arr[14] |= 0x80; /* LBPME */
1866 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1867 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1868 		 * in the wider field maps to 0 in this field.
1869 		 */
1870 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1871 			arr[14] |= 0x40;
1872 	}
1873 
1874 	arr[15] = sdebug_lowest_aligned & 0xff;
1875 
1876 	if (have_dif_prot) {
1877 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1878 		arr[12] |= 1; /* PROT_EN */
1879 	}
1880 
1881 	return fill_from_dev_buffer(scp, arr,
1882 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1883 }
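
/*
 * Example of the byte packing above: with physblk_exp=3, lowest_aligned=7,
 * provisioning enabled and lbprz=1, the response carries arr[13] = 0x03
 * (logical blocks per physical block exponent), arr[14] = 0xc0
 * (LBPME | LBPRZ, high alignment bits zero) and arr[15] = 0x07.
 */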
1884 
1885 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1886 
1887 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1888 			      struct sdebug_dev_info *devip)
1889 {
1890 	unsigned char *cmd = scp->cmnd;
1891 	unsigned char *arr;
1892 	int host_no = devip->sdbg_host->shost->host_no;
1893 	int n, ret, alen, rlen;
1894 	int port_group_a, port_group_b, port_a, port_b;
1895 
1896 	alen = get_unaligned_be32(cmd + 6);
1897 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1898 	if (!arr)
1899 		return DID_REQUEUE << 16;
1900 	/*
1901 	 * EVPD page 0x88 states we have two ports, one
1902 	 * real and a fake port with no device connected.
1903 	 * So we create two port groups with one port each
1904 	 * and set the group with port B to unavailable.
1905 	 */
1906 	port_a = 0x1; /* relative port A */
1907 	port_b = 0x2; /* relative port B */
1908 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1909 			(devip->channel & 0x7f);
1910 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1911 			(devip->channel & 0x7f) + 0x80;
1912 
1913 	/*
1914 	 * The asymmetric access state is cycled according to the host_id.
1915 	 */
1916 	n = 4;
1917 	if (sdebug_vpd_use_hostno == 0) {
1918 		arr[n++] = host_no % 3; /* Asymm access state */
1919 		arr[n++] = 0x0F; /* claim: all states are supported */
1920 	} else {
1921 		arr[n++] = 0x0; /* Active/Optimized path */
1922 		arr[n++] = 0x01; /* only support active/optimized paths */
1923 	}
1924 	put_unaligned_be16(port_group_a, arr + n);
1925 	n += 2;
1926 	arr[n++] = 0;    /* Reserved */
1927 	arr[n++] = 0;    /* Status code */
1928 	arr[n++] = 0;    /* Vendor unique */
1929 	arr[n++] = 0x1;  /* One port per group */
1930 	arr[n++] = 0;    /* Reserved */
1931 	arr[n++] = 0;    /* Reserved */
1932 	put_unaligned_be16(port_a, arr + n);
1933 	n += 2;
1934 	arr[n++] = 3;    /* Port unavailable */
1935 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1936 	put_unaligned_be16(port_group_b, arr + n);
1937 	n += 2;
1938 	arr[n++] = 0;    /* Reserved */
1939 	arr[n++] = 0;    /* Status code */
1940 	arr[n++] = 0;    /* Vendor unique */
1941 	arr[n++] = 0x1;  /* One port per group */
1942 	arr[n++] = 0;    /* Reserved */
1943 	arr[n++] = 0;    /* Reserved */
1944 	put_unaligned_be16(port_b, arr + n);
1945 	n += 2;
1946 
1947 	rlen = n - 4;
1948 	put_unaligned_be32(rlen, arr + 0);
1949 
1950 	/*
1951 	 * Return the smallest value of either
1952 	 * - The allocated length
1953 	 * - The constructed command length
1954 	 * - The maximum array size
1955 	 */
1956 	rlen = min_t(int, alen, n);
1957 	ret = fill_from_dev_buffer(scp, arr,
1958 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1959 	kfree(arr);
1960 	return ret;
1961 }
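
/*
 * Example of the group numbering above: with host_no 0 and channel 0,
 * port group A is ((0 + 1) << 8) + 0 = 0x0100 and port group B is
 * 0x0100 + 0x80 = 0x0180, consistent with the two-port layout that VPD
 * page 0x88 advertises.
 */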
1962 
1963 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1964 			     struct sdebug_dev_info *devip)
1965 {
1966 	bool rctd;
1967 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1968 	u16 req_sa, u;
1969 	u32 alloc_len, a_len;
1970 	int k, offset, len, errsts, count, bump, na;
1971 	const struct opcode_info_t *oip;
1972 	const struct opcode_info_t *r_oip;
1973 	u8 *arr;
1974 	u8 *cmd = scp->cmnd;
1975 
1976 	rctd = !!(cmd[2] & 0x80);
1977 	reporting_opts = cmd[2] & 0x7;
1978 	req_opcode = cmd[3];
1979 	req_sa = get_unaligned_be16(cmd + 4);
1980 	alloc_len = get_unaligned_be32(cmd + 6);
1981 	if (alloc_len < 4 || alloc_len > 0xffff) {
1982 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1983 		return check_condition_result;
1984 	}
1985 	if (alloc_len > 8192)
1986 		a_len = 8192;
1987 	else
1988 		a_len = alloc_len;
1989 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1990 	if (NULL == arr) {
1991 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1992 				INSUFF_RES_ASCQ);
1993 		return check_condition_result;
1994 	}
1995 	switch (reporting_opts) {
1996 	case 0:	/* all commands */
1997 		/* count number of commands */
1998 		for (count = 0, oip = opcode_info_arr;
1999 		     oip->num_attached != 0xff; ++oip) {
2000 			if (F_INV_OP & oip->flags)
2001 				continue;
2002 			count += (oip->num_attached + 1);
2003 		}
2004 		bump = rctd ? 20 : 8;
2005 		put_unaligned_be32(count * bump, arr);
2006 		for (offset = 4, oip = opcode_info_arr;
2007 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2008 			if (F_INV_OP & oip->flags)
2009 				continue;
2010 			na = oip->num_attached;
2011 			arr[offset] = oip->opcode;
2012 			put_unaligned_be16(oip->sa, arr + offset + 2);
2013 			if (rctd)
2014 				arr[offset + 5] |= 0x2;
2015 			if (FF_SA & oip->flags)
2016 				arr[offset + 5] |= 0x1;
2017 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2018 			if (rctd)
2019 				put_unaligned_be16(0xa, arr + offset + 8);
2020 			r_oip = oip;
2021 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2022 				if (F_INV_OP & oip->flags)
2023 					continue;
2024 				offset += bump;
2025 				arr[offset] = oip->opcode;
2026 				put_unaligned_be16(oip->sa, arr + offset + 2);
2027 				if (rctd)
2028 					arr[offset + 5] |= 0x2;
2029 				if (FF_SA & oip->flags)
2030 					arr[offset + 5] |= 0x1;
2031 				put_unaligned_be16(oip->len_mask[0],
2032 						   arr + offset + 6);
2033 				if (rctd)
2034 					put_unaligned_be16(0xa,
2035 							   arr + offset + 8);
2036 			}
2037 			oip = r_oip;
2038 			offset += bump;
2039 		}
2040 		break;
2041 	case 1:	/* one command: opcode only */
2042 	case 2:	/* one command: opcode plus service action */
2043 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2044 		sdeb_i = opcode_ind_arr[req_opcode];
2045 		oip = &opcode_info_arr[sdeb_i];
2046 		if (F_INV_OP & oip->flags) {
2047 			supp = 1;
2048 			offset = 4;
2049 		} else {
2050 			if (1 == reporting_opts) {
2051 				if (FF_SA & oip->flags) {
2052 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2053 							     2, 2);
2054 					kfree(arr);
2055 					return check_condition_result;
2056 				}
2057 				req_sa = 0;
2058 			} else if (2 == reporting_opts &&
2059 				   0 == (FF_SA & oip->flags)) {
2060 				/* point at requested sa */
2061 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2062 				kfree(arr);
2062 				return check_condition_result;
2063 			}
2064 			if (0 == (FF_SA & oip->flags) &&
2065 			    req_opcode == oip->opcode)
2066 				supp = 3;
2067 			else if (0 == (FF_SA & oip->flags)) {
2068 				na = oip->num_attached;
2069 				for (k = 0, oip = oip->arrp; k < na;
2070 				     ++k, ++oip) {
2071 					if (req_opcode == oip->opcode)
2072 						break;
2073 				}
2074 				supp = (k >= na) ? 1 : 3;
2075 			} else if (req_sa != oip->sa) {
2076 				na = oip->num_attached;
2077 				for (k = 0, oip = oip->arrp; k < na;
2078 				     ++k, ++oip) {
2079 					if (req_sa == oip->sa)
2080 						break;
2081 				}
2082 				supp = (k >= na) ? 1 : 3;
2083 			} else
2084 				supp = 3;
2085 			if (3 == supp) {
2086 				u = oip->len_mask[0];
2087 				put_unaligned_be16(u, arr + 2);
2088 				arr[4] = oip->opcode;
2089 				for (k = 1; k < u; ++k)
2090 					arr[4 + k] = (k < 16) ?
2091 						 oip->len_mask[k] : 0xff;
2092 				offset = 4 + u;
2093 			} else
2094 				offset = 4;
2095 		}
2096 		arr[1] = (rctd ? 0x80 : 0) | supp;
2097 		if (rctd) {
2098 			put_unaligned_be16(0xa, arr + offset);
2099 			offset += 12;
2100 		}
2101 		break;
2102 	default:
2103 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2104 		kfree(arr);
2105 		return check_condition_result;
2106 	}
2107 	offset = (offset < a_len) ? offset : a_len;
2108 	len = (offset < alloc_len) ? offset : alloc_len;
2109 	errsts = fill_from_dev_buffer(scp, arr, len);
2110 	kfree(arr);
2111 	return errsts;
2112 }
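
/*
 * In the "all commands" case above, each command descriptor is 8 bytes
 * (opcode, service action, flags and CDB length), growing to 20 when
 * RCTD is set because a 12 byte command timeouts descriptor (2 byte
 * length field set to 0xa plus 10 bytes of zeroed timeout data) is
 * appended; hence bump = rctd ? 20 : 8.
 */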
2113 
2114 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2115 			  struct sdebug_dev_info *devip)
2116 {
2117 	bool repd;
2118 	u32 alloc_len, len;
2119 	u8 arr[16];
2120 	u8 *cmd = scp->cmnd;
2121 
2122 	memset(arr, 0, sizeof(arr));
2123 	repd = !!(cmd[2] & 0x80);
2124 	alloc_len = get_unaligned_be32(cmd + 6);
2125 	if (alloc_len < 4) {
2126 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2127 		return check_condition_result;
2128 	}
2129 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2130 	arr[1] = 0x1;		/* ITNRS */
2131 	if (repd) {
2132 		arr[3] = 0xc;
2133 		len = 16;
2134 	} else
2135 		len = 4;
2136 
2137 	len = (len < alloc_len) ? len : alloc_len;
2138 	return fill_from_dev_buffer(scp, arr, len);
2139 }
2140 
2141 /* <<Following mode page info copied from ST318451LW>> */
2142 
2143 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2144 {	/* Read-Write Error Recovery page for mode_sense */
2145 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2146 					5, 0, 0xff, 0xff};
2147 
2148 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2149 	if (1 == pcontrol)
2150 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2151 	return sizeof(err_recov_pg);
2152 }
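
/*
 * In this and the mode page helpers below, pcontrol follows the PC field
 * of the MODE SENSE CDB: 0 reports current values, 1 reports which fields
 * are changeable (hence the zeroed or ch_* masked copies), 2 reports the
 * defaults, and 3 (saved values) is rejected by resp_mode_sense() with
 * SAVING_PARAMS_UNSUP before these helpers are reached.
 */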
2153 
2154 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2155 { 	/* Disconnect-Reconnect page for mode_sense */
2156 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2157 					 0, 0, 0, 0, 0, 0, 0, 0};
2158 
2159 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2160 	if (1 == pcontrol)
2161 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2162 	return sizeof(disconnect_pg);
2163 }
2164 
2165 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2166 {       /* Format device page for mode_sense */
2167 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2168 				     0, 0, 0, 0, 0, 0, 0, 0,
2169 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2170 
2171 	memcpy(p, format_pg, sizeof(format_pg));
2172 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2173 	put_unaligned_be16(sdebug_sector_size, p + 12);
2174 	if (sdebug_removable)
2175 		p[20] |= 0x20; /* should agree with INQUIRY */
2176 	if (1 == pcontrol)
2177 		memset(p + 2, 0, sizeof(format_pg) - 2);
2178 	return sizeof(format_pg);
2179 }
2180 
2181 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2182 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2183 				     0, 0, 0, 0};
2184 
2185 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2186 { 	/* Caching page for mode_sense */
2187 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2188 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2189 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2190 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2191 
2192 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2193 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2194 	memcpy(p, caching_pg, sizeof(caching_pg));
2195 	if (1 == pcontrol)
2196 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2197 	else if (2 == pcontrol)
2198 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2199 	return sizeof(caching_pg);
2200 }
2201 
2202 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2203 				    0, 0, 0x2, 0x4b};
2204 
2205 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2206 { 	/* Control mode page for mode_sense */
2207 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2208 					0, 0, 0, 0};
2209 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2210 				     0, 0, 0x2, 0x4b};
2211 
2212 	if (sdebug_dsense)
2213 		ctrl_m_pg[2] |= 0x4;
2214 	else
2215 		ctrl_m_pg[2] &= ~0x4;
2216 
2217 	if (sdebug_ato)
2218 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2219 
2220 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2221 	if (1 == pcontrol)
2222 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2223 	else if (2 == pcontrol)
2224 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2225 	return sizeof(ctrl_m_pg);
2226 }
2227 
2228 
2229 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2230 {	/* Informational Exceptions control mode page for mode_sense */
2231 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2232 				       0, 0, 0x0, 0x0};
2233 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2234 				      0, 0, 0x0, 0x0};
2235 
2236 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2237 	if (1 == pcontrol)
2238 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2239 	else if (2 == pcontrol)
2240 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2241 	return sizeof(iec_m_pg);
2242 }
2243 
2244 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2245 {	/* SAS SSP mode page - short format for mode_sense */
2246 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2247 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2248 
2249 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2250 	if (1 == pcontrol)
2251 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2252 	return sizeof(sas_sf_m_pg);
2253 }
2254 
2255 
2256 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2257 			      int target_dev_id)
2258 {	/* SAS phy control and discover mode page for mode_sense */
2259 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2260 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2261 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2262 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2263 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2264 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2265 		    0, 0, 0, 0, 0, 0, 0, 0,
2266 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2267 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2268 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2269 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2270 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2271 		    0, 0, 0, 0, 0, 0, 0, 0,
2272 		};
2273 	int port_a, port_b;
2274 
2275 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2276 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2277 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2278 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2279 	port_a = target_dev_id + 1;
2280 	port_b = port_a + 1;
2281 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2282 	put_unaligned_be32(port_a, p + 20);
2283 	put_unaligned_be32(port_b, p + 48 + 20);
2284 	if (1 == pcontrol)
2285 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2286 	return sizeof(sas_pcd_m_pg);
2287 }
2288 
2289 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2290 {	/* SAS SSP shared protocol specific port mode subpage */
2291 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2292 		    0, 0, 0, 0, 0, 0, 0, 0,
2293 		};
2294 
2295 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2296 	if (1 == pcontrol)
2297 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2298 	return sizeof(sas_sha_m_pg);
2299 }
2300 
2301 #define SDEBUG_MAX_MSENSE_SZ 256
2302 
2303 static int resp_mode_sense(struct scsi_cmnd *scp,
2304 			   struct sdebug_dev_info *devip)
2305 {
2306 	int pcontrol, pcode, subpcode, bd_len;
2307 	unsigned char dev_spec;
2308 	int alloc_len, offset, len, target_dev_id;
2309 	int target = scp->device->id;
2310 	unsigned char *ap;
2311 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2312 	unsigned char *cmd = scp->cmnd;
2313 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2314 
2315 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2316 	pcontrol = (cmd[2] & 0xc0) >> 6;
2317 	pcode = cmd[2] & 0x3f;
2318 	subpcode = cmd[3];
2319 	msense_6 = (MODE_SENSE == cmd[0]);
2320 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2321 	is_disk = (sdebug_ptype == TYPE_DISK);
2322 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2323 	if ((is_disk || is_zbc) && !dbd)
2324 		bd_len = llbaa ? 16 : 8;
2325 	else
2326 		bd_len = 0;
2327 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2328 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2329 	if (0x3 == pcontrol) {  /* Saving values not supported */
2330 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2331 		return check_condition_result;
2332 	}
2333 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2334 			(devip->target * 1000) - 3;
2335 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2336 	if (is_disk || is_zbc) {
2337 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2338 		if (sdebug_wp)
2339 			dev_spec |= 0x80;
2340 	} else
2341 		dev_spec = 0x0;
2342 	if (msense_6) {
2343 		arr[2] = dev_spec;
2344 		arr[3] = bd_len;
2345 		offset = 4;
2346 	} else {
2347 		arr[3] = dev_spec;
2348 		if (16 == bd_len)
2349 			arr[4] = 0x1;	/* set LONGLBA bit */
2350 		arr[7] = bd_len;	/* assume 255 or less */
2351 		offset = 8;
2352 	}
2353 	ap = arr + offset;
2354 	if ((bd_len > 0) && (!sdebug_capacity))
2355 		sdebug_capacity = get_sdebug_capacity();
2356 
2357 	if (8 == bd_len) {
2358 		if (sdebug_capacity > 0xfffffffe)
2359 			put_unaligned_be32(0xffffffff, ap + 0);
2360 		else
2361 			put_unaligned_be32(sdebug_capacity, ap + 0);
2362 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2363 		offset += bd_len;
2364 		ap = arr + offset;
2365 	} else if (16 == bd_len) {
2366 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2367 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2368 		offset += bd_len;
2369 		ap = arr + offset;
2370 	}
2371 
2372 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2373 		/* TODO: Control Extension page */
2374 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2375 		return check_condition_result;
2376 	}
2377 	bad_pcode = false;
2378 
2379 	switch (pcode) {
2380 	case 0x1:	/* Read-Write error recovery page, direct access */
2381 		len = resp_err_recov_pg(ap, pcontrol, target);
2382 		offset += len;
2383 		break;
2384 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2385 		len = resp_disconnect_pg(ap, pcontrol, target);
2386 		offset += len;
2387 		break;
2388 	case 0x3:       /* Format device page, direct access */
2389 		if (is_disk) {
2390 			len = resp_format_pg(ap, pcontrol, target);
2391 			offset += len;
2392 		} else
2393 			bad_pcode = true;
2394 		break;
2395 	case 0x8:	/* Caching page, direct access */
2396 		if (is_disk || is_zbc) {
2397 			len = resp_caching_pg(ap, pcontrol, target);
2398 			offset += len;
2399 		} else
2400 			bad_pcode = true;
2401 		break;
2402 	case 0xa:	/* Control Mode page, all devices */
2403 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2404 		offset += len;
2405 		break;
2406 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2407 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2408 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2409 			return check_condition_result;
2410 		}
2411 		len = 0;
2412 		if ((0x0 == subpcode) || (0xff == subpcode))
2413 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2414 		if ((0x1 == subpcode) || (0xff == subpcode))
2415 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2416 						  target_dev_id);
2417 		if ((0x2 == subpcode) || (0xff == subpcode))
2418 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2419 		offset += len;
2420 		break;
2421 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2422 		len = resp_iec_m_pg(ap, pcontrol, target);
2423 		offset += len;
2424 		break;
2425 	case 0x3f:	/* Read all Mode pages */
2426 		if ((0 == subpcode) || (0xff == subpcode)) {
2427 			len = resp_err_recov_pg(ap, pcontrol, target);
2428 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2429 			if (is_disk) {
2430 				len += resp_format_pg(ap + len, pcontrol,
2431 						      target);
2432 				len += resp_caching_pg(ap + len, pcontrol,
2433 						       target);
2434 			} else if (is_zbc) {
2435 				len += resp_caching_pg(ap + len, pcontrol,
2436 						       target);
2437 			}
2438 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2439 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2440 			if (0xff == subpcode) {
2441 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2442 						  target, target_dev_id);
2443 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2444 			}
2445 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2446 			offset += len;
2447 		} else {
2448 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2449 			return check_condition_result;
2450 		}
2451 		break;
2452 	default:
2453 		bad_pcode = true;
2454 		break;
2455 	}
2456 	if (bad_pcode) {
2457 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2458 		return check_condition_result;
2459 	}
2460 	if (msense_6)
2461 		arr[0] = offset - 1;
2462 	else
2463 		put_unaligned_be16((offset - 2), arr + 0);
2464 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2465 }
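
/*
 * Worked example: a MODE SENSE(6) for the caching page (pcode 0x8) on a
 * disk with block descriptors enabled yields offset = 4 (header) + 8
 * (short block descriptor) + 20 (caching page) = 32, so arr[0], the mode
 * data length, is reported as 31.
 */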
2466 
2467 #define SDEBUG_MAX_MSELECT_SZ 512
2468 
2469 static int resp_mode_select(struct scsi_cmnd *scp,
2470 			    struct sdebug_dev_info *devip)
2471 {
2472 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2473 	int param_len, res, mpage;
2474 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2475 	unsigned char *cmd = scp->cmnd;
2476 	int mselect6 = (MODE_SELECT == cmd[0]);
2477 
2478 	memset(arr, 0, sizeof(arr));
2479 	pf = cmd[1] & 0x10;
2480 	sp = cmd[1] & 0x1;
2481 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2482 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2483 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2484 		return check_condition_result;
2485 	}
2486 	res = fetch_to_dev_buffer(scp, arr, param_len);
2487 	if (-1 == res)
2488 		return DID_ERROR << 16;
2489 	else if (sdebug_verbose && (res < param_len))
2490 		sdev_printk(KERN_INFO, scp->device,
2491 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2492 			    __func__, param_len, res);
2493 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2494 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2495 	if (md_len > 2) {
2496 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2497 		return check_condition_result;
2498 	}
2499 	off = bd_len + (mselect6 ? 4 : 8);
2500 	mpage = arr[off] & 0x3f;
2501 	ps = !!(arr[off] & 0x80);
2502 	if (ps) {
2503 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2504 		return check_condition_result;
2505 	}
2506 	spf = !!(arr[off] & 0x40);
2507 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2508 		       (arr[off + 1] + 2);
2509 	if ((pg_len + off) > param_len) {
2510 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2511 				PARAMETER_LIST_LENGTH_ERR, 0);
2512 		return check_condition_result;
2513 	}
2514 	switch (mpage) {
2515 	case 0x8:      /* Caching Mode page */
2516 		if (caching_pg[1] == arr[off + 1]) {
2517 			memcpy(caching_pg + 2, arr + off + 2,
2518 			       sizeof(caching_pg) - 2);
2519 			goto set_mode_changed_ua;
2520 		}
2521 		break;
2522 	case 0xa:      /* Control Mode page */
2523 		if (ctrl_m_pg[1] == arr[off + 1]) {
2524 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2525 			       sizeof(ctrl_m_pg) - 2);
2526 			if (ctrl_m_pg[4] & 0x8)
2527 				sdebug_wp = true;
2528 			else
2529 				sdebug_wp = false;
2530 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2531 			goto set_mode_changed_ua;
2532 		}
2533 		break;
2534 	case 0x1c:      /* Informational Exceptions Mode page */
2535 		if (iec_m_pg[1] == arr[off + 1]) {
2536 			memcpy(iec_m_pg + 2, arr + off + 2,
2537 			       sizeof(iec_m_pg) - 2);
2538 			goto set_mode_changed_ua;
2539 		}
2540 		break;
2541 	default:
2542 		break;
2543 	}
2544 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2545 	return check_condition_result;
2546 set_mode_changed_ua:
2547 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2548 	return 0;
2549 }
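
/*
 * The three writable pages above can be exercised from user space, for
 * example with sdparm (assuming the scsi_debug disk appears as /dev/sdb):
 *
 *   sdparm --clear WCE /dev/sdb    clear write cache enable (caching page)
 *   sdparm --set SWP /dev/sdb      set software write protect, which
 *                                  flips sdebug_wp above
 *
 * Both paths land in the switch on mpage and raise the MODE PARAMETERS
 * CHANGED unit attention.
 */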
2550 
2551 static int resp_temp_l_pg(unsigned char *arr)
2552 {
2553 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2554 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2555 		};
2556 
2557 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2558 	return sizeof(temp_l_pg);
2559 }
2560 
2561 static int resp_ie_l_pg(unsigned char *arr)
2562 {
2563 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2564 		};
2565 
2566 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2567 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2568 		arr[4] = THRESHOLD_EXCEEDED;
2569 		arr[5] = 0xff;
2570 	}
2571 	return sizeof(ie_l_pg);
2572 }
2573 
2574 #define SDEBUG_MAX_LSENSE_SZ 512
2575 
2576 static int resp_log_sense(struct scsi_cmnd *scp,
2577 			  struct sdebug_dev_info *devip)
2578 {
2579 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2580 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2581 	unsigned char *cmd = scp->cmnd;
2582 
2583 	memset(arr, 0, sizeof(arr));
2584 	ppc = cmd[1] & 0x2;
2585 	sp = cmd[1] & 0x1;
2586 	if (ppc || sp) {
2587 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2588 		return check_condition_result;
2589 	}
2590 	pcode = cmd[2] & 0x3f;
2591 	subpcode = cmd[3] & 0xff;
2592 	alloc_len = get_unaligned_be16(cmd + 7);
2593 	arr[0] = pcode;
2594 	if (0 == subpcode) {
2595 		switch (pcode) {
2596 		case 0x0:	/* Supported log pages log page */
2597 			n = 4;
2598 			arr[n++] = 0x0;		/* this page */
2599 			arr[n++] = 0xd;		/* Temperature */
2600 			arr[n++] = 0x2f;	/* Informational exceptions */
2601 			arr[3] = n - 4;
2602 			break;
2603 		case 0xd:	/* Temperature log page */
2604 			arr[3] = resp_temp_l_pg(arr + 4);
2605 			break;
2606 		case 0x2f:	/* Informational exceptions log page */
2607 			arr[3] = resp_ie_l_pg(arr + 4);
2608 			break;
2609 		default:
2610 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2611 			return check_condition_result;
2612 		}
2613 	} else if (0xff == subpcode) {
2614 		arr[0] |= 0x40;
2615 		arr[1] = subpcode;
2616 		switch (pcode) {
2617 		case 0x0:	/* Supported log pages and subpages log page */
2618 			n = 4;
2619 			arr[n++] = 0x0;
2620 			arr[n++] = 0x0;		/* 0,0 page */
2621 			arr[n++] = 0x0;
2622 			arr[n++] = 0xff;	/* this page */
2623 			arr[n++] = 0xd;
2624 			arr[n++] = 0x0;		/* Temperature */
2625 			arr[n++] = 0x2f;
2626 			arr[n++] = 0x0;	/* Informational exceptions */
2627 			arr[3] = n - 4;
2628 			break;
2629 		case 0xd:	/* Temperature subpages */
2630 			n = 4;
2631 			arr[n++] = 0xd;
2632 			arr[n++] = 0x0;		/* Temperature */
2633 			arr[3] = n - 4;
2634 			break;
2635 		case 0x2f:	/* Informational exceptions subpages */
2636 			n = 4;
2637 			arr[n++] = 0x2f;
2638 			arr[n++] = 0x0;		/* Informational exceptions */
2639 			arr[3] = n - 4;
2640 			break;
2641 		default:
2642 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2643 			return check_condition_result;
2644 		}
2645 	} else {
2646 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2647 		return check_condition_result;
2648 	}
2649 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2650 	return fill_from_dev_buffer(scp, arr,
2651 		    min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2652 }
2653 
2654 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2655 {
2656 	return devip->nr_zones != 0;
2657 }
2658 
2659 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2660 					unsigned long long lba)
2661 {
2662 	return &devip->zstate[lba >> devip->zsize_shift];
2663 }
2664 
2665 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2666 {
2667 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2668 }
2669 
2670 static void zbc_close_zone(struct sdebug_dev_info *devip,
2671 			   struct sdeb_zone_state *zsp)
2672 {
2673 	enum sdebug_z_cond zc;
2674 
2675 	if (zbc_zone_is_conv(zsp))
2676 		return;
2677 
2678 	zc = zsp->z_cond;
2679 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2680 		return;
2681 
2682 	if (zc == ZC2_IMPLICIT_OPEN)
2683 		devip->nr_imp_open--;
2684 	else
2685 		devip->nr_exp_open--;
2686 
2687 	if (zsp->z_wp == zsp->z_start) {
2688 		zsp->z_cond = ZC1_EMPTY;
2689 	} else {
2690 		zsp->z_cond = ZC4_CLOSED;
2691 		devip->nr_closed++;
2692 	}
2693 }
2694 
2695 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2696 {
2697 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2698 	unsigned int i;
2699 
2700 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2701 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2702 			zbc_close_zone(devip, zsp);
2703 			return;
2704 		}
2705 	}
2706 }
2707 
2708 static void zbc_open_zone(struct sdebug_dev_info *devip,
2709 			  struct sdeb_zone_state *zsp, bool explicit)
2710 {
2711 	enum sdebug_z_cond zc;
2712 
2713 	if (zbc_zone_is_conv(zsp))
2714 		return;
2715 
2716 	zc = zsp->z_cond;
2717 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2718 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2719 		return;
2720 
2721 	/* Close an implicit open zone if necessary */
2722 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2723 		zbc_close_zone(devip, zsp);
2724 	else if (devip->max_open &&
2725 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2726 		zbc_close_imp_open_zone(devip);
2727 
2728 	if (zsp->z_cond == ZC4_CLOSED)
2729 		devip->nr_closed--;
2730 	if (explicit) {
2731 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2732 		devip->nr_exp_open++;
2733 	} else {
2734 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2735 		devip->nr_imp_open++;
2736 	}
2737 }
2738 
2739 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2740 		       unsigned long long lba, unsigned int num)
2741 {
2742 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2743 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2744 
2745 	if (zbc_zone_is_conv(zsp))
2746 		return;
2747 
2748 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2749 		zsp->z_wp += num;
2750 		if (zsp->z_wp >= zend)
2751 			zsp->z_cond = ZC5_FULL;
2752 		return;
2753 	}
2754 
2755 	while (num) {
2756 		if (lba != zsp->z_wp)
2757 			zsp->z_non_seq_resource = true;
2758 
2759 		end = lba + num;
2760 		if (end >= zend) {
2761 			n = zend - lba;
2762 			zsp->z_wp = zend;
2763 		} else if (end > zsp->z_wp) {
2764 			n = num;
2765 			zsp->z_wp = end;
2766 		} else {
2767 			n = num;
2768 		}
2769 		if (zsp->z_wp >= zend)
2770 			zsp->z_cond = ZC5_FULL;
2771 
2772 		num -= n;
2773 		lba += n;
2774 		if (num) {
2775 			zsp++;
2776 			zend = zsp->z_start + zsp->z_size;
2777 		}
2778 	}
2779 }
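
/*
 * A worked example of the loop above for a sequential write preferred
 * zone of 0x100 blocks starting at LBA 0: a write with lba=0xf0 and
 * num=0x20 first advances z_wp to zend (n = 0x10, the zone becomes FULL),
 * then the remaining 0x10 blocks fall into the next zone, whose write
 * pointer and condition are updated the same way.
 */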
2780 
2781 static int check_zbc_access_params(struct scsi_cmnd *scp,
2782 			unsigned long long lba, unsigned int num, bool write)
2783 {
2784 	struct scsi_device *sdp = scp->device;
2785 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2786 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2787 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2788 
2789 	if (!write) {
2790 		if (devip->zmodel == BLK_ZONED_HA)
2791 			return 0;
2792 		/* For host-managed, reads cannot cross zone type boundaries */
2793 		if (zsp_end != zsp &&
2794 		    zbc_zone_is_conv(zsp) &&
2795 		    !zbc_zone_is_conv(zsp_end)) {
2796 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2797 					LBA_OUT_OF_RANGE,
2798 					READ_INVDATA_ASCQ);
2799 			return check_condition_result;
2800 		}
2801 		return 0;
2802 	}
2803 
2804 	/* No restrictions for writes within conventional zones */
2805 	if (zbc_zone_is_conv(zsp)) {
2806 		if (!zbc_zone_is_conv(zsp_end)) {
2807 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2808 					LBA_OUT_OF_RANGE,
2809 					WRITE_BOUNDARY_ASCQ);
2810 			return check_condition_result;
2811 		}
2812 		return 0;
2813 	}
2814 
2815 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2816 		/* Writes cannot cross sequential zone boundaries */
2817 		if (zsp_end != zsp) {
2818 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2819 					LBA_OUT_OF_RANGE,
2820 					WRITE_BOUNDARY_ASCQ);
2821 			return check_condition_result;
2822 		}
2823 		/* Cannot write full zones */
2824 		if (zsp->z_cond == ZC5_FULL) {
2825 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2826 					INVALID_FIELD_IN_CDB, 0);
2827 			return check_condition_result;
2828 		}
2829 		/* Writes must be aligned to the zone WP */
2830 		if (lba != zsp->z_wp) {
2831 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2832 					LBA_OUT_OF_RANGE,
2833 					UNALIGNED_WRITE_ASCQ);
2834 			return check_condition_result;
2835 		}
2836 	}
2837 
2838 	/* Handle implicit open of closed and empty zones */
2839 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2840 		if (devip->max_open &&
2841 		    devip->nr_exp_open >= devip->max_open) {
2842 			mk_sense_buffer(scp, DATA_PROTECT,
2843 					INSUFF_RES_ASC,
2844 					INSUFF_ZONE_ASCQ);
2845 			return check_condition_result;
2846 		}
2847 		zbc_open_zone(devip, zsp, false);
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 static inline int check_device_access_params
2854 			(struct scsi_cmnd *scp, unsigned long long lba,
2855 			 unsigned int num, bool write)
2856 {
2857 	struct scsi_device *sdp = scp->device;
2858 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2859 
2860 	if (lba + num > sdebug_capacity) {
2861 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2862 		return check_condition_result;
2863 	}
2864 	/* transfer length excessive (tie in to block limits VPD page) */
2865 	if (num > sdebug_store_sectors) {
2866 		/* needs work to find which cdb byte 'num' comes from */
2867 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2868 		return check_condition_result;
2869 	}
2870 	if (write && unlikely(sdebug_wp)) {
2871 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2872 		return check_condition_result;
2873 	}
2874 	if (sdebug_dev_is_zoned(devip))
2875 		return check_zbc_access_params(scp, lba, num, write);
2876 
2877 	return 0;
2878 }
2879 
2880 /*
2881  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2882  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2883  * that access any of the "stores" in struct sdeb_store_info should call this
2884  * function with bug_if_fake_rw set to true.
2885  */
2886 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2887 						bool bug_if_fake_rw)
2888 {
2889 	if (sdebug_fake_rw) {
2890 		BUG_ON(bug_if_fake_rw);	/* See note above */
2891 		return NULL;
2892 	}
2893 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2894 }
2895 
2896 /* Returns number of bytes copied or -1 if error. */
2897 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2898 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2899 {
2900 	int ret;
2901 	u64 block, rest = 0;
2902 	enum dma_data_direction dir;
2903 	struct scsi_data_buffer *sdb = &scp->sdb;
2904 	u8 *fsp;
2905 
2906 	if (do_write) {
2907 		dir = DMA_TO_DEVICE;
2908 		write_since_sync = true;
2909 	} else {
2910 		dir = DMA_FROM_DEVICE;
2911 	}
2912 
2913 	if (!sdb->length || !sip)
2914 		return 0;
2915 	if (scp->sc_data_direction != dir)
2916 		return -1;
2917 	fsp = sip->storep;
2918 
2919 	block = do_div(lba, sdebug_store_sectors);
2920 	if (block + num > sdebug_store_sectors)
2921 		rest = block + num - sdebug_store_sectors;
2922 
2923 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2924 		   fsp + (block * sdebug_sector_size),
2925 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2926 	if (ret != (num - rest) * sdebug_sector_size)
2927 		return ret;
2928 
2929 	if (rest) {
2930 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2931 			    fsp, rest * sdebug_sector_size,
2932 			    sg_skip + ((num - rest) * sdebug_sector_size),
2933 			    do_write);
2934 	}
2935 
2936 	return ret;
2937 }
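
/*
 * Example of the wrap-around handling above: with sdebug_store_sectors =
 * 0x8000, a read of num = 4 sectors at lba = 0x7ffe gives block = 0x7ffe
 * and rest = 2, so two sectors are copied from the end of the store and
 * the remaining two from its beginning. The store is deliberately treated
 * as a ring when virtual_gb makes the medium larger than the backing
 * allocation.
 */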
2938 
2939 /* Returns number of bytes copied or -1 if error. */
2940 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2941 {
2942 	struct scsi_data_buffer *sdb = &scp->sdb;
2943 
2944 	if (!sdb->length)
2945 		return 0;
2946 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2947 		return -1;
2948 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2949 			      num * sdebug_sector_size, 0, true);
2950 }
2951 
2952 /* If the num sectors at sip->storep+lba compare equal to the lower half
2953  * of arr, copy the upper half of arr into sip->storep+lba and return
2954  * true. If the comparison fails then return false. */
2955 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2956 			      const u8 *arr, bool compare_only)
2957 {
2958 	bool res;
2959 	u64 block, rest = 0;
2960 	u32 store_blks = sdebug_store_sectors;
2961 	u32 lb_size = sdebug_sector_size;
2962 	u8 *fsp = sip->storep;
2963 
2964 	block = do_div(lba, store_blks);
2965 	if (block + num > store_blks)
2966 		rest = block + num - store_blks;
2967 
2968 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2969 	if (!res)
2970 		return res;
2971 	if (rest)
2972 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2973 			     rest * lb_size);
2974 	if (!res)
2975 		return res;
2976 	if (compare_only)
2977 		return true;
2978 	arr += num * lb_size;
2979 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2980 	if (rest)
2981 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2982 	return res;
2983 }
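
/*
 * So for a COMPARE AND WRITE of num blocks, the caller passes arr holding
 * 2 * num * sdebug_sector_size bytes: the verify data first, then the
 * write data, mirroring the SBC COMPARE AND WRITE data-out layout.
 */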
2984 
2985 static __be16 dif_compute_csum(const void *buf, int len)
2986 {
2987 	__be16 csum;
2988 
2989 	if (sdebug_guard)
2990 		csum = (__force __be16)ip_compute_csum(buf, len);
2991 	else
2992 		csum = cpu_to_be16(crc_t10dif(buf, len));
2993 
2994 	return csum;
2995 }
2996 
2997 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2998 		      sector_t sector, u32 ei_lba)
2999 {
3000 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3001 
3002 	if (sdt->guard_tag != csum) {
3003 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3004 			(unsigned long)sector,
3005 			be16_to_cpu(sdt->guard_tag),
3006 			be16_to_cpu(csum));
3007 		return 0x01;
3008 	}
3009 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3010 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3011 		pr_err("REF check failed on sector %lu\n",
3012 			(unsigned long)sector);
3013 		return 0x03;
3014 	}
3015 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3016 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3017 		pr_err("REF check failed on sector %lu\n",
3018 			(unsigned long)sector);
3019 		return 0x03;
3020 	}
3021 	return 0;
3022 }
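
/*
 * The tuple checked above is the 8 byte T10 PI structure (struct
 * t10_pi_tuple): a 2 byte guard tag (CRC-T10DIF or IP checksum of the
 * sector, depending on sdebug_guard), a 2 byte application tag and a
 * 4 byte reference tag. The 0x01 and 0x03 return values are used by the
 * callers as the ascq paired with asc 0x10 in an ABORTED COMMAND sense.
 */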
3023 
3024 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3025 			  unsigned int sectors, bool read)
3026 {
3027 	size_t resid;
3028 	void *paddr;
3029 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3030 						scp->device->hostdata, true);
3031 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3032 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3033 	struct sg_mapping_iter miter;
3034 
3035 	/* Bytes of protection data to copy into sgl */
3036 	resid = sectors * sizeof(*dif_storep);
3037 
3038 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3039 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3040 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3041 
3042 	while (sg_miter_next(&miter) && resid > 0) {
3043 		size_t len = min_t(size_t, miter.length, resid);
3044 		void *start = dif_store(sip, sector);
3045 		size_t rest = 0;
3046 
3047 		if (dif_store_end < start + len)
3048 			rest = start + len - dif_store_end;
3049 
3050 		paddr = miter.addr;
3051 
3052 		if (read)
3053 			memcpy(paddr, start, len - rest);
3054 		else
3055 			memcpy(start, paddr, len - rest);
3056 
3057 		if (rest) {
3058 			if (read)
3059 				memcpy(paddr + len - rest, dif_storep, rest);
3060 			else
3061 				memcpy(dif_storep, paddr + len - rest, rest);
3062 		}
3063 
3064 		sector += len / sizeof(*dif_storep);
3065 		resid -= len;
3066 	}
3067 	sg_miter_stop(&miter);
3068 }
3069 
3070 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3071 			    unsigned int sectors, u32 ei_lba)
3072 {
3073 	unsigned int i;
3074 	sector_t sector;
3075 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3076 						scp->device->hostdata, true);
3077 	struct t10_pi_tuple *sdt;
3078 
3079 	for (i = 0; i < sectors; i++, ei_lba++) {
3080 		int ret;
3081 
3082 		sector = start_sec + i;
3083 		sdt = dif_store(sip, sector);
3084 
3085 		if (sdt->app_tag == cpu_to_be16(0xffff))
3086 			continue;
3087 
3088 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3089 				 ei_lba);
3090 		if (ret) {
3091 			dif_errors++;
3092 			return ret;
3093 		}
3094 	}
3095 
3096 	dif_copy_prot(scp, start_sec, sectors, true);
3097 	dix_reads++;
3098 
3099 	return 0;
3100 }
3101 
3102 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3103 {
3104 	bool check_prot;
3105 	u32 num;
3106 	u32 ei_lba;
3107 	int ret;
3108 	u64 lba;
3109 	struct sdeb_store_info *sip = devip2sip(devip, true);
3110 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3111 	u8 *cmd = scp->cmnd;
3112 	struct sdebug_queued_cmd *sqcp;
3113 
3114 	switch (cmd[0]) {
3115 	case READ_16:
3116 		ei_lba = 0;
3117 		lba = get_unaligned_be64(cmd + 2);
3118 		num = get_unaligned_be32(cmd + 10);
3119 		check_prot = true;
3120 		break;
3121 	case READ_10:
3122 		ei_lba = 0;
3123 		lba = get_unaligned_be32(cmd + 2);
3124 		num = get_unaligned_be16(cmd + 7);
3125 		check_prot = true;
3126 		break;
3127 	case READ_6:
3128 		ei_lba = 0;
3129 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3130 		      (u32)(cmd[1] & 0x1f) << 16;
3131 		num = (0 == cmd[4]) ? 256 : cmd[4];
3132 		check_prot = true;
3133 		break;
3134 	case READ_12:
3135 		ei_lba = 0;
3136 		lba = get_unaligned_be32(cmd + 2);
3137 		num = get_unaligned_be32(cmd + 6);
3138 		check_prot = true;
3139 		break;
3140 	case XDWRITEREAD_10:
3141 		ei_lba = 0;
3142 		lba = get_unaligned_be32(cmd + 2);
3143 		num = get_unaligned_be16(cmd + 7);
3144 		check_prot = false;
3145 		break;
3146 	default:	/* assume READ(32) */
3147 		lba = get_unaligned_be64(cmd + 12);
3148 		ei_lba = get_unaligned_be32(cmd + 20);
3149 		num = get_unaligned_be32(cmd + 28);
3150 		check_prot = false;
3151 		break;
3152 	}
3153 	if (unlikely(have_dif_prot && check_prot)) {
3154 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3155 		    (cmd[1] & 0xe0)) {
3156 			mk_sense_invalid_opcode(scp);
3157 			return check_condition_result;
3158 		}
3159 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3160 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3161 		    (cmd[1] & 0xe0) == 0)
3162 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3163 				    "to DIF device\n");
3164 	}
3165 	if (unlikely(sdebug_any_injecting_opt)) {
3166 		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
3167 
3168 		if (sqcp) {
3169 			if (sqcp->inj_short)
3170 				num /= 2;
3171 		}
3172 	} else
3173 		sqcp = NULL;
3174 
3175 	ret = check_device_access_params(scp, lba, num, false);
3176 	if (ret)
3177 		return ret;
3178 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3179 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3180 		     ((lba + num) > sdebug_medium_error_start))) {
3181 		/* claim unrecoverable read error */
3182 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3183 		/* set info field and valid bit for fixed descriptor */
3184 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3185 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3186 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3187 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3188 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3189 		}
3190 		scsi_set_resid(scp, scsi_bufflen(scp));
3191 		return check_condition_result;
3192 	}
3193 
3194 	read_lock(macc_lckp);
3195 
3196 	/* DIX + T10 DIF */
3197 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3198 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3199 
3200 		if (prot_ret) {
3201 			read_unlock(macc_lckp);
3202 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3203 			return illegal_condition_result;
3204 		}
3205 	}
3206 
3207 	ret = do_device_access(sip, scp, 0, lba, num, false);
3208 	read_unlock(macc_lckp);
3209 	if (unlikely(ret == -1))
3210 		return DID_ERROR << 16;
3211 
3212 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3213 
3214 	if (unlikely(sqcp)) {
3215 		if (sqcp->inj_recovered) {
3216 			mk_sense_buffer(scp, RECOVERED_ERROR,
3217 					THRESHOLD_EXCEEDED, 0);
3218 			return check_condition_result;
3219 		} else if (sqcp->inj_transport) {
3220 			mk_sense_buffer(scp, ABORTED_COMMAND,
3221 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3222 			return check_condition_result;
3223 		} else if (sqcp->inj_dif) {
3224 			/* Logical block guard check failed */
3225 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3226 			return illegal_condition_result;
3227 		} else if (sqcp->inj_dix) {
3228 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3229 			return illegal_condition_result;
3230 		}
3231 	}
3232 	return 0;
3233 }
3234 
3235 static void dump_sector(unsigned char *buf, int len)
3236 {
3237 	int i, j, n;
3238 
3239 	pr_err(">>> Sector Dump <<<\n");
3240 	for (i = 0 ; i < len ; i += 16) {
3241 		char b[128];
3242 
3243 		for (j = 0, n = 0; j < 16; j++) {
3244 			unsigned char c = buf[i+j];
3245 
3246 			if (c >= 0x20 && c < 0x7e)
3247 				n += scnprintf(b + n, sizeof(b) - n,
3248 					       " %c ", buf[i+j]);
3249 			else
3250 				n += scnprintf(b + n, sizeof(b) - n,
3251 					       "%02x ", buf[i+j]);
3252 		}
3253 		pr_err("%04d: %s\n", i, b);
3254 	}
3255 }
3256 
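/*
 * Verify the protection information accompanying a write by walking the
 * data and protection scatter-gather lists in lock step, one t10_pi_tuple
 * per logical block. On success the tuples are copied into dif_storep.
 */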
3257 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3258 			     unsigned int sectors, u32 ei_lba)
3259 {
3260 	int ret;
3261 	struct t10_pi_tuple *sdt;
3262 	void *daddr;
3263 	sector_t sector = start_sec;
3264 	int ppage_offset;
3265 	int dpage_offset;
3266 	struct sg_mapping_iter diter;
3267 	struct sg_mapping_iter piter;
3268 
3269 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3270 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3271 
3272 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3273 			scsi_prot_sg_count(SCpnt),
3274 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3275 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3276 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3277 
3278 	/* For each protection page */
3279 	while (sg_miter_next(&piter)) {
3280 		dpage_offset = 0;
3281 		if (WARN_ON(!sg_miter_next(&diter))) {
3282 			ret = 0x01;
3283 			goto out;
3284 		}
3285 
3286 		for (ppage_offset = 0; ppage_offset < piter.length;
3287 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3288 			/* If we're at the end of the current
3289 			 * data page, advance to the next one
3290 			 */
3291 			if (dpage_offset >= diter.length) {
3292 				if (WARN_ON(!sg_miter_next(&diter))) {
3293 					ret = 0x01;
3294 					goto out;
3295 				}
3296 				dpage_offset = 0;
3297 			}
3298 
3299 			sdt = piter.addr + ppage_offset;
3300 			daddr = diter.addr + dpage_offset;
3301 
3302 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3303 			if (ret) {
3304 				dump_sector(daddr, sdebug_sector_size);
3305 				goto out;
3306 			}
3307 
3308 			sector++;
3309 			ei_lba++;
3310 			dpage_offset += sdebug_sector_size;
3311 		}
3312 		diter.consumed = dpage_offset;
3313 		sg_miter_stop(&diter);
3314 	}
3315 	sg_miter_stop(&piter);
3316 
3317 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3318 	dix_writes++;
3319 
3320 	return 0;
3321 
3322 out:
3323 	dif_errors++;
3324 	sg_miter_stop(&diter);
3325 	sg_miter_stop(&piter);
3326 	return ret;
3327 }
3328 
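/*
 * Translate an LBA to its bit index in the provisioning map and back.
 * For example, with sdebug_unmap_granularity=4 and
 * sdebug_unmap_alignment=1, lba_to_map_index(5) is (5 + 3) / 4 = 2 and
 * map_index_to_lba(2) is 2 * 4 - 3 = 5, so index 2 covers LBAs 5 to 8.
 */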
3329 static unsigned long lba_to_map_index(sector_t lba)
3330 {
3331 	if (sdebug_unmap_alignment)
3332 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3333 	sector_div(lba, sdebug_unmap_granularity);
3334 	return lba;
3335 }
3336 
3337 static sector_t map_index_to_lba(unsigned long index)
3338 {
3339 	sector_t lba = index * sdebug_unmap_granularity;
3340 
3341 	if (sdebug_unmap_alignment)
3342 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3343 	return lba;
3344 }
3345 
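/*
 * Return whether @lba is currently mapped and, via @num, the number of
 * following blocks that share that state, clamped to the end of the
 * store.
 */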
3346 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3347 			      unsigned int *num)
3348 {
3349 	sector_t end;
3350 	unsigned int mapped;
3351 	unsigned long index;
3352 	unsigned long next;
3353 
3354 	index = lba_to_map_index(lba);
3355 	mapped = test_bit(index, sip->map_storep);
3356 
3357 	if (mapped)
3358 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3359 	else
3360 		next = find_next_bit(sip->map_storep, map_size, index);
3361 
3362 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3363 	*num = end - lba;
3364 	return mapped;
3365 }
3366 
3367 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3368 		       unsigned int len)
3369 {
3370 	sector_t end = lba + len;
3371 
3372 	while (lba < end) {
3373 		unsigned long index = lba_to_map_index(lba);
3374 
3375 		if (index < map_size)
3376 			set_bit(index, sip->map_storep);
3377 
3378 		lba = map_index_to_lba(index + 1);
3379 	}
3380 }
3381 
3382 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3383 			 unsigned int len)
3384 {
3385 	sector_t end = lba + len;
3386 	u8 *fsp = sip->storep;
3387 
3388 	while (lba < end) {
3389 		unsigned long index = lba_to_map_index(lba);
3390 
3391 		if (lba == map_index_to_lba(index) &&
3392 		    lba + sdebug_unmap_granularity <= end &&
3393 		    index < map_size) {
3394 			clear_bit(index, sip->map_storep);
3395 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff bytes */
3396 				memset(fsp + lba * sdebug_sector_size,
3397 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3398 				       sdebug_sector_size *
3399 				       sdebug_unmap_granularity);
3400 			}
3401 			if (sip->dif_storep) {
3402 				memset(sip->dif_storep + lba, 0xff,
3403 				       sizeof(*sip->dif_storep) *
3404 				       sdebug_unmap_granularity);
3405 			}
3406 		}
3407 		lba = map_index_to_lba(index + 1);
3408 	}
3409 }
3410 
3411 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3412 {
3413 	bool check_prot;
3414 	u32 num;
3415 	u32 ei_lba;
3416 	int ret;
3417 	u64 lba;
3418 	struct sdeb_store_info *sip = devip2sip(devip, true);
3419 	rwlock_t *macc_lckp = &sip->macc_lck;
3420 	u8 *cmd = scp->cmnd;
3421 
3422 	switch (cmd[0]) {
3423 	case WRITE_16:
3424 		ei_lba = 0;
3425 		lba = get_unaligned_be64(cmd + 2);
3426 		num = get_unaligned_be32(cmd + 10);
3427 		check_prot = true;
3428 		break;
3429 	case WRITE_10:
3430 		ei_lba = 0;
3431 		lba = get_unaligned_be32(cmd + 2);
3432 		num = get_unaligned_be16(cmd + 7);
3433 		check_prot = true;
3434 		break;
3435 	case WRITE_6:
3436 		ei_lba = 0;
3437 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3438 		      (u32)(cmd[1] & 0x1f) << 16;
3439 		num = (0 == cmd[4]) ? 256 : cmd[4];
3440 		check_prot = true;
3441 		break;
3442 	case WRITE_12:
3443 		ei_lba = 0;
3444 		lba = get_unaligned_be32(cmd + 2);
3445 		num = get_unaligned_be32(cmd + 6);
3446 		check_prot = true;
3447 		break;
3448 	case 0x53:	/* XDWRITEREAD(10) */
3449 		ei_lba = 0;
3450 		lba = get_unaligned_be32(cmd + 2);
3451 		num = get_unaligned_be16(cmd + 7);
3452 		check_prot = false;
3453 		break;
3454 	default:	/* assume WRITE(32) */
3455 		lba = get_unaligned_be64(cmd + 12);
3456 		ei_lba = get_unaligned_be32(cmd + 20);
3457 		num = get_unaligned_be32(cmd + 28);
3458 		check_prot = false;
3459 		break;
3460 	}
3461 	if (unlikely(have_dif_prot && check_prot)) {
3462 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3463 		    (cmd[1] & 0xe0)) {
3464 			mk_sense_invalid_opcode(scp);
3465 			return check_condition_result;
3466 		}
3467 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3468 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3469 		    (cmd[1] & 0xe0) == 0)
3470 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3471 				    "to DIF device\n");
3472 	}
3473 
3474 	write_lock(macc_lckp);
3475 	ret = check_device_access_params(scp, lba, num, true);
3476 	if (ret) {
3477 		write_unlock(macc_lckp);
3478 		return ret;
3479 	}
3480 
3481 	/* DIX + T10 DIF */
3482 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3483 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3484 
3485 		if (prot_ret) {
3486 			write_unlock(macc_lckp);
3487 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3488 			return illegal_condition_result;
3489 		}
3490 	}
3491 
3492 	ret = do_device_access(sip, scp, 0, lba, num, true);
3493 	if (unlikely(scsi_debug_lbp()))
3494 		map_region(sip, lba, num);
3495 	/* If ZBC zone then bump its write pointer */
3496 	if (sdebug_dev_is_zoned(devip))
3497 		zbc_inc_wp(devip, lba, num);
3498 	write_unlock(macc_lckp);
3499 	if (unlikely(-1 == ret))
3500 		return DID_ERROR << 16;
3501 	else if (unlikely(sdebug_verbose &&
3502 			  (ret < (num * sdebug_sector_size))))
3503 		sdev_printk(KERN_INFO, scp->device,
3504 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3505 			    my_name, num * sdebug_sector_size, ret);
3506 
3507 	if (unlikely(sdebug_any_injecting_opt)) {
3508 		struct sdebug_queued_cmd *sqcp =
3509 				(struct sdebug_queued_cmd *)scp->host_scribble;
3510 
3511 		if (sqcp) {
3512 			if (sqcp->inj_recovered) {
3513 				mk_sense_buffer(scp, RECOVERED_ERROR,
3514 						THRESHOLD_EXCEEDED, 0);
3515 				return check_condition_result;
3516 			} else if (sqcp->inj_dif) {
3517 				/* Logical block guard check failed */
3518 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3519 				return illegal_condition_result;
3520 			} else if (sqcp->inj_dix) {
3521 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3522 				return illegal_condition_result;
3523 			}
3524 		}
3525 	}
3526 	return 0;
3527 }
3528 
3529 /*
3530  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3531  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3532  */
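/*
 * The data-out buffer begins with a header area of lbdof logical blocks
 * holding a 32 byte parameter list header followed by the LBA range
 * descriptors; the data to be written follows, in descriptor order,
 * starting at byte offset lbdof * lb_size.
 */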
3533 static int resp_write_scat(struct scsi_cmnd *scp,
3534 			   struct sdebug_dev_info *devip)
3535 {
3536 	u8 *cmd = scp->cmnd;
3537 	u8 *lrdp = NULL;
3538 	u8 *up;
3539 	struct sdeb_store_info *sip = devip2sip(devip, true);
3540 	rwlock_t *macc_lckp = &sip->macc_lck;
3541 	u8 wrprotect;
3542 	u16 lbdof, num_lrd, k;
3543 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3544 	u32 lb_size = sdebug_sector_size;
3545 	u32 ei_lba;
3546 	u64 lba;
3547 	int ret, res;
3548 	bool is_16;
3549 	static const u32 lrd_size = 32; /* + parameter list header size */
3550 
3551 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3552 		is_16 = false;
3553 		wrprotect = (cmd[10] >> 5) & 0x7;
3554 		lbdof = get_unaligned_be16(cmd + 12);
3555 		num_lrd = get_unaligned_be16(cmd + 16);
3556 		bt_len = get_unaligned_be32(cmd + 28);
3557 	} else {        /* that leaves WRITE SCATTERED(16) */
3558 		is_16 = true;
3559 		wrprotect = (cmd[2] >> 5) & 0x7;
3560 		lbdof = get_unaligned_be16(cmd + 4);
3561 		num_lrd = get_unaligned_be16(cmd + 8);
3562 		bt_len = get_unaligned_be32(cmd + 10);
3563 		if (unlikely(have_dif_prot)) {
3564 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3565 			    wrprotect) {
3566 				mk_sense_invalid_opcode(scp);
3567 				return illegal_condition_result;
3568 			}
3569 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3570 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3571 			     wrprotect == 0)
3572 				sdev_printk(KERN_ERR, scp->device,
3573 					    "Unprotected WR to DIF device\n");
3574 		}
3575 	}
3576 	if ((num_lrd == 0) || (bt_len == 0))
3577 		return 0;       /* T10 says these do-nothings are not errors */
3578 	if (lbdof == 0) {
3579 		if (sdebug_verbose)
3580 			sdev_printk(KERN_INFO, scp->device,
3581 				"%s: %s: LB Data Offset field bad\n",
3582 				my_name, __func__);
3583 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3584 		return illegal_condition_result;
3585 	}
3586 	lbdof_blen = lbdof * lb_size;
3587 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3588 		if (sdebug_verbose)
3589 			sdev_printk(KERN_INFO, scp->device,
3590 				"%s: %s: LBA range descriptors don't fit\n",
3591 				my_name, __func__);
3592 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3593 		return illegal_condition_result;
3594 	}
3595 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3596 	if (lrdp == NULL)
3597 		return SCSI_MLQUEUE_HOST_BUSY;
3598 	if (sdebug_verbose)
3599 		sdev_printk(KERN_INFO, scp->device,
3600 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3601 			my_name, __func__, lbdof_blen);
3602 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3603 	if (res == -1) {
3604 		ret = DID_ERROR << 16;
3605 		goto err_out;
3606 	}
3607 
3608 	write_lock(macc_lckp);
3609 	sg_off = lbdof_blen;
3610 	/* Spec says the Buffer Transfer Length field counts LBs in the data-out */
3611 	cum_lb = 0;
3612 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3613 		lba = get_unaligned_be64(up + 0);
3614 		num = get_unaligned_be32(up + 8);
3615 		if (sdebug_verbose)
3616 			sdev_printk(KERN_INFO, scp->device,
3617 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3618 				my_name, __func__, k, lba, num, sg_off);
3619 		if (num == 0)
3620 			continue;
3621 		ret = check_device_access_params(scp, lba, num, true);
3622 		if (ret)
3623 			goto err_out_unlock;
3624 		num_by = num * lb_size;
3625 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3626 
3627 		if ((cum_lb + num) > bt_len) {
3628 			if (sdebug_verbose)
3629 				sdev_printk(KERN_INFO, scp->device,
3630 				    "%s: %s: sum of blocks > data provided\n",
3631 				    my_name, __func__);
3632 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3633 					0);
3634 			ret = illegal_condition_result;
3635 			goto err_out_unlock;
3636 		}
3637 
3638 		/* DIX + T10 DIF */
3639 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3640 			int prot_ret = prot_verify_write(scp, lba, num,
3641 							 ei_lba);
3642 
3643 			if (prot_ret) {
3644 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3645 						prot_ret);
3646 				ret = illegal_condition_result;
3647 				goto err_out_unlock;
3648 			}
3649 		}
3650 
3651 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3652 		/* If ZBC zone then bump its write pointer */
3653 		if (sdebug_dev_is_zoned(devip))
3654 			zbc_inc_wp(devip, lba, num);
3655 		if (unlikely(scsi_debug_lbp()))
3656 			map_region(sip, lba, num);
3657 		if (unlikely(-1 == ret)) {
3658 			ret = DID_ERROR << 16;
3659 			goto err_out_unlock;
3660 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3661 			sdev_printk(KERN_INFO, scp->device,
3662 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3663 			    my_name, num_by, ret);
3664 
3665 		if (unlikely(sdebug_any_injecting_opt)) {
3666 			struct sdebug_queued_cmd *sqcp =
3667 				(struct sdebug_queued_cmd *)scp->host_scribble;
3668 
3669 			if (sqcp) {
3670 				if (sqcp->inj_recovered) {
3671 					mk_sense_buffer(scp, RECOVERED_ERROR,
3672 							THRESHOLD_EXCEEDED, 0);
3673 					ret = illegal_condition_result;
3674 					goto err_out_unlock;
3675 				} else if (sqcp->inj_dif) {
3676 					/* Logical block guard check failed */
3677 					mk_sense_buffer(scp, ABORTED_COMMAND,
3678 							0x10, 1);
3679 					ret = illegal_condition_result;
3680 					goto err_out_unlock;
3681 				} else if (sqcp->inj_dix) {
3682 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3683 							0x10, 1);
3684 					ret = illegal_condition_result;
3685 					goto err_out_unlock;
3686 				}
3687 			}
3688 		}
3689 		sg_off += num_by;
3690 		cum_lb += num;
3691 	}
3692 	ret = 0;
3693 err_out_unlock:
3694 	write_unlock(macc_lckp);
3695 err_out:
3696 	kfree(lrdp);
3697 	return ret;
3698 }
3699 
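/*
 * Handle the common part of WRITE SAME(10/16): either unmap the range
 * (when the UNMAP bit is set and LBP is enabled) or replicate a single
 * logical block, zeroed when NDOB is set and otherwise fetched from the
 * data-out buffer, across @num blocks starting at @lba.
 */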
3700 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3701 			   u32 ei_lba, bool unmap, bool ndob)
3702 {
3703 	struct scsi_device *sdp = scp->device;
3704 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3705 	unsigned long long i;
3706 	u64 block, lbaa;
3707 	u32 lb_size = sdebug_sector_size;
3708 	int ret;
3709 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3710 						scp->device->hostdata, true);
3711 	rwlock_t *macc_lckp = &sip->macc_lck;
3712 	u8 *fs1p;
3713 	u8 *fsp;
3714 
3715 	write_lock(macc_lckp);
3716 
3717 	ret = check_device_access_params(scp, lba, num, true);
3718 	if (ret) {
3719 		write_unlock(macc_lckp);
3720 		return ret;
3721 	}
3722 
3723 	if (unmap && scsi_debug_lbp()) {
3724 		unmap_region(sip, lba, num);
3725 		goto out;
3726 	}
3727 	lbaa = lba;
3728 	block = do_div(lbaa, sdebug_store_sectors);
3729 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3730 	fsp = sip->storep;
3731 	fs1p = fsp + (block * lb_size);
3732 	if (ndob) {
3733 		memset(fs1p, 0, lb_size);
3734 		ret = 0;
3735 	} else
3736 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3737 
3738 	if (-1 == ret) {
3739 		write_unlock(macc_lckp);
3740 		return DID_ERROR << 16;
3741 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3742 		sdev_printk(KERN_INFO, scp->device,
3743 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3744 			    my_name, "write same", lb_size, ret);
3745 
3746 	/* Copy first sector to remaining blocks */
3747 	for (i = 1 ; i < num ; i++) {
3748 		lbaa = lba + i;
3749 		block = do_div(lbaa, sdebug_store_sectors);
3750 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3751 	}
3752 	if (scsi_debug_lbp())
3753 		map_region(sip, lba, num);
3754 	/* If ZBC zone then bump its write pointer */
3755 	if (sdebug_dev_is_zoned(devip))
3756 		zbc_inc_wp(devip, lba, num);
3757 out:
3758 	write_unlock(macc_lckp);
3759 
3760 	return 0;
3761 }
3762 
3763 static int resp_write_same_10(struct scsi_cmnd *scp,
3764 			      struct sdebug_dev_info *devip)
3765 {
3766 	u8 *cmd = scp->cmnd;
3767 	u32 lba;
3768 	u16 num;
3769 	u32 ei_lba = 0;
3770 	bool unmap = false;
3771 
3772 	if (cmd[1] & 0x8) {
3773 		if (sdebug_lbpws10 == 0) {
3774 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3775 			return check_condition_result;
3776 		} else
3777 			unmap = true;
3778 	}
3779 	lba = get_unaligned_be32(cmd + 2);
3780 	num = get_unaligned_be16(cmd + 7);
3781 	if (num > sdebug_write_same_length) {
3782 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3783 		return check_condition_result;
3784 	}
3785 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3786 }
3787 
3788 static int resp_write_same_16(struct scsi_cmnd *scp,
3789 			      struct sdebug_dev_info *devip)
3790 {
3791 	u8 *cmd = scp->cmnd;
3792 	u64 lba;
3793 	u32 num;
3794 	u32 ei_lba = 0;
3795 	bool unmap = false;
3796 	bool ndob = false;
3797 
3798 	if (cmd[1] & 0x8) {	/* UNMAP */
3799 		if (sdebug_lbpws == 0) {
3800 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3801 			return check_condition_result;
3802 		} else
3803 			unmap = true;
3804 	}
3805 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3806 		ndob = true;
3807 	lba = get_unaligned_be64(cmd + 2);
3808 	num = get_unaligned_be32(cmd + 10);
3809 	if (num > sdebug_write_same_length) {
3810 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3811 		return check_condition_result;
3812 	}
3813 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3814 }
3815 
3816 /* Note the mode field is in the same position as the (lower) service action
3817  * field. For the Report supported operation codes command, SPC-4 suggests
3818  * each mode should be reported separately; that is left for the future. */
3819 static int resp_write_buffer(struct scsi_cmnd *scp,
3820 			     struct sdebug_dev_info *devip)
3821 {
3822 	u8 *cmd = scp->cmnd;
3823 	struct scsi_device *sdp = scp->device;
3824 	struct sdebug_dev_info *dp;
3825 	u8 mode;
3826 
3827 	mode = cmd[1] & 0x1f;
3828 	switch (mode) {
3829 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3830 		/* set UAs on this device only */
3831 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3832 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3833 		break;
3834 	case 0x5:	/* download MC, save and ACT */
3835 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3836 		break;
3837 	case 0x6:	/* download MC with offsets and ACT */
3838 		/* set UAs on most devices (LUs) in this target */
3839 		list_for_each_entry(dp,
3840 				    &devip->sdbg_host->dev_info_list,
3841 				    dev_list)
3842 			if (dp->target == sdp->id) {
3843 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3844 				if (devip != dp)
3845 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3846 						dp->uas_bm);
3847 			}
3848 		break;
3849 	case 0x7:	/* download MC with offsets, save, and ACT */
3850 		/* set UA on all devices (LUs) in this target */
3851 		list_for_each_entry(dp,
3852 				    &devip->sdbg_host->dev_info_list,
3853 				    dev_list)
3854 			if (dp->target == sdp->id)
3855 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3856 					dp->uas_bm);
3857 		break;
3858 	default:
3859 		/* do nothing for this command for other mode values */
3860 		break;
3861 	}
3862 	return 0;
3863 }
3864 
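/*
 * COMPARE AND WRITE. The data-out buffer carries 2 * num logical blocks:
 * the verify data followed by the write data. The compare and conditional
 * write are done by comp_write_worker() while the store's write lock is
 * held.
 */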
3865 static int resp_comp_write(struct scsi_cmnd *scp,
3866 			   struct sdebug_dev_info *devip)
3867 {
3868 	u8 *cmd = scp->cmnd;
3869 	u8 *arr;
3870 	struct sdeb_store_info *sip = devip2sip(devip, true);
3871 	rwlock_t *macc_lckp = &sip->macc_lck;
3872 	u64 lba;
3873 	u32 dnum;
3874 	u32 lb_size = sdebug_sector_size;
3875 	u8 num;
3876 	int ret;
3877 	int retval = 0;
3878 
3879 	lba = get_unaligned_be64(cmd + 2);
3880 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3881 	if (0 == num)
3882 		return 0;	/* degenerate case, not an error */
3883 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3884 	    (cmd[1] & 0xe0)) {
3885 		mk_sense_invalid_opcode(scp);
3886 		return check_condition_result;
3887 	}
3888 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3889 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3890 	    (cmd[1] & 0xe0) == 0)
3891 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3892 			    "to DIF device\n");
3893 	ret = check_device_access_params(scp, lba, num, false);
3894 	if (ret)
3895 		return ret;
3896 	dnum = 2 * num;
3897 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3898 	if (NULL == arr) {
3899 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3900 				INSUFF_RES_ASCQ);
3901 		return check_condition_result;
3902 	}
3903 
3904 	write_lock(macc_lckp);
3905 
3906 	ret = do_dout_fetch(scp, dnum, arr);
3907 	if (ret == -1) {
3908 		retval = DID_ERROR << 16;
3909 		goto cleanup;
3910 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3911 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3912 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3913 			    dnum * lb_size, ret);
3914 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3915 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3916 		retval = check_condition_result;
3917 		goto cleanup;
3918 	}
3919 	if (scsi_debug_lbp())
3920 		map_region(sip, lba, num);
3921 cleanup:
3922 	write_unlock(macc_lckp);
3923 	kfree(arr);
3924 	return retval;
3925 }
3926 
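/* UNMAP parameter list: an 8 byte header followed by 16 byte descriptors */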
3927 struct unmap_block_desc {
3928 	__be64	lba;
3929 	__be32	blocks;
3930 	__be32	__reserved;
3931 };
3932 
3933 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3934 {
3935 	unsigned char *buf;
3936 	struct unmap_block_desc *desc;
3937 	struct sdeb_store_info *sip = devip2sip(devip, true);
3938 	rwlock_t *macc_lckp = &sip->macc_lck;
3939 	unsigned int i, payload_len, descriptors;
3940 	int ret;
3941 
3942 	if (!scsi_debug_lbp())
3943 		return 0;	/* fib and say it's done */
3944 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3945 	BUG_ON(scsi_bufflen(scp) != payload_len);
3946 
3947 	descriptors = (payload_len - 8) / 16;
3948 	if (descriptors > sdebug_unmap_max_desc) {
3949 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3950 		return check_condition_result;
3951 	}
3952 
3953 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3954 	if (!buf) {
3955 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3956 				INSUFF_RES_ASCQ);
3957 		return check_condition_result;
3958 	}
3959 
3960 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3961 
3962 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3963 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3964 
3965 	desc = (void *)&buf[8];
3966 
3967 	write_lock(macc_lckp);
3968 
3969 	for (i = 0 ; i < descriptors ; i++) {
3970 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3971 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3972 
3973 		ret = check_device_access_params(scp, lba, num, true);
3974 		if (ret)
3975 			goto out;
3976 
3977 		unmap_region(sip, lba, num);
3978 	}
3979 
3980 	ret = 0;
3981 
3982 out:
3983 	write_unlock(macc_lckp);
3984 	kfree(buf);
3985 
3986 	return ret;
3987 }
3988 
3989 #define SDEBUG_GET_LBA_STATUS_LEN 32
3990 
3991 static int resp_get_lba_status(struct scsi_cmnd *scp,
3992 			       struct sdebug_dev_info *devip)
3993 {
3994 	u8 *cmd = scp->cmnd;
3995 	u64 lba;
3996 	u32 alloc_len, mapped, num;
3997 	int ret;
3998 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3999 
4000 	lba = get_unaligned_be64(cmd + 2);
4001 	alloc_len = get_unaligned_be32(cmd + 10);
4002 
4003 	if (alloc_len < 24)
4004 		return 0;
4005 
4006 	ret = check_device_access_params(scp, lba, 1, false);
4007 	if (ret)
4008 		return ret;
4009 
4010 	if (scsi_debug_lbp()) {
4011 		struct sdeb_store_info *sip = devip2sip(devip, true);
4012 
4013 		mapped = map_state(sip, lba, &num);
4014 	} else {
4015 		mapped = 1;
4016 		/* recompute capacity in case virtual_gb has changed */
4017 		sdebug_capacity = get_sdebug_capacity();
4018 		if (sdebug_capacity - lba <= 0xffffffff)
4019 			num = sdebug_capacity - lba;
4020 		else
4021 			num = 0xffffffff;
4022 	}
4023 
4024 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4025 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4026 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4027 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4028 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4029 
4030 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4031 }
4032 
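/*
 * SYNCHRONIZE CACHE(10/16). Replies immediately (SDEG_RES_IMMED_MASK)
 * when nothing has been written since the last sync or when the IMMED
 * bit is set; otherwise write_since_sync is cleared and the reply is
 * subject to the usual command delay, modelling a cache flush.
 */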
4033 static int resp_sync_cache(struct scsi_cmnd *scp,
4034 			   struct sdebug_dev_info *devip)
4035 {
4036 	int res = 0;
4037 	u64 lba;
4038 	u32 num_blocks;
4039 	u8 *cmd = scp->cmnd;
4040 
4041 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4042 		lba = get_unaligned_be32(cmd + 2);
4043 		num_blocks = get_unaligned_be16(cmd + 7);
4044 	} else {				/* SYNCHRONIZE_CACHE(16) */
4045 		lba = get_unaligned_be64(cmd + 2);
4046 		num_blocks = get_unaligned_be32(cmd + 10);
4047 	}
4048 	if (lba + num_blocks > sdebug_capacity) {
4049 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4050 		return check_condition_result;
4051 	}
4052 	if (!write_since_sync || cmd[1] & 0x2)
4053 		res = SDEG_RES_IMMED_MASK;
4054 	else		/* delay if write_since_sync and IMMED clear */
4055 		write_since_sync = false;
4056 	return res;
4057 }
4058 
4059 /*
4060  * Assuming LBA+num_blocks is not out-of-range, this function returns
4061  * CONDITION MET if the specified blocks will fit (or already sit) in the
4062  * cache, and a GOOD status otherwise. This driver models a disk with a
4063  * big cache and so always yields CONDITION MET. It also tries to bring
4064  * the range in main memory into the cache associated with the CPU(s).
4065  */
4066 static int resp_pre_fetch(struct scsi_cmnd *scp,
4067 			  struct sdebug_dev_info *devip)
4068 {
4069 	int res = 0;
4070 	u64 lba;
4071 	u64 block, rest = 0;
4072 	u32 nblks;
4073 	u8 *cmd = scp->cmnd;
4074 	struct sdeb_store_info *sip = devip2sip(devip, true);
4075 	rwlock_t *macc_lckp = &sip->macc_lck;
4076 	u8 *fsp = sip->storep;
4077 
4078 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4079 		lba = get_unaligned_be32(cmd + 2);
4080 		nblks = get_unaligned_be16(cmd + 7);
4081 	} else {			/* PRE-FETCH(16) */
4082 		lba = get_unaligned_be64(cmd + 2);
4083 		nblks = get_unaligned_be32(cmd + 10);
4084 	}
4085 	if (lba + nblks > sdebug_capacity) {
4086 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4087 		return check_condition_result;
4088 	}
4089 	if (!fsp)
4090 		goto fini;
4091 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4092 	block = do_div(lba, sdebug_store_sectors);
4093 	if (block + nblks > sdebug_store_sectors)
4094 		rest = block + nblks - sdebug_store_sectors;
4095 
4096 	/* Try to bring the PRE-FETCH range into CPU's cache */
4097 	read_lock(macc_lckp);
4098 	prefetch_range(fsp + (sdebug_sector_size * block),
4099 		       (nblks - rest) * sdebug_sector_size);
4100 	if (rest)
4101 		prefetch_range(fsp, rest * sdebug_sector_size);
4102 	read_unlock(macc_lckp);
4103 fini:
4104 	if (cmd[1] & 0x2)
4105 		res = SDEG_RES_IMMED_MASK;
4106 	return res | condition_met_result;
4107 }
4108 
4109 #define RL_BUCKET_ELEMS 8
4110 
4111 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4112  * (W-LUN), the normal Linux scanning logic does not associate it with a
4113  * device (e.g. /dev/sg7). The following magic will make that association:
4114  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4115  * where <n> is a host number. If there are multiple targets in a host then
4116  * the above will associate a W-LUN to each target. To get a W-LUN for
4117  * target 2 only, use "echo '- 2 49409' > scan".
4118  */
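/*
 * The response is built and sent in buckets of RL_BUCKET_ELEMS (8) LUN
 * entries of 8 bytes each; the 8 byte response header takes the place of
 * the first entry in the first bucket.
 */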
4119 static int resp_report_luns(struct scsi_cmnd *scp,
4120 			    struct sdebug_dev_info *devip)
4121 {
4122 	unsigned char *cmd = scp->cmnd;
4123 	unsigned int alloc_len;
4124 	unsigned char select_report;
4125 	u64 lun;
4126 	struct scsi_lun *lun_p;
4127 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4128 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4129 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4130 	unsigned int tlun_cnt;	/* total LUN count */
4131 	unsigned int rlen;	/* response length (in bytes) */
4132 	int k, j, n, res;
4133 	unsigned int off_rsp = 0;
4134 	const int sz_lun = sizeof(struct scsi_lun);
4135 
4136 	clear_luns_changed_on_target(devip);
4137 
4138 	select_report = cmd[2];
4139 	alloc_len = get_unaligned_be32(cmd + 6);
4140 
4141 	if (alloc_len < 4) {
4142 		pr_err("alloc len too small %d\n", alloc_len);
4143 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4144 		return check_condition_result;
4145 	}
4146 
4147 	switch (select_report) {
4148 	case 0:		/* all LUNs apart from W-LUNs */
4149 		lun_cnt = sdebug_max_luns;
4150 		wlun_cnt = 0;
4151 		break;
4152 	case 1:		/* only W-LUNs */
4153 		lun_cnt = 0;
4154 		wlun_cnt = 1;
4155 		break;
4156 	case 2:		/* all LUNs */
4157 		lun_cnt = sdebug_max_luns;
4158 		wlun_cnt = 1;
4159 		break;
4160 	case 0x10:	/* only administrative LUs */
4161 	case 0x11:	/* see SPC-5 */
4162 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4163 	default:
4164 		pr_debug("select report invalid %d\n", select_report);
4165 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4166 		return check_condition_result;
4167 	}
4168 
4169 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4170 		--lun_cnt;
4171 
4172 	tlun_cnt = lun_cnt + wlun_cnt;
4173 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4174 	scsi_set_resid(scp, scsi_bufflen(scp));
4175 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4176 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4177 
4178 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4179 	lun = sdebug_no_lun_0 ? 1 : 0;
4180 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4181 		memset(arr, 0, sizeof(arr));
4182 		lun_p = (struct scsi_lun *)&arr[0];
4183 		if (k == 0) {
4184 			put_unaligned_be32(rlen, &arr[0]);
4185 			++lun_p;
4186 			j = 1;
4187 		}
4188 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4189 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4190 				break;
4191 			int_to_scsilun(lun++, lun_p);
4192 		}
4193 		if (j < RL_BUCKET_ELEMS)
4194 			break;
4195 		n = j * sz_lun;
4196 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4197 		if (res)
4198 			return res;
4199 		off_rsp += n;
4200 	}
4201 	if (wlun_cnt) {
4202 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4203 		++j;
4204 	}
4205 	if (j > 0)
4206 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4207 	return res;
4208 }
4209 
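/*
 * VERIFY(10/16) with BYTCHK set: fetch the data-out buffer and compare it
 * byte-wise against the store. With BYTCHK=3 a single block is sent and
 * compared against each of the vnum blocks starting at lba.
 */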
4210 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4211 {
4212 	bool is_bytchk3 = false;
4213 	u8 bytchk;
4214 	int ret, j;
4215 	u32 vnum, a_num, off;
4216 	const u32 lb_size = sdebug_sector_size;
4217 	u64 lba;
4218 	u8 *arr;
4219 	u8 *cmd = scp->cmnd;
4220 	struct sdeb_store_info *sip = devip2sip(devip, true);
4221 	rwlock_t *macc_lckp = &sip->macc_lck;
4222 
4223 	bytchk = (cmd[1] >> 1) & 0x3;
4224 	if (bytchk == 0) {
4225 		return 0;	/* always claim internal verify okay */
4226 	} else if (bytchk == 2) {
4227 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4228 		return check_condition_result;
4229 	} else if (bytchk == 3) {
4230 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4231 	}
4232 	switch (cmd[0]) {
4233 	case VERIFY_16:
4234 		lba = get_unaligned_be64(cmd + 2);
4235 		vnum = get_unaligned_be32(cmd + 10);
4236 		break;
4237 	case VERIFY:		/* is VERIFY(10) */
4238 		lba = get_unaligned_be32(cmd + 2);
4239 		vnum = get_unaligned_be16(cmd + 7);
4240 		break;
4241 	default:
4242 		mk_sense_invalid_opcode(scp);
4243 		return check_condition_result;
4244 	}
4245 	a_num = is_bytchk3 ? 1 : vnum;
4246 	/* Treat following check like one for read (i.e. no write) access */
4247 	ret = check_device_access_params(scp, lba, a_num, false);
4248 	if (ret)
4249 		return ret;
4250 
4251 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4252 	if (!arr) {
4253 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4254 				INSUFF_RES_ASCQ);
4255 		return check_condition_result;
4256 	}
4257 	/* Not changing store, so only need read access */
4258 	read_lock(macc_lckp);
4259 
4260 	ret = do_dout_fetch(scp, a_num, arr);
4261 	if (ret == -1) {
4262 		ret = DID_ERROR << 16;
4263 		goto cleanup;
4264 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4265 		sdev_printk(KERN_INFO, scp->device,
4266 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4267 			    my_name, __func__, a_num * lb_size, ret);
4268 	}
4269 	if (is_bytchk3) {
4270 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4271 			memcpy(arr + off, arr, lb_size);
4272 	}
4273 	ret = 0;
4274 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4275 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4276 		ret = check_condition_result;
4277 		goto cleanup;
4278 	}
4279 cleanup:
4280 	read_unlock(macc_lckp);
4281 	kfree(arr);
4282 	return ret;
4283 }
4284 
4285 #define RZONES_DESC_HD 64
4286 
4287 /* Report zones depending on start LBA and reporting options */
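/* Both the report header and each zone descriptor occupy RZONES_DESC_HD
 * (64) bytes in the response. */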
4288 static int resp_report_zones(struct scsi_cmnd *scp,
4289 			     struct sdebug_dev_info *devip)
4290 {
4291 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4292 	int ret = 0;
4293 	u32 alloc_len, rep_opts, rep_len;
4294 	bool partial;
4295 	u64 lba, zs_lba;
4296 	u8 *arr = NULL, *desc;
4297 	u8 *cmd = scp->cmnd;
4298 	struct sdeb_zone_state *zsp;
4299 	struct sdeb_store_info *sip = devip2sip(devip, false);
4300 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4301 
4302 	if (!sdebug_dev_is_zoned(devip)) {
4303 		mk_sense_invalid_opcode(scp);
4304 		return check_condition_result;
4305 	}
4306 	zs_lba = get_unaligned_be64(cmd + 2);
4307 	alloc_len = get_unaligned_be32(cmd + 10);
4308 	rep_opts = cmd[14] & 0x3f;
4309 	partial = cmd[14] & 0x80;
4310 
4311 	if (zs_lba >= sdebug_capacity) {
4312 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4313 		return check_condition_result;
4314 	}
4315 
4316 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4317 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4318 			    max_zones);
4319 
4320 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4321 	if (!arr) {
4322 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4323 				INSUFF_RES_ASCQ);
4324 		return check_condition_result;
4325 	}
4326 
4327 	read_lock(macc_lckp);
4328 
4329 	desc = arr + 64;
4330 	for (i = 0; i < max_zones; i++) {
4331 		lba = zs_lba + devip->zsize * i;
4332 		if (lba > sdebug_capacity)
4333 			break;
4334 		zsp = zbc_zone(devip, lba);
4335 		switch (rep_opts) {
4336 		case 0x00:
4337 			/* All zones */
4338 			break;
4339 		case 0x01:
4340 			/* Empty zones */
4341 			if (zsp->z_cond != ZC1_EMPTY)
4342 				continue;
4343 			break;
4344 		case 0x02:
4345 			/* Implicit open zones */
4346 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4347 				continue;
4348 			break;
4349 		case 0x03:
4350 			/* Explicit open zones */
4351 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4352 				continue;
4353 			break;
4354 		case 0x04:
4355 			/* Closed zones */
4356 			if (zsp->z_cond != ZC4_CLOSED)
4357 				continue;
4358 			break;
4359 		case 0x05:
4360 			/* Full zones */
4361 			if (zsp->z_cond != ZC5_FULL)
4362 				continue;
4363 			break;
4364 		case 0x06:
4365 		case 0x07:
4366 		case 0x10:
4367 			/*
4368 			 * Read-only, offline, reset WP recommended are
4369 			 * not emulated: no zones to report.
4370 			 */
4371 			continue;
4372 		case 0x11:
4373 			/* non-seq-resource set */
4374 			if (!zsp->z_non_seq_resource)
4375 				continue;
4376 			break;
4377 		case 0x3f:
4378 			/* Not write pointer (conventional) zones */
4379 			if (!zbc_zone_is_conv(zsp))
4380 				continue;
4381 			break;
4382 		default:
4383 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4384 					INVALID_FIELD_IN_CDB, 0);
4385 			ret = check_condition_result;
4386 			goto fini;
4387 		}
4388 
4389 		if (nrz < rep_max_zones) {
4390 			/* Fill zone descriptor */
4391 			desc[0] = zsp->z_type;
4392 			desc[1] = zsp->z_cond << 4;
4393 			if (zsp->z_non_seq_resource)
4394 				desc[1] |= 1 << 1;
4395 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4396 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4397 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4398 			desc += 64;
4399 		}
4400 
4401 		if (partial && nrz >= rep_max_zones)
4402 			break;
4403 
4404 		nrz++;
4405 	}
4406 
4407 	/* Report header */
4408 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4409 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4410 
4411 	rep_len = (unsigned long)desc - (unsigned long)arr;
4412 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4413 
4414 fini:
4415 	read_unlock(macc_lckp);
4416 	kfree(arr);
4417 	return ret;
4418 }
4419 
4420 /* Logic transplanted from tcmu-runner, file_zbc.c */
4421 static void zbc_open_all(struct sdebug_dev_info *devip)
4422 {
4423 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4424 	unsigned int i;
4425 
4426 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4427 		if (zsp->z_cond == ZC4_CLOSED)
4428 			zbc_open_zone(devip, &devip->zstate[i], true);
4429 	}
4430 }
4431 
4432 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4433 {
4434 	int res = 0;
4435 	u64 z_id;
4436 	enum sdebug_z_cond zc;
4437 	u8 *cmd = scp->cmnd;
4438 	struct sdeb_zone_state *zsp;
4439 	bool all = cmd[14] & 0x01;
4440 	struct sdeb_store_info *sip = devip2sip(devip, false);
4441 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4442 
4443 	if (!sdebug_dev_is_zoned(devip)) {
4444 		mk_sense_invalid_opcode(scp);
4445 		return check_condition_result;
4446 	}
4447 
4448 	write_lock(macc_lckp);
4449 
4450 	if (all) {
4451 		/* Check if all closed zones can be opened */
4452 		if (devip->max_open &&
4453 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4454 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4455 					INSUFF_ZONE_ASCQ);
4456 			res = check_condition_result;
4457 			goto fini;
4458 		}
4459 		/* Open all closed zones */
4460 		zbc_open_all(devip);
4461 		goto fini;
4462 	}
4463 
4464 	/* Open the specified zone */
4465 	z_id = get_unaligned_be64(cmd + 2);
4466 	if (z_id >= sdebug_capacity) {
4467 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4468 		res = check_condition_result;
4469 		goto fini;
4470 	}
4471 
4472 	zsp = zbc_zone(devip, z_id);
4473 	if (z_id != zsp->z_start) {
4474 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4475 		res = check_condition_result;
4476 		goto fini;
4477 	}
4478 	if (zbc_zone_is_conv(zsp)) {
4479 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4480 		res = check_condition_result;
4481 		goto fini;
4482 	}
4483 
4484 	zc = zsp->z_cond;
4485 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4486 		goto fini;
4487 
4488 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4489 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4490 				INSUFF_ZONE_ASCQ);
4491 		res = check_condition_result;
4492 		goto fini;
4493 	}
4494 
4495 	if (zc == ZC2_IMPLICIT_OPEN)
4496 		zbc_close_zone(devip, zsp);
4497 	zbc_open_zone(devip, zsp, true);
4498 fini:
4499 	write_unlock(macc_lckp);
4500 	return res;
4501 }
4502 
4503 static void zbc_close_all(struct sdebug_dev_info *devip)
4504 {
4505 	unsigned int i;
4506 
4507 	for (i = 0; i < devip->nr_zones; i++)
4508 		zbc_close_zone(devip, &devip->zstate[i]);
4509 }
4510 
4511 static int resp_close_zone(struct scsi_cmnd *scp,
4512 			   struct sdebug_dev_info *devip)
4513 {
4514 	int res = 0;
4515 	u64 z_id;
4516 	u8 *cmd = scp->cmnd;
4517 	struct sdeb_zone_state *zsp;
4518 	bool all = cmd[14] & 0x01;
4519 	struct sdeb_store_info *sip = devip2sip(devip, false);
4520 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4521 
4522 	if (!sdebug_dev_is_zoned(devip)) {
4523 		mk_sense_invalid_opcode(scp);
4524 		return check_condition_result;
4525 	}
4526 
4527 	write_lock(macc_lckp);
4528 
4529 	if (all) {
4530 		zbc_close_all(devip);
4531 		goto fini;
4532 	}
4533 
4534 	/* Close specified zone */
4535 	z_id = get_unaligned_be64(cmd + 2);
4536 	if (z_id >= sdebug_capacity) {
4537 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4538 		res = check_condition_result;
4539 		goto fini;
4540 	}
4541 
4542 	zsp = zbc_zone(devip, z_id);
4543 	if (z_id != zsp->z_start) {
4544 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4545 		res = check_condition_result;
4546 		goto fini;
4547 	}
4548 	if (zbc_zone_is_conv(zsp)) {
4549 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4550 		res = check_condition_result;
4551 		goto fini;
4552 	}
4553 
4554 	zbc_close_zone(devip, zsp);
4555 fini:
4556 	write_unlock(macc_lckp);
4557 	return res;
4558 }
4559 
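/* Transition @zsp to the FULL condition with its write pointer at the end
 * of the zone; @empty also permits finishing a zone still in ZC1_EMPTY. */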
4560 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4561 			    struct sdeb_zone_state *zsp, bool empty)
4562 {
4563 	enum sdebug_z_cond zc = zsp->z_cond;
4564 
4565 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4566 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4567 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4568 			zbc_close_zone(devip, zsp);
4569 		if (zsp->z_cond == ZC4_CLOSED)
4570 			devip->nr_closed--;
4571 		zsp->z_wp = zsp->z_start + zsp->z_size;
4572 		zsp->z_cond = ZC5_FULL;
4573 	}
4574 }
4575 
4576 static void zbc_finish_all(struct sdebug_dev_info *devip)
4577 {
4578 	unsigned int i;
4579 
4580 	for (i = 0; i < devip->nr_zones; i++)
4581 		zbc_finish_zone(devip, &devip->zstate[i], false);
4582 }
4583 
4584 static int resp_finish_zone(struct scsi_cmnd *scp,
4585 			    struct sdebug_dev_info *devip)
4586 {
4587 	struct sdeb_zone_state *zsp;
4588 	int res = 0;
4589 	u64 z_id;
4590 	u8 *cmd = scp->cmnd;
4591 	bool all = cmd[14] & 0x01;
4592 	struct sdeb_store_info *sip = devip2sip(devip, false);
4593 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4594 
4595 	if (!sdebug_dev_is_zoned(devip)) {
4596 		mk_sense_invalid_opcode(scp);
4597 		return check_condition_result;
4598 	}
4599 
4600 	write_lock(macc_lckp);
4601 
4602 	if (all) {
4603 		zbc_finish_all(devip);
4604 		goto fini;
4605 	}
4606 
4607 	/* Finish the specified zone */
4608 	z_id = get_unaligned_be64(cmd + 2);
4609 	if (z_id >= sdebug_capacity) {
4610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4611 		res = check_condition_result;
4612 		goto fini;
4613 	}
4614 
4615 	zsp = zbc_zone(devip, z_id);
4616 	if (z_id != zsp->z_start) {
4617 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4618 		res = check_condition_result;
4619 		goto fini;
4620 	}
4621 	if (zbc_zone_is_conv(zsp)) {
4622 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4623 		res = check_condition_result;
4624 		goto fini;
4625 	}
4626 
4627 	zbc_finish_zone(devip, zsp, true);
4628 fini:
4629 	write_unlock(macc_lckp);
4630 	return res;
4631 }
4632 
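/* Reset the write pointer of @zsp: close the zone if it is open, then
 * return it to ZC1_EMPTY with the write pointer back at the zone start. */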
4633 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4634 			 struct sdeb_zone_state *zsp)
4635 {
4636 	enum sdebug_z_cond zc;
4637 
4638 	if (zbc_zone_is_conv(zsp))
4639 		return;
4640 
4641 	zc = zsp->z_cond;
4642 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4643 		zbc_close_zone(devip, zsp);
4644 
4645 	if (zsp->z_cond == ZC4_CLOSED)
4646 		devip->nr_closed--;
4647 
4648 	zsp->z_non_seq_resource = false;
4649 	zsp->z_wp = zsp->z_start;
4650 	zsp->z_cond = ZC1_EMPTY;
4651 }
4652 
4653 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4654 {
4655 	unsigned int i;
4656 
4657 	for (i = 0; i < devip->nr_zones; i++)
4658 		zbc_rwp_zone(devip, &devip->zstate[i]);
4659 }
4660 
4661 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4662 {
4663 	struct sdeb_zone_state *zsp;
4664 	int res = 0;
4665 	u64 z_id;
4666 	u8 *cmd = scp->cmnd;
4667 	bool all = cmd[14] & 0x01;
4668 	struct sdeb_store_info *sip = devip2sip(devip, false);
4669 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4670 
4671 	if (!sdebug_dev_is_zoned(devip)) {
4672 		mk_sense_invalid_opcode(scp);
4673 		return check_condition_result;
4674 	}
4675 
4676 	write_lock(macc_lckp);
4677 
4678 	if (all) {
4679 		zbc_rwp_all(devip);
4680 		goto fini;
4681 	}
4682 
4683 	z_id = get_unaligned_be64(cmd + 2);
4684 	if (z_id >= sdebug_capacity) {
4685 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4686 		res = check_condition_result;
4687 		goto fini;
4688 	}
4689 
4690 	zsp = zbc_zone(devip, z_id);
4691 	if (z_id != zsp->z_start) {
4692 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4693 		res = check_condition_result;
4694 		goto fini;
4695 	}
4696 	if (zbc_zone_is_conv(zsp)) {
4697 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4698 		res = check_condition_result;
4699 		goto fini;
4700 	}
4701 
4702 	zbc_rwp_zone(devip, zsp);
4703 fini:
4704 	write_unlock(macc_lckp);
4705 	return res;
4706 }
4707 
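/* Map a command to its submission queue via the blk-mq hardware queue
 * number encoded in the command's unique tag. */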
4708 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4709 {
4710 	u32 tag = blk_mq_unique_tag(cmnd->request);
4711 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
4712 
4713 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4714 	if (WARN_ON_ONCE(hwq >= submit_queues))
4715 		hwq = 0;
4716 	return sdebug_q_arr + hwq;
4717 }
4718 
4719 /* Queued (deferred) command completions converge here. */
4720 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4721 {
4722 	bool aborted = sd_dp->aborted;
4723 	int qc_idx;
4724 	int retiring = 0;
4725 	unsigned long iflags;
4726 	struct sdebug_queue *sqp;
4727 	struct sdebug_queued_cmd *sqcp;
4728 	struct scsi_cmnd *scp;
4729 	struct sdebug_dev_info *devip;
4730 
4731 	sd_dp->defer_t = SDEB_DEFER_NONE;
4732 	if (unlikely(aborted))
4733 		sd_dp->aborted = false;
4734 	qc_idx = sd_dp->qc_idx;
4735 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4736 	if (sdebug_statistics) {
4737 		atomic_inc(&sdebug_completions);
4738 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4739 			atomic_inc(&sdebug_miss_cpus);
4740 	}
4741 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4742 		pr_err("wild qc_idx=%d\n", qc_idx);
4743 		return;
4744 	}
4745 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4746 	sqcp = &sqp->qc_arr[qc_idx];
4747 	scp = sqcp->a_cmnd;
4748 	if (unlikely(scp == NULL)) {
4749 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4750 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
4751 		       sd_dp->sqa_idx, qc_idx);
4752 		return;
4753 	}
4754 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4755 	if (likely(devip))
4756 		atomic_dec(&devip->num_in_q);
4757 	else
4758 		pr_err("devip=NULL\n");
4759 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4760 		retiring = 1;
4761 
4762 	sqcp->a_cmnd = NULL;
4763 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4764 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4765 		pr_err("Unexpected completion\n");
4766 		return;
4767 	}
4768 
4769 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4770 		int k, retval;
4771 
4772 		retval = atomic_read(&retired_max_queue);
4773 		if (qc_idx >= retval) {
4774 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4775 			pr_err("index %d too large\n", retval);
4776 			return;
4777 		}
4778 		k = find_last_bit(sqp->in_use_bm, retval);
4779 		if ((k < sdebug_max_queue) || (k == retval))
4780 			atomic_set(&retired_max_queue, 0);
4781 		else
4782 			atomic_set(&retired_max_queue, k + 1);
4783 	}
4784 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4785 	if (unlikely(aborted)) {
4786 		if (sdebug_verbose)
4787 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4788 		return;
4789 	}
4790 	scp->scsi_done(scp); /* callback to mid level */
4791 }
4792 
4793 /* When high resolution timer goes off this function is called. */
4794 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4795 {
4796 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4797 						  hrt);
4798 	sdebug_q_cmd_complete(sd_dp);
4799 	return HRTIMER_NORESTART;
4800 }
4801 
4802 /* When work queue schedules work, it calls this function. */
4803 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4804 {
4805 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4806 						  ew.work);
4807 	sdebug_q_cmd_complete(sd_dp);
4808 }
4809 
4810 static bool got_shared_uuid;
4811 static uuid_t shared_uuid;
4812 
4813 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4814 {
4815 	struct sdeb_zone_state *zsp;
4816 	sector_t capacity = get_sdebug_capacity();
4817 	sector_t zstart = 0;
4818 	unsigned int i;
4819 
4820 	/*
4821 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4822 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4823 	 * use the specified zone size, checking that at least 2 zones can be
4824 	 * created for the device.
4825 	 */
4826 	if (!sdeb_zbc_zone_size_mb) {
4827 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4828 			>> ilog2(sdebug_sector_size);
4829 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4830 			devip->zsize >>= 1;
4831 		if (devip->zsize < 2) {
4832 			pr_err("Device capacity too small\n");
4833 			return -EINVAL;
4834 		}
4835 	} else {
4836 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4837 			pr_err("Zone size is not a power of 2\n");
4838 			return -EINVAL;
4839 		}
4840 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4841 			>> ilog2(sdebug_sector_size);
4842 		if (devip->zsize >= capacity) {
4843 			pr_err("Zone size too large for device capacity\n");
4844 			return -EINVAL;
4845 		}
4846 	}
4847 
4848 	devip->zsize_shift = ilog2(devip->zsize);
4849 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4850 
4851 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4852 		pr_err("Number of conventional zones too large\n");
4853 		return -EINVAL;
4854 	}
4855 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4856 
4857 	if (devip->zmodel == BLK_ZONED_HM) {
4858 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4859 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4860 			devip->max_open = (devip->nr_zones - 1) / 2;
4861 		else
4862 			devip->max_open = sdeb_zbc_max_open;
4863 	}
4864 
4865 	devip->zstate = kcalloc(devip->nr_zones,
4866 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4867 	if (!devip->zstate)
4868 		return -ENOMEM;
4869 
4870 	for (i = 0; i < devip->nr_zones; i++) {
4871 		zsp = &devip->zstate[i];
4872 
4873 		zsp->z_start = zstart;
4874 
4875 		if (i < devip->nr_conv_zones) {
4876 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4877 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4878 			zsp->z_wp = (sector_t)-1;
4879 		} else {
4880 			if (devip->zmodel == BLK_ZONED_HM)
4881 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4882 			else
4883 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4884 			zsp->z_cond = ZC1_EMPTY;
4885 			zsp->z_wp = zsp->z_start;
4886 		}
4887 
4888 		if (zsp->z_start + devip->zsize < capacity)
4889 			zsp->z_size = devip->zsize;
4890 		else
4891 			zsp->z_size = capacity - zsp->z_start;
4892 
4893 		zstart += zsp->z_size;
4894 	}
4895 
4896 	return 0;
4897 }
4898 
4899 static struct sdebug_dev_info *sdebug_device_create(
4900 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4901 {
4902 	struct sdebug_dev_info *devip;
4903 
4904 	devip = kzalloc(sizeof(*devip), flags);
4905 	if (devip) {
4906 		if (sdebug_uuid_ctl == 1)
4907 			uuid_gen(&devip->lu_name);
4908 		else if (sdebug_uuid_ctl == 2) {
4909 			if (got_shared_uuid)
4910 				devip->lu_name = shared_uuid;
4911 			else {
4912 				uuid_gen(&shared_uuid);
4913 				got_shared_uuid = true;
4914 				devip->lu_name = shared_uuid;
4915 			}
4916 		}
4917 		devip->sdbg_host = sdbg_host;
4918 		if (sdeb_zbc_in_use) {
4919 			devip->zmodel = sdeb_zbc_model;
4920 			if (sdebug_device_create_zones(devip)) {
4921 				kfree(devip);
4922 				return NULL;
4923 			}
4924 		} else {
4925 			devip->zmodel = BLK_ZONED_NONE;
4926 		}
4927 		devip->sdbg_host = sdbg_host;
4928 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4929 	}
4930 	return devip;
4931 }
4932 
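/*
 * Return the sdebug_dev_info matching sdev's channel/target/lun. If none
 * matches, re-use the first unused entry or create a new one (GFP_ATOMIC),
 * and raise a power-on-reset unit attention on it.
 */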
4933 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4934 {
4935 	struct sdebug_host_info *sdbg_host;
4936 	struct sdebug_dev_info *open_devip = NULL;
4937 	struct sdebug_dev_info *devip;
4938 
4939 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4940 	if (!sdbg_host) {
4941 		pr_err("Host info NULL\n");
4942 		return NULL;
4943 	}
4944 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4945 		if ((devip->used) && (devip->channel == sdev->channel) &&
4946 		    (devip->target == sdev->id) &&
4947 		    (devip->lun == sdev->lun))
4948 			return devip;
4949 		else {
4950 			if ((!devip->used) && (!open_devip))
4951 				open_devip = devip;
4952 		}
4953 	}
4954 	if (!open_devip) { /* try and make a new one */
4955 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4956 		if (!open_devip) {
4957 			pr_err("out of memory at line %d\n", __LINE__);
4958 			return NULL;
4959 		}
4960 	}
4961 
4962 	open_devip->channel = sdev->channel;
4963 	open_devip->target = sdev->id;
4964 	open_devip->lun = sdev->lun;
4965 	open_devip->sdbg_host = sdbg_host;
4966 	atomic_set(&open_devip->num_in_q, 0);
4967 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4968 	open_devip->used = true;
4969 	return open_devip;
4970 }
4971 
4972 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4973 {
4974 	if (sdebug_verbose)
4975 		pr_info("slave_alloc <%u %u %u %llu>\n",
4976 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4977 	return 0;
4978 }
4979 
4980 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4981 {
4982 	struct sdebug_dev_info *devip =
4983 			(struct sdebug_dev_info *)sdp->hostdata;
4984 
4985 	if (sdebug_verbose)
4986 		pr_info("slave_configure <%u %u %u %llu>\n",
4987 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4988 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4989 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4990 	if (devip == NULL) {
4991 		devip = find_build_dev_info(sdp);
4992 		if (devip == NULL)
4993 			return 1;  /* no resources, will be marked offline */
4994 	}
4995 	sdp->hostdata = devip;
4996 	if (sdebug_no_uld)
4997 		sdp->no_uld_attach = 1;
4998 	config_cdb_len(sdp);
4999 	return 0;
5000 }
5001 
5002 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5003 {
5004 	struct sdebug_dev_info *devip =
5005 		(struct sdebug_dev_info *)sdp->hostdata;
5006 
5007 	if (sdebug_verbose)
5008 		pr_info("slave_destroy <%u %u %u %llu>\n",
5009 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5010 	if (devip) {
5011 		/* make this slot available for re-use */
5012 		devip->used = false;
5013 		sdp->hostdata = NULL;
5014 	}
5015 }
5016 
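/* Cancel the deferred completion (hrtimer or work item) of a queued cmd */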
5017 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5018 			   enum sdeb_defer_type defer_t)
5019 {
5020 	if (!sd_dp)
5021 		return;
5022 	if (defer_t == SDEB_DEFER_HRT)
5023 		hrtimer_cancel(&sd_dp->hrt);
5024 	else if (defer_t == SDEB_DEFER_WQ)
5025 		cancel_work_sync(&sd_dp->ew.work);
5026 }
5027 
5028 /* If @cmnd is found, deletes its timer or work queue and returns true;
5029    else returns false */
5030 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5031 {
5032 	unsigned long iflags;
5033 	int j, k, qmax, r_qmax;
5034 	enum sdeb_defer_type l_defer_t;
5035 	struct sdebug_queue *sqp;
5036 	struct sdebug_queued_cmd *sqcp;
5037 	struct sdebug_dev_info *devip;
5038 	struct sdebug_defer *sd_dp;
5039 
5040 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5041 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5042 		qmax = sdebug_max_queue;
5043 		r_qmax = atomic_read(&retired_max_queue);
5044 		if (r_qmax > qmax)
5045 			qmax = r_qmax;
5046 		for (k = 0; k < qmax; ++k) {
5047 			if (test_bit(k, sqp->in_use_bm)) {
5048 				sqcp = &sqp->qc_arr[k];
5049 				if (cmnd != sqcp->a_cmnd)
5050 					continue;
5051 				/* found */
5052 				devip = (struct sdebug_dev_info *)
5053 						cmnd->device->hostdata;
5054 				if (devip)
5055 					atomic_dec(&devip->num_in_q);
5056 				sqcp->a_cmnd = NULL;
5057 				sd_dp = sqcp->sd_dp;
5058 				if (sd_dp) {
5059 					l_defer_t = sd_dp->defer_t;
5060 					sd_dp->defer_t = SDEB_DEFER_NONE;
5061 				} else
5062 					l_defer_t = SDEB_DEFER_NONE;
5063 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5064 				stop_qc_helper(sd_dp, l_defer_t);
5065 				clear_bit(k, sqp->in_use_bm);
5066 				return true;
5067 			}
5068 		}
5069 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5070 	}
5071 	return false;
5072 }
5073 
5074 /* Deletes (stops) timers or work queues of all queued commands */
5075 static void stop_all_queued(void)
5076 {
5077 	unsigned long iflags;
5078 	int j, k;
5079 	enum sdeb_defer_type l_defer_t;
5080 	struct sdebug_queue *sqp;
5081 	struct sdebug_queued_cmd *sqcp;
5082 	struct sdebug_dev_info *devip;
5083 	struct sdebug_defer *sd_dp;
5084 
5085 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5086 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5087 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5088 			if (test_bit(k, sqp->in_use_bm)) {
5089 				sqcp = &sqp->qc_arr[k];
5090 				if (sqcp->a_cmnd == NULL)
5091 					continue;
5092 				devip = (struct sdebug_dev_info *)
5093 					sqcp->a_cmnd->device->hostdata;
5094 				if (devip)
5095 					atomic_dec(&devip->num_in_q);
5096 				sqcp->a_cmnd = NULL;
5097 				sd_dp = sqcp->sd_dp;
5098 				if (sd_dp) {
5099 					l_defer_t = sd_dp->defer_t;
5100 					sd_dp->defer_t = SDEB_DEFER_NONE;
5101 				} else
5102 					l_defer_t = SDEB_DEFER_NONE;
5103 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5104 				stop_qc_helper(sd_dp, l_defer_t);
5105 				clear_bit(k, sqp->in_use_bm);
5106 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5107 			}
5108 		}
5109 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5110 	}
5111 }
5112 
5113 /* Free queued command memory on heap */
5114 static void free_all_queued(void)
5115 {
5116 	int j, k;
5117 	struct sdebug_queue *sqp;
5118 	struct sdebug_queued_cmd *sqcp;
5119 
5120 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5121 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5122 			sqcp = &sqp->qc_arr[k];
5123 			kfree(sqcp->sd_dp);
5124 			sqcp->sd_dp = NULL;
5125 		}
5126 	}
5127 }
5128 
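/* Error handler: abort. Stops the command's deferred completion if it is
 * still queued; always reports SUCCESS to the mid-layer.
 */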
5129 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5130 {
5131 	bool ok;
5132 
5133 	++num_aborts;
5134 	if (SCpnt) {
5135 		ok = stop_queued_cmnd(SCpnt);
5136 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5137 			sdev_printk(KERN_INFO, SCpnt->device,
5138 				    "%s: command%s found\n", __func__,
5139 				    ok ? "" : " not");
5140 	}
5141 	return SUCCESS;
5142 }
5143 
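/* Error handler: LU reset. Raises a power-on-reset unit attention on the
 * addressed device.
 */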
5144 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5145 {
5146 	++num_dev_resets;
5147 	if (SCpnt && SCpnt->device) {
5148 		struct scsi_device *sdp = SCpnt->device;
5149 		struct sdebug_dev_info *devip =
5150 				(struct sdebug_dev_info *)sdp->hostdata;
5151 
5152 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5153 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5154 		if (devip)
5155 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5156 	}
5157 	return SUCCESS;
5158 }
5159 
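/* Error handler: target reset. Raises a bus-reset unit attention on every
 * device whose target id matches the addressed device.
 */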
5160 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5161 {
5162 	struct sdebug_host_info *sdbg_host;
5163 	struct sdebug_dev_info *devip;
5164 	struct scsi_device *sdp;
5165 	struct Scsi_Host *hp;
5166 	int k = 0;
5167 
5168 	++num_target_resets;
5169 	if (!SCpnt)
5170 		goto lie;
5171 	sdp = SCpnt->device;
5172 	if (!sdp)
5173 		goto lie;
5174 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5175 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5176 	hp = sdp->host;
5177 	if (!hp)
5178 		goto lie;
5179 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5180 	if (sdbg_host) {
5181 		list_for_each_entry(devip,
5182 				    &sdbg_host->dev_info_list,
5183 				    dev_list)
5184 			if (devip->target == sdp->id) {
5185 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5186 				++k;
5187 			}
5188 	}
5189 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5190 		sdev_printk(KERN_INFO, sdp,
5191 			    "%s: %d device(s) found in target\n", __func__, k);
5192 lie:
5193 	return SUCCESS;
5194 }
5195 
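/* Error handler: bus reset. Raises a bus-reset unit attention on every
 * device of the addressed device's host.
 */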
5196 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5197 {
5198 	struct sdebug_host_info *sdbg_host;
5199 	struct sdebug_dev_info *devip;
5200 	struct scsi_device *sdp;
5201 	struct Scsi_Host *hp;
5202 	int k = 0;
5203 
5204 	++num_bus_resets;
5205 	if (!(SCpnt && SCpnt->device))
5206 		goto lie;
5207 	sdp = SCpnt->device;
5208 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5209 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5210 	hp = sdp->host;
5211 	if (hp) {
5212 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5213 		if (sdbg_host) {
5214 			list_for_each_entry(devip,
5215 					    &sdbg_host->dev_info_list,
5216 					    dev_list) {
5217 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5218 				++k;
5219 			}
5220 		}
5221 	}
5222 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5223 		sdev_printk(KERN_INFO, sdp,
5224 			    "%s: %d device(s) found in host\n", __func__, k);
5225 lie:
5226 	return SUCCESS;
5227 }
5228 
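/* Error handler: host reset. Raises a bus-reset unit attention on every
 * device of every sdebug host and stops all queued commands.
 */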
5229 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5230 {
5231 	struct sdebug_host_info *sdbg_host;
5232 	struct sdebug_dev_info *devip;
5233 	int k = 0;
5234 
5235 	++num_host_resets;
5236 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5237 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5238 	spin_lock(&sdebug_host_list_lock);
5239 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5240 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5241 				    dev_list) {
5242 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5243 			++k;
5244 		}
5245 	}
5246 	spin_unlock(&sdebug_host_list_lock);
5247 	stop_all_queued();
5248 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5249 		sdev_printk(KERN_INFO, SCpnt->device,
5250 			    "%s: %d device(s) found\n", __func__, k);
5251 	return SUCCESS;
5252 }
5253 
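/*
 * Write an MS-DOS (MBR) partition table into the first sector of the ram
 * store: the 0x55AA signature plus up to sdebug_num_parts primary entries,
 * each cylinder aligned and typed as a plain Linux partition (0x83). Needs
 * a store of at least 1 MiB.
 */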
5254 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5255 {
5256 	struct msdos_partition *pp;
5257 	int starts[SDEBUG_MAX_PARTS + 2];
5258 	int sectors_per_part, num_sectors, k;
5259 	int heads_by_sects, start_sec, end_sec;
5260 
5261 	/* assume partition table already zeroed */
5262 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5263 		return;
5264 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5265 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5266 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5267 	}
5268 	num_sectors = (int)sdebug_store_sectors;
5269 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5270 			   / sdebug_num_parts;
5271 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5272 	starts[0] = sdebug_sectors_per;
5273 	for (k = 1; k < sdebug_num_parts; ++k)
5274 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5275 			    * heads_by_sects;
5276 	starts[sdebug_num_parts] = num_sectors;
5277 	starts[sdebug_num_parts + 1] = 0;
5278 
5279 	ramp[510] = 0x55;	/* magic partition markings */
5280 	ramp[511] = 0xAA;
5281 	pp = (struct msdos_partition *)(ramp + 0x1be);
5282 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5283 		start_sec = starts[k];
5284 		end_sec = starts[k + 1] - 1;
5285 		pp->boot_ind = 0;
5286 
5287 		pp->cyl = start_sec / heads_by_sects;
5288 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5289 			   / sdebug_sectors_per;
5290 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5291 
5292 		pp->end_cyl = end_sec / heads_by_sects;
5293 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5294 			       / sdebug_sectors_per;
5295 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5296 
5297 		pp->start_sect = cpu_to_le32(start_sec);
5298 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5299 		pp->sys_ind = 0x83;	/* plain Linux partition */
5300 	}
5301 }
5302 
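/* Set or clear the 'blocked' flag on all submission queues */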
5303 static void block_unblock_all_queues(bool block)
5304 {
5305 	int j;
5306 	struct sdebug_queue *sqp;
5307 
5308 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5309 		atomic_set(&sqp->blocked, (int)block);
5310 }
5311 
5312 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5313  * commands will be processed normally before triggers occur.
5314  */
5315 static void tweak_cmnd_count(void)
5316 {
5317 	int count, modulo;
5318 
5319 	modulo = abs(sdebug_every_nth);
5320 	if (modulo < 2)
5321 		return;
5322 	block_unblock_all_queues(true);
5323 	count = atomic_read(&sdebug_cmnd_count);
5324 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5325 	block_unblock_all_queues(false);
5326 }
5327 
5328 static void clear_queue_stats(void)
5329 {
5330 	atomic_set(&sdebug_cmnd_count, 0);
5331 	atomic_set(&sdebug_completions, 0);
5332 	atomic_set(&sdebug_miss_cpus, 0);
5333 	atomic_set(&sdebug_a_tsf, 0);
5334 }
5335 
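/*
 * Arm or disarm the per-command error injection flags. Injections fire on
 * every abs(every_nth)-th command; for positive every_nth the flags are
 * cleared again on the commands in between.
 */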
5336 static void setup_inject(struct sdebug_queue *sqp,
5337 			 struct sdebug_queued_cmd *sqcp)
5338 {
5339 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5340 		if (sdebug_every_nth > 0)
5341 			sqcp->inj_recovered = sqcp->inj_transport
5342 				= sqcp->inj_dif
5343 				= sqcp->inj_dix = sqcp->inj_short
5344 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5345 		return;
5346 	}
5347 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5348 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5349 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5350 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5351 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5352 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5353 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5354 }
5355 
5356 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5357 
5358 /* Complete the processing, in the thread that queued a SCSI command to
5359  * this driver, by either finishing the command (invoking its scsi_done()
5360  * callback) or scheduling an hrtimer or work queue item, then return 0.
5361  * Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5362  */
5363 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5364 			 int scsi_result,
5365 			 int (*pfp)(struct scsi_cmnd *,
5366 				    struct sdebug_dev_info *),
5367 			 int delta_jiff, int ndelay)
5368 {
5369 	bool new_sd_dp;
5370 	int k, num_in_q, qdepth, inject;
5371 	unsigned long iflags;
5372 	u64 ns_from_boot = 0;
5373 	struct sdebug_queue *sqp;
5374 	struct sdebug_queued_cmd *sqcp;
5375 	struct scsi_device *sdp;
5376 	struct sdebug_defer *sd_dp;
5377 
5378 	if (unlikely(devip == NULL)) {
5379 		if (scsi_result == 0)
5380 			scsi_result = DID_NO_CONNECT << 16;
5381 		goto respond_in_thread;
5382 	}
5383 	sdp = cmnd->device;
5384 
5385 	if (delta_jiff == 0)
5386 		goto respond_in_thread;
5387 
5388 	sqp = get_queue(cmnd);
5389 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5390 	if (unlikely(atomic_read(&sqp->blocked))) {
5391 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5392 		return SCSI_MLQUEUE_HOST_BUSY;
5393 	}
5394 	num_in_q = atomic_read(&devip->num_in_q);
5395 	qdepth = cmnd->device->queue_depth;
5396 	inject = 0;
5397 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5398 		if (scsi_result) {
5399 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5400 			goto respond_in_thread;
5401 		} else
5402 			scsi_result = device_qfull_result;
5403 	} else if (unlikely(sdebug_every_nth &&
5404 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5405 			    (scsi_result == 0))) {
5406 		if ((num_in_q == (qdepth - 1)) &&
5407 		    (atomic_inc_return(&sdebug_a_tsf) >=
5408 		     abs(sdebug_every_nth))) {
5409 			atomic_set(&sdebug_a_tsf, 0);
5410 			inject = 1;
5411 			scsi_result = device_qfull_result;
5412 		}
5413 	}
5414 
5415 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5416 	if (unlikely(k >= sdebug_max_queue)) {
5417 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5418 		if (scsi_result)
5419 			goto respond_in_thread;
5420 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5421 			scsi_result = device_qfull_result;
5422 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5423 			sdev_printk(KERN_INFO, sdp,
5424 				    "%s: max_queue=%d exceeded, %s\n",
5425 				    __func__, sdebug_max_queue,
5426 				    (scsi_result ? "status: TASK SET FULL" :
5427 						    "report: host busy"));
5428 		if (scsi_result)
5429 			goto respond_in_thread;
5430 		else
5431 			return SCSI_MLQUEUE_HOST_BUSY;
5432 	}
5433 	__set_bit(k, sqp->in_use_bm);
5434 	atomic_inc(&devip->num_in_q);
5435 	sqcp = &sqp->qc_arr[k];
5436 	sqcp->a_cmnd = cmnd;
5437 	cmnd->host_scribble = (unsigned char *)sqcp;
5438 	sd_dp = sqcp->sd_dp;
5439 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5440 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
5441 		setup_inject(sqp, sqcp);
5442 	if (sd_dp == NULL) {
5443 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5444 		if (sd_dp == NULL)
5445 			return SCSI_MLQUEUE_HOST_BUSY;
5446 		new_sd_dp = true;
5447 	} else {
5448 		new_sd_dp = false;
5449 	}
5450 
5451 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5452 		ns_from_boot = ktime_get_boottime_ns();
5453 
5454 	/* one of the resp_*() response functions is called here */
5455 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5456 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5457 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5458 		delta_jiff = ndelay = 0;
5459 	}
5460 	if (cmnd->result == 0 && scsi_result != 0)
5461 		cmnd->result = scsi_result;
5462 
5463 	if (unlikely(sdebug_verbose && cmnd->result))
5464 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5465 			    __func__, cmnd->result);
5466 
5467 	if (delta_jiff > 0 || ndelay > 0) {
5468 		ktime_t kt;
5469 
5470 		if (delta_jiff > 0) {
5471 			u64 ns = jiffies_to_nsecs(delta_jiff);
5472 
5473 			if (sdebug_random && ns < U32_MAX) {
5474 				ns = prandom_u32_max((u32)ns);
5475 			} else if (sdebug_random) {
5476 				ns >>= 12;	/* scale to 4 usec precision */
5477 				if (ns < U32_MAX)	/* over 4 hours max */
5478 					ns = prandom_u32_max((u32)ns);
5479 				ns <<= 12;
5480 			}
5481 			kt = ns_to_ktime(ns);
5482 		} else {	/* ndelay has a 4.2 second max */
5483 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5484 					     (u32)ndelay;
5485 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5486 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5487 
5488 				if (kt <= d) {	/* elapsed duration >= kt */
5489 					sqcp->a_cmnd = NULL;
5490 					atomic_dec(&devip->num_in_q);
5491 					clear_bit(k, sqp->in_use_bm);
5492 					if (new_sd_dp)
5493 						kfree(sd_dp);
5494 					/* call scsi_done() from this thread */
5495 					cmnd->scsi_done(cmnd);
5496 					return 0;
5497 				}
5498 				/* otherwise reduce kt by elapsed time */
5499 				kt -= d;
5500 			}
5501 		}
5502 		if (!sd_dp->init_hrt) {
5503 			sd_dp->init_hrt = true;
5504 			sqcp->sd_dp = sd_dp;
5505 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5506 				     HRTIMER_MODE_REL_PINNED);
5507 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5508 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5509 			sd_dp->qc_idx = k;
5510 		}
5511 		if (sdebug_statistics)
5512 			sd_dp->issuing_cpu = raw_smp_processor_id();
5513 		sd_dp->defer_t = SDEB_DEFER_HRT;
5514 		/* schedule the invocation of scsi_done() for a later time */
5515 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5516 	} else {	/* jdelay < 0, use work queue */
5517 		if (!sd_dp->init_wq) {
5518 			sd_dp->init_wq = true;
5519 			sqcp->sd_dp = sd_dp;
5520 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5521 			sd_dp->qc_idx = k;
5522 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5523 		}
5524 		if (sdebug_statistics)
5525 			sd_dp->issuing_cpu = raw_smp_processor_id();
5526 		sd_dp->defer_t = SDEB_DEFER_WQ;
5527 		if (unlikely(sqcp->inj_cmd_abort))
5528 			sd_dp->aborted = true;
5529 		schedule_work(&sd_dp->ew.work);
5530 		if (unlikely(sqcp->inj_cmd_abort)) {
5531 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5532 				    cmnd->request->tag);
5533 			blk_abort_request(cmnd->request);
5534 		}
5535 	}
5536 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
5537 		     (scsi_result == device_qfull_result)))
5538 		sdev_printk(KERN_INFO, sdp,
5539 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
5540 			    num_in_q, (inject ? "<inject> " : ""),
5541 			    "status: TASK SET FULL");
5542 	return 0;
5543 
5544 respond_in_thread:	/* call back to mid-layer using invocation thread */
5545 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5546 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5547 	if (cmnd->result == 0 && scsi_result != 0)
5548 		cmnd->result = scsi_result;
5549 	cmnd->scsi_done(cmnd);
5550 	return 0;
5551 }
5552 
5553 /* Note: The following macros create attribute files in the
5554    /sys/module/scsi_debug/parameters directory. Unfortunately the driver
5555    is not notified when one of them is changed, so it cannot trigger the
5556    auxiliary actions it performs when the corresponding attribute in the
5557    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5558  */
5559 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5560 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5561 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5562 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5563 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5564 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5565 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5566 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5567 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5568 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5569 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5570 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5571 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5572 module_param_string(inq_product, sdebug_inq_product_id,
5573 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5574 module_param_string(inq_rev, sdebug_inq_product_rev,
5575 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5576 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5577 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5578 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5579 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5580 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5581 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5582 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5583 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5584 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5585 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5586 		   S_IRUGO | S_IWUSR);
5587 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5588 		   S_IRUGO | S_IWUSR);
5589 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5590 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5591 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5592 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5593 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5594 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5595 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5596 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5597 module_param_named(per_host_store, sdebug_per_host_store, bool,
5598 		   S_IRUGO | S_IWUSR);
5599 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5600 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5601 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5602 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5603 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5604 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5605 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5606 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5607 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5608 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5609 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5610 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5611 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5612 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5613 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5614 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5615 		   S_IRUGO | S_IWUSR);
5616 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5617 module_param_named(write_same_length, sdebug_write_same_length, int,
5618 		   S_IRUGO | S_IWUSR);
5619 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5620 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5621 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5622 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5623 
5624 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5625 MODULE_DESCRIPTION("SCSI debug adapter driver");
5626 MODULE_LICENSE("GPL");
5627 MODULE_VERSION(SDEBUG_VERSION);
5628 
5629 MODULE_PARM_DESC(add_host, "add n hosts; in sysfs a negative value removes host(s) (def=1)");
5630 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5631 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5632 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5633 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5634 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5635 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5636 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5637 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5638 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5639 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5640 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5641 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5642 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5643 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5644 		 SDEBUG_VERSION "\")");
5645 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5646 MODULE_PARM_DESC(lbprz,
5647 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5648 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5649 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5650 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5651 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5652 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5653 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5654 MODULE_PARM_DESC(medium_error_count, "count of sectors on which to return a MEDIUM error");
5655 MODULE_PARM_DESC(medium_error_start, "starting sector number at which to return MEDIUM error");
5656 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5657 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5658 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5659 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5660 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5661 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5662 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5663 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5664 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5665 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5666 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5667 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5668 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5669 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5670 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5671 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5672 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5673 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5674 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5675 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5676 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5677 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5678 MODULE_PARM_DESC(uuid_ctl,
5679 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5680 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5681 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5682 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5683 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5684 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5685 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5686 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5687 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
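/*
 * Example invocations (values are illustrative only): emulate one host with
 * four targets sharing a 256 MiB ram store and 4096 byte logical blocks,
 * then a host-managed zoned device with 128 MiB zones, 8 of them
 * conventional:
 *
 *   modprobe scsi_debug num_tgts=4 dev_size_mb=256 sector_size=4096
 *   modprobe scsi_debug zbc=managed zone_size_mb=128 zone_nr_conv=8
 */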
5688 
5689 #define SDEBUG_INFO_LEN 256
5690 static char sdebug_info[SDEBUG_INFO_LEN];
5691 
5692 static const char *scsi_debug_info(struct Scsi_Host *shp)
5693 {
5694 	int k;
5695 
5696 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5697 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5698 	if (k >= (SDEBUG_INFO_LEN - 1))
5699 		return sdebug_info;
5700 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5701 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5702 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5703 		  "statistics", (int)sdebug_statistics);
5704 	return sdebug_info;
5705 }
5706 
5707 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5708 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5709 				 int length)
5710 {
5711 	char arr[16];
5712 	int opts;
5713 	int min_len = length > 15 ? 15 : length;
5714 
5715 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5716 		return -EACCES;
5717 	memcpy(arr, buffer, min_len);
5718 	arr[min_len] = '\0';
5719 	if (1 != sscanf(arr, "%d", &opts))
5720 		return -EINVAL;
5721 	sdebug_opts = opts;
5722 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5723 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5724 	if (sdebug_every_nth != 0)
5725 		tweak_cmnd_count();
5726 	return length;
5727 }
5728 
5729 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5730  * same for each scsi_debug host (if more than one). Some of the counters
5731  * output are not atomic so they may be inaccurate on a busy system. */
5732 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5733 {
5734 	int f, j, l;
5735 	struct sdebug_queue *sqp;
5736 	struct sdebug_host_info *sdhp;
5737 
5738 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5739 		   SDEBUG_VERSION, sdebug_version_date);
5740 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5741 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5742 		   sdebug_opts, sdebug_every_nth);
5743 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5744 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5745 		   sdebug_sector_size, "bytes");
5746 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5747 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5748 		   num_aborts);
5749 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5750 		   num_dev_resets, num_target_resets, num_bus_resets,
5751 		   num_host_resets);
5752 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5753 		   dix_reads, dix_writes, dif_errors);
5754 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5755 		   sdebug_statistics);
5756 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5757 		   atomic_read(&sdebug_cmnd_count),
5758 		   atomic_read(&sdebug_completions),
5759 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5760 		   atomic_read(&sdebug_a_tsf));
5761 
5762 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5763 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5764 		seq_printf(m, "  queue %d:\n", j);
5765 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5766 		if (f != sdebug_max_queue) {
5767 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5768 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5769 				   "first,last bits", f, l);
5770 		}
5771 	}
5772 
5773 	seq_printf(m, "this host_no=%d\n", host->host_no);
5774 	if (!xa_empty(per_store_ap)) {
5775 		bool niu;
5776 		int idx;
5777 		unsigned long l_idx;
5778 		struct sdeb_store_info *sip;
5779 
5780 		seq_puts(m, "\nhost list:\n");
5781 		j = 0;
5782 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5783 			idx = sdhp->si_idx;
5784 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5785 				   sdhp->shost->host_no, idx);
5786 			++j;
5787 		}
5788 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5789 			   sdeb_most_recent_idx);
5790 		j = 0;
5791 		xa_for_each(per_store_ap, l_idx, sip) {
5792 			niu = xa_get_mark(per_store_ap, l_idx,
5793 					  SDEB_XA_NOT_IN_USE);
5794 			idx = (int)l_idx;
5795 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5796 				   (niu ? "  not_in_use" : ""));
5797 			++j;
5798 		}
5799 	}
5800 	return 0;
5801 }
5802 
5803 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5804 {
5805 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5806 }
5807 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5808  * of delay is jiffies.
5809  */
5810 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5811 			   size_t count)
5812 {
5813 	int jdelay, res;
5814 
5815 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5816 		res = count;
5817 		if (sdebug_jdelay != jdelay) {
5818 			int j, k;
5819 			struct sdebug_queue *sqp;
5820 
5821 			block_unblock_all_queues(true);
5822 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5823 			     ++j, ++sqp) {
5824 				k = find_first_bit(sqp->in_use_bm,
5825 						   sdebug_max_queue);
5826 				if (k != sdebug_max_queue) {
5827 					res = -EBUSY;   /* queued commands */
5828 					break;
5829 				}
5830 			}
5831 			if (res > 0) {
5832 				sdebug_jdelay = jdelay;
5833 				sdebug_ndelay = 0;
5834 			}
5835 			block_unblock_all_queues(false);
5836 		}
5837 		return res;
5838 	}
5839 	return -EINVAL;
5840 }
5841 static DRIVER_ATTR_RW(delay);
5842 
5843 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5844 {
5845 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5846 }
5847 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5848 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5849 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5850 			    size_t count)
5851 {
5852 	int ndelay, res;
5853 
5854 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5855 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5856 		res = count;
5857 		if (sdebug_ndelay != ndelay) {
5858 			int j, k;
5859 			struct sdebug_queue *sqp;
5860 
5861 			block_unblock_all_queues(true);
5862 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5863 			     ++j, ++sqp) {
5864 				k = find_first_bit(sqp->in_use_bm,
5865 						   sdebug_max_queue);
5866 				if (k != sdebug_max_queue) {
5867 					res = -EBUSY;   /* queued commands */
5868 					break;
5869 				}
5870 			}
5871 			if (res > 0) {
5872 				sdebug_ndelay = ndelay;
5873 				sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5874 							: DEF_JDELAY;
5875 			}
5876 			block_unblock_all_queues(false);
5877 		}
5878 		return res;
5879 	}
5880 	return -EINVAL;
5881 }
5882 static DRIVER_ATTR_RW(ndelay);
5883 
5884 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5885 {
5886 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5887 }
5888 
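/* opts accepts a decimal value or a hex value with a "0x" prefix */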
5889 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5890 			  size_t count)
5891 {
5892 	int opts;
5893 	char work[20];
5894 
5895 	if (sscanf(buf, "%10s", work) == 1) {
5896 		if (strncasecmp(work, "0x", 2) == 0) {
5897 			if (kstrtoint(work + 2, 16, &opts) == 0)
5898 				goto opts_done;
5899 		} else {
5900 			if (kstrtoint(work, 10, &opts) == 0)
5901 				goto opts_done;
5902 		}
5903 	}
5904 	return -EINVAL;
5905 opts_done:
5906 	sdebug_opts = opts;
5907 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5908 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5909 	tweak_cmnd_count();
5910 	return count;
5911 }
5912 static DRIVER_ATTR_RW(opts);
5913 
5914 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5915 {
5916 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5917 }
5918 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5919 			   size_t count)
5920 {
5921 	int n;
5922 
5923 	/* Cannot change from or to TYPE_ZBC with sysfs */
5924 	if (sdebug_ptype == TYPE_ZBC)
5925 		return -EINVAL;
5926 
5927 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5928 		if (n == TYPE_ZBC)
5929 			return -EINVAL;
5930 		sdebug_ptype = n;
5931 		return count;
5932 	}
5933 	return -EINVAL;
5934 }
5935 static DRIVER_ATTR_RW(ptype);
5936 
5937 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5938 {
5939 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5940 }
5941 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5942 			    size_t count)
5943 {
5944 	int n;
5945 
5946 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5947 		sdebug_dsense = n;
5948 		return count;
5949 	}
5950 	return -EINVAL;
5951 }
5952 static DRIVER_ATTR_RW(dsense);
5953 
5954 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5955 {
5956 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5957 }
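/* A 1 --> 0 transition sets up (or re-uses) a store shared by all hosts; a
 * 0 --> 1 transition erases all stores apart from the first.
 */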
5958 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5959 			     size_t count)
5960 {
5961 	int n, idx;
5962 
5963 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5964 		bool want_store = (n == 0);
5965 		struct sdebug_host_info *sdhp;
5966 
5967 		n = (n > 0);
5968 		sdebug_fake_rw = (sdebug_fake_rw > 0);
5969 		if (sdebug_fake_rw == n)
5970 			return count;	/* not transitioning so do nothing */
5971 
5972 		if (want_store) {	/* 1 --> 0 transition, set up store */
5973 			if (sdeb_first_idx < 0) {
5974 				idx = sdebug_add_store();
5975 				if (idx < 0)
5976 					return idx;
5977 			} else {
5978 				idx = sdeb_first_idx;
5979 				xa_clear_mark(per_store_ap, idx,
5980 					      SDEB_XA_NOT_IN_USE);
5981 			}
5982 			/* make all hosts use same store */
5983 			list_for_each_entry(sdhp, &sdebug_host_list,
5984 					    host_list) {
5985 				if (sdhp->si_idx != idx) {
5986 					xa_set_mark(per_store_ap, sdhp->si_idx,
5987 						    SDEB_XA_NOT_IN_USE);
5988 					sdhp->si_idx = idx;
5989 				}
5990 			}
5991 			sdeb_most_recent_idx = idx;
5992 		} else {	/* 0 --> 1 transition is trigger for shrink */
5993 			sdebug_erase_all_stores(true /* apart from first */);
5994 		}
5995 		sdebug_fake_rw = n;
5996 		return count;
5997 	}
5998 	return -EINVAL;
5999 }
6000 static DRIVER_ATTR_RW(fake_rw);
6001 
6002 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6003 {
6004 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6005 }
6006 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6007 			      size_t count)
6008 {
6009 	int n;
6010 
6011 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6012 		sdebug_no_lun_0 = n;
6013 		return count;
6014 	}
6015 	return -EINVAL;
6016 }
6017 static DRIVER_ATTR_RW(no_lun_0);
6018 
6019 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6020 {
6021 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6022 }
6023 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6024 			      size_t count)
6025 {
6026 	int n;
6027 
6028 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6029 		sdebug_num_tgts = n;
6030 		sdebug_max_tgts_luns();
6031 		return count;
6032 	}
6033 	return -EINVAL;
6034 }
6035 static DRIVER_ATTR_RW(num_tgts);
6036 
6037 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6038 {
6039 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6040 }
6041 static DRIVER_ATTR_RO(dev_size_mb);
6042 
6043 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6044 {
6045 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6046 }
6047 
6048 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6049 				    size_t count)
6050 {
6051 	bool v;
6052 
6053 	if (kstrtobool(buf, &v))
6054 		return -EINVAL;
6055 
6056 	sdebug_per_host_store = v;
6057 	return count;
6058 }
6059 static DRIVER_ATTR_RW(per_host_store);
6060 
6061 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6062 {
6063 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6064 }
6065 static DRIVER_ATTR_RO(num_parts);
6066 
6067 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6068 {
6069 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6070 }
6071 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6072 			       size_t count)
6073 {
6074 	int nth;
6075 
6076 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
6077 		sdebug_every_nth = nth;
6078 		if (nth && !sdebug_statistics) {
6079 			pr_info("every_nth needs statistics=1, set it\n");
6080 			sdebug_statistics = true;
6081 		}
6082 		tweak_cmnd_count();
6083 		return count;
6084 	}
6085 	return -EINVAL;
6086 }
6087 static DRIVER_ATTR_RW(every_nth);
6088 
6089 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6090 {
6091 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6092 }
6093 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6094 			      size_t count)
6095 {
6096 	int n;
6097 	bool changed;
6098 
6099 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6100 		if (n > 256) {
6101 			pr_warn("max_luns can be no more than 256\n");
6102 			return -EINVAL;
6103 		}
6104 		changed = (sdebug_max_luns != n);
6105 		sdebug_max_luns = n;
6106 		sdebug_max_tgts_luns();
6107 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6108 			struct sdebug_host_info *sdhp;
6109 			struct sdebug_dev_info *dp;
6110 
6111 			spin_lock(&sdebug_host_list_lock);
6112 			list_for_each_entry(sdhp, &sdebug_host_list,
6113 					    host_list) {
6114 				list_for_each_entry(dp, &sdhp->dev_info_list,
6115 						    dev_list) {
6116 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6117 						dp->uas_bm);
6118 				}
6119 			}
6120 			spin_unlock(&sdebug_host_list_lock);
6121 		}
6122 		return count;
6123 	}
6124 	return -EINVAL;
6125 }
6126 static DRIVER_ATTR_RW(max_luns);
6127 
6128 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6129 {
6130 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6131 }
6132 /* N.B. max_queue can be changed while there are queued commands. In-flight
6133  * commands beyond the new max_queue will be completed. */
6134 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6135 			       size_t count)
6136 {
6137 	int j, n, k, a;
6138 	struct sdebug_queue *sqp;
6139 
6140 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6141 	    (n <= SDEBUG_CANQUEUE)) {
6142 		block_unblock_all_queues(true);
6143 		k = 0;
6144 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6145 		     ++j, ++sqp) {
6146 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6147 			if (a > k)
6148 				k = a;
6149 		}
6150 		sdebug_max_queue = n;
6151 		if (k == SDEBUG_CANQUEUE)
6152 			atomic_set(&retired_max_queue, 0);
6153 		else if (k >= n)
6154 			atomic_set(&retired_max_queue, k + 1);
6155 		else
6156 			atomic_set(&retired_max_queue, 0);
6157 		block_unblock_all_queues(false);
6158 		return count;
6159 	}
6160 	return -EINVAL;
6161 }
6162 static DRIVER_ATTR_RW(max_queue);
6163 
6164 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6165 {
6166 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6167 }
6168 static DRIVER_ATTR_RO(no_uld);
6169 
6170 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6171 {
6172 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6173 }
6174 static DRIVER_ATTR_RO(scsi_level);
6175 
6176 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6177 {
6178 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6179 }
6180 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6181 				size_t count)
6182 {
6183 	int n;
6184 	bool changed;
6185 
6186 	/* Ignore capacity change for ZBC drives for now */
6187 	if (sdeb_zbc_in_use)
6188 		return -ENOTSUPP;
6189 
6190 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6191 		changed = (sdebug_virtual_gb != n);
6192 		sdebug_virtual_gb = n;
6193 		sdebug_capacity = get_sdebug_capacity();
6194 		if (changed) {
6195 			struct sdebug_host_info *sdhp;
6196 			struct sdebug_dev_info *dp;
6197 
6198 			spin_lock(&sdebug_host_list_lock);
6199 			list_for_each_entry(sdhp, &sdebug_host_list,
6200 					    host_list) {
6201 				list_for_each_entry(dp, &sdhp->dev_info_list,
6202 						    dev_list) {
6203 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6204 						dp->uas_bm);
6205 				}
6206 			}
6207 			spin_unlock(&sdebug_host_list_lock);
6208 		}
6209 		return count;
6210 	}
6211 	return -EINVAL;
6212 }
6213 static DRIVER_ATTR_RW(virtual_gb);
6214 
6215 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6216 {
6217 	/* what is shown is the absolute number of currently active hosts */
6218 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6219 }
6220 
6221 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6222 			      size_t count)
6223 {
6224 	bool found;
6225 	unsigned long idx;
6226 	struct sdeb_store_info *sip;
6227 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6228 	int delta_hosts;
6229 
6230 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6231 		return -EINVAL;
6232 	if (delta_hosts > 0) {
6233 		do {
6234 			found = false;
6235 			if (want_phs) {
6236 				xa_for_each_marked(per_store_ap, idx, sip,
6237 						   SDEB_XA_NOT_IN_USE) {
6238 					sdeb_most_recent_idx = (int)idx;
6239 					found = true;
6240 					break;
6241 				}
6242 				if (found)	/* re-use case */
6243 					sdebug_add_host_helper((int)idx);
6244 				else
6245 					sdebug_do_add_host(true);
6246 			} else {
6247 				sdebug_do_add_host(false);
6248 			}
6249 		} while (--delta_hosts);
6250 	} else if (delta_hosts < 0) {
6251 		do {
6252 			sdebug_do_remove_host(false);
6253 		} while (++delta_hosts);
6254 	}
6255 	return count;
6256 }
6257 static DRIVER_ATTR_RW(add_host);
6258 
6259 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6260 {
6261 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6262 }
6263 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6264 				    size_t count)
6265 {
6266 	int n;
6267 
6268 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6269 		sdebug_vpd_use_hostno = n;
6270 		return count;
6271 	}
6272 	return -EINVAL;
6273 }
6274 static DRIVER_ATTR_RW(vpd_use_hostno);
6275 
6276 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6277 {
6278 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6279 }
6280 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6281 				size_t count)
6282 {
6283 	int n;
6284 
6285 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6286 		if (n > 0)
6287 			sdebug_statistics = true;
6288 		else {
6289 			clear_queue_stats();
6290 			sdebug_statistics = false;
6291 		}
6292 		return count;
6293 	}
6294 	return -EINVAL;
6295 }
6296 static DRIVER_ATTR_RW(statistics);
6297 
6298 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6299 {
6300 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6301 }
6302 static DRIVER_ATTR_RO(sector_size);
6303 
6304 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6305 {
6306 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6307 }
6308 static DRIVER_ATTR_RO(submit_queues);
6309 
6310 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6311 {
6312 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6313 }
6314 static DRIVER_ATTR_RO(dix);
6315 
6316 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6317 {
6318 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6319 }
6320 static DRIVER_ATTR_RO(dif);
6321 
6322 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6323 {
6324 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6325 }
6326 static DRIVER_ATTR_RO(guard);
6327 
6328 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6329 {
6330 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6331 }
6332 static DRIVER_ATTR_RO(ato);
6333 
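/* Show the mapped LBA ranges of the first store as a printable bitmap list;
 * when logical block provisioning is off the whole range is reported.
 */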
6334 static ssize_t map_show(struct device_driver *ddp, char *buf)
6335 {
6336 	ssize_t count = 0;
6337 
6338 	if (!scsi_debug_lbp())
6339 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6340 				 sdebug_store_sectors);
6341 
6342 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6343 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6344 
6345 		if (sip)
6346 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6347 					  (int)map_size, sip->map_storep);
6348 	}
6349 	buf[count++] = '\n';
6350 	buf[count] = '\0';
6351 
6352 	return count;
6353 }
6354 static DRIVER_ATTR_RO(map);
6355 
6356 static ssize_t random_show(struct device_driver *ddp, char *buf)
6357 {
6358 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6359 }
6360 
6361 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6362 			    size_t count)
6363 {
6364 	bool v;
6365 
6366 	if (kstrtobool(buf, &v))
6367 		return -EINVAL;
6368 
6369 	sdebug_random = v;
6370 	return count;
6371 }
6372 static DRIVER_ATTR_RW(random);
6373 
6374 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6375 {
6376 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6377 }
6378 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6379 			       size_t count)
6380 {
6381 	int n;
6382 
6383 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6384 		sdebug_removable = (n > 0);
6385 		return count;
6386 	}
6387 	return -EINVAL;
6388 }
6389 static DRIVER_ATTR_RW(removable);
6390 
6391 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6392 {
6393 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6394 }
6395 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6396 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6397 			       size_t count)
6398 {
6399 	int n;
6400 
6401 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6402 		sdebug_host_lock = (n > 0);
6403 		return count;
6404 	}
6405 	return -EINVAL;
6406 }
6407 static DRIVER_ATTR_RW(host_lock);
6408 
6409 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6410 {
6411 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6412 }
6413 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6414 			    size_t count)
6415 {
6416 	int n;
6417 
6418 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6419 		sdebug_strict = (n > 0);
6420 		return count;
6421 	}
6422 	return -EINVAL;
6423 }
6424 static DRIVER_ATTR_RW(strict);
6425 
6426 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6427 {
6428 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6429 }
6430 static DRIVER_ATTR_RO(uuid_ctl);
6431 
6432 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6433 {
6434 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6435 }
6436 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6437 			     size_t count)
6438 {
6439 	int ret, n;
6440 
6441 	ret = kstrtoint(buf, 0, &n);
6442 	if (ret)
6443 		return ret;
6444 	sdebug_cdb_len = n;
6445 	all_config_cdb_len();
6446 	return count;
6447 }
6448 static DRIVER_ATTR_RW(cdb_len);
6449 
6450 static const char * const zbc_model_strs_a[] = {
6451 	[BLK_ZONED_NONE] = "none",
6452 	[BLK_ZONED_HA]   = "host-aware",
6453 	[BLK_ZONED_HM]   = "host-managed",
6454 };
6455 
6456 static const char * const zbc_model_strs_b[] = {
6457 	[BLK_ZONED_NONE] = "no",
6458 	[BLK_ZONED_HA]   = "aware",
6459 	[BLK_ZONED_HM]   = "managed",
6460 };
6461 
6462 static const char * const zbc_model_strs_c[] = {
6463 	[BLK_ZONED_NONE] = "0",
6464 	[BLK_ZONED_HA]   = "1",
6465 	[BLK_ZONED_HM]   = "2",
6466 };
6467 
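/*
 * Map the zbc= parameter string to a BLK_ZONED_* model, accepting any of
 * the three alias tables above (e.g. "host-managed", "managed" or "2").
 * Returns -EINVAL when nothing matches.
 */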
6468 static int sdeb_zbc_model_str(const char *cp)
6469 {
6470 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6471 
6472 	if (res < 0) {
6473 		res = sysfs_match_string(zbc_model_strs_b, cp);
6474 		if (res < 0) {
6475 			res = sysfs_match_string(zbc_model_strs_c, cp);
6476 			if (res < 0)
6477 				return -EINVAL;
6478 		}
6479 	}
6480 	return res;
6481 }
6482 
6483 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6484 {
6485 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6486 			 zbc_model_strs_a[sdeb_zbc_model]);
6487 }
6488 static DRIVER_ATTR_RO(zbc);
6489 
6490 /* Note: The following array creates attribute files in the
6491    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6492    files (over those found in the /sys/module/scsi_debug/parameters
6493    directory) is that auxiliary actions can be triggered when an attribute
6494    is changed. For example see: add_host_store() above.
6495  */
6496 
6497 static struct attribute *sdebug_drv_attrs[] = {
6498 	&driver_attr_delay.attr,
6499 	&driver_attr_opts.attr,
6500 	&driver_attr_ptype.attr,
6501 	&driver_attr_dsense.attr,
6502 	&driver_attr_fake_rw.attr,
6503 	&driver_attr_no_lun_0.attr,
6504 	&driver_attr_num_tgts.attr,
6505 	&driver_attr_dev_size_mb.attr,
6506 	&driver_attr_num_parts.attr,
6507 	&driver_attr_every_nth.attr,
6508 	&driver_attr_max_luns.attr,
6509 	&driver_attr_max_queue.attr,
6510 	&driver_attr_no_uld.attr,
6511 	&driver_attr_scsi_level.attr,
6512 	&driver_attr_virtual_gb.attr,
6513 	&driver_attr_add_host.attr,
6514 	&driver_attr_per_host_store.attr,
6515 	&driver_attr_vpd_use_hostno.attr,
6516 	&driver_attr_sector_size.attr,
6517 	&driver_attr_statistics.attr,
6518 	&driver_attr_submit_queues.attr,
6519 	&driver_attr_dix.attr,
6520 	&driver_attr_dif.attr,
6521 	&driver_attr_guard.attr,
6522 	&driver_attr_ato.attr,
6523 	&driver_attr_map.attr,
6524 	&driver_attr_random.attr,
6525 	&driver_attr_removable.attr,
6526 	&driver_attr_host_lock.attr,
6527 	&driver_attr_ndelay.attr,
6528 	&driver_attr_strict.attr,
6529 	&driver_attr_uuid_ctl.attr,
6530 	&driver_attr_cdb_len.attr,
6531 	&driver_attr_zbc.attr,
6532 	NULL,
6533 };
6534 ATTRIBUTE_GROUPS(sdebug_drv);
6535 
6536 static struct device *pseudo_primary;
6537 
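/*
 * Module load: validate the parameters, allocate the submission queues,
 * resolve the requested zoned model, then size the ram store and derive a
 * fake drive geometry from it.
 */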
6538 static int __init scsi_debug_init(void)
6539 {
6540 	bool want_store = (sdebug_fake_rw == 0);
6541 	unsigned long sz;
6542 	int k, ret, hosts_to_add;
6543 	int idx = -1;
6544 
6545 	ramdisk_lck_a[0] = &atomic_rw;
6546 	ramdisk_lck_a[1] = &atomic_rw2;
6547 	atomic_set(&retired_max_queue, 0);
6548 
6549 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6550 		pr_warn("ndelay must be less than 1 second, ignored\n");
6551 		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0) {
		sdebug_jdelay = JDELAY_OVERRIDDEN;
	}
6554 
6555 	switch (sdebug_sector_size) {
6556 	case  512:
6557 	case 1024:
6558 	case 2048:
6559 	case 4096:
6560 		break;
6561 	default:
6562 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6563 		return -EINVAL;
6564 	}
6565 
6566 	switch (sdebug_dif) {
6567 	case T10_PI_TYPE0_PROTECTION:
6568 		break;
6569 	case T10_PI_TYPE1_PROTECTION:
6570 	case T10_PI_TYPE2_PROTECTION:
6571 	case T10_PI_TYPE3_PROTECTION:
6572 		have_dif_prot = true;
6573 		break;
6574 
6575 	default:
6576 		pr_err("dif must be 0, 1, 2 or 3\n");
6577 		return -EINVAL;
6578 	}
6579 
6580 	if (sdebug_num_tgts < 0) {
6581 		pr_err("num_tgts must be >= 0\n");
6582 		return -EINVAL;
6583 	}
6584 
6585 	if (sdebug_guard > 1) {
6586 		pr_err("guard must be 0 or 1\n");
6587 		return -EINVAL;
6588 	}
6589 
6590 	if (sdebug_ato > 1) {
6591 		pr_err("ato must be 0 or 1\n");
6592 		return -EINVAL;
6593 	}
6594 
6595 	if (sdebug_physblk_exp > 15) {
6596 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6597 		return -EINVAL;
6598 	}
6599 	if (sdebug_max_luns > 256) {
6600 		pr_warn("max_luns can be no more than 256, use default\n");
6601 		sdebug_max_luns = DEF_MAX_LUNS;
6602 	}
6603 
6604 	if (sdebug_lowest_aligned > 0x3fff) {
6605 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6606 		return -EINVAL;
6607 	}
6608 
6609 	if (submit_queues < 1) {
6610 		pr_err("submit_queues must be 1 or more\n");
6611 		return -EINVAL;
6612 	}
6613 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6614 			       GFP_KERNEL);
6615 	if (sdebug_q_arr == NULL)
6616 		return -ENOMEM;
6617 	for (k = 0; k < submit_queues; ++k)
6618 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6619 
6620 	/*
	 * Check for a host-managed zoned block device specified with
6622 	 * ptype=0x14 or zbc=XXX.
6623 	 */
6624 	if (sdebug_ptype == TYPE_ZBC) {
6625 		sdeb_zbc_model = BLK_ZONED_HM;
6626 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6627 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6628 		if (k < 0) {
6629 			ret = k;
			goto free_q_arr;
6631 		}
6632 		sdeb_zbc_model = k;
6633 		switch (sdeb_zbc_model) {
6634 		case BLK_ZONED_NONE:
6635 		case BLK_ZONED_HA:
6636 			sdebug_ptype = TYPE_DISK;
6637 			break;
6638 		case BLK_ZONED_HM:
6639 			sdebug_ptype = TYPE_ZBC;
6640 			break;
6641 		default:
6642 			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
6644 		}
6645 	}
6646 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6647 		sdeb_zbc_in_use = true;
6648 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6649 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6650 	}
6651 
6652 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6653 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6654 	if (sdebug_dev_size_mb < 1)
6655 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6656 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6657 	sdebug_store_sectors = sz / sdebug_sector_size;
6658 	sdebug_capacity = get_sdebug_capacity();
6659 
6660 	/* play around with geometry, don't waste too much on track 0 */
6661 	sdebug_heads = 8;
6662 	sdebug_sectors_per = 32;
6663 	if (sdebug_dev_size_mb >= 256)
6664 		sdebug_heads = 64;
6665 	else if (sdebug_dev_size_mb >= 16)
6666 		sdebug_heads = 32;
6667 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6668 			       (sdebug_sectors_per * sdebug_heads);
6669 	if (sdebug_cylinders_per >= 1024) {
6670 		/* other LLDs do this; implies >= 1GB ram disk ... */
6671 		sdebug_heads = 255;
6672 		sdebug_sectors_per = 63;
6673 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6674 			       (sdebug_sectors_per * sdebug_heads);
6675 	}
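	/*
	 * Worked example (editor's note): dev_size_mb=256 and
	 * sector_size=512 give store_sectors = 256 * 1048576 / 512 = 524288;
	 * with heads=64 and sectors_per=32 that is 524288 / (64 * 32) = 256
	 * cylinders, safely below the 1024 threshold that triggers the
	 * 255/63 fallback above.
	 */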
6676 	if (scsi_debug_lbp()) {
6677 		sdebug_unmap_max_blocks =
6678 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6679 
6680 		sdebug_unmap_max_desc =
6681 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6682 
6683 		sdebug_unmap_granularity =
6684 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6685 
6686 		if (sdebug_unmap_alignment &&
6687 		    sdebug_unmap_granularity <=
6688 		    sdebug_unmap_alignment) {
6689 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6690 			ret = -EINVAL;
6691 			goto free_q_arr;
6692 		}
6693 	}
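	/*
	 * Example (editor's note): unmap_alignment=4 requires
	 * unmap_granularity >= 5; unmap_granularity <= unmap_alignment
	 * fails above with -EINVAL whenever the alignment is non-zero.
	 */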
6694 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6695 	if (want_store) {
6696 		idx = sdebug_add_store();
6697 		if (idx < 0) {
6698 			ret = idx;
6699 			goto free_q_arr;
6700 		}
6701 	}
6702 
6703 	pseudo_primary = root_device_register("pseudo_0");
6704 	if (IS_ERR(pseudo_primary)) {
6705 		pr_warn("root_device_register() error\n");
6706 		ret = PTR_ERR(pseudo_primary);
6707 		goto free_vm;
6708 	}
6709 	ret = bus_register(&pseudo_lld_bus);
6710 	if (ret < 0) {
6711 		pr_warn("bus_register error: %d\n", ret);
6712 		goto dev_unreg;
6713 	}
6714 	ret = driver_register(&sdebug_driverfs_driver);
6715 	if (ret < 0) {
6716 		pr_warn("driver_register error: %d\n", ret);
6717 		goto bus_unreg;
6718 	}
6719 
6720 	hosts_to_add = sdebug_add_host;
6721 	sdebug_add_host = 0;
6722 
6723 	for (k = 0; k < hosts_to_add; k++) {
6724 		if (want_store && k == 0) {
6725 			ret = sdebug_add_host_helper(idx);
6726 			if (ret < 0) {
6727 				pr_err("add_host_helper k=%d, error=%d\n",
6728 				       k, -ret);
6729 				break;
6730 			}
6731 		} else {
6732 			ret = sdebug_do_add_host(want_store &&
6733 						 sdebug_per_host_store);
6734 			if (ret < 0) {
6735 				pr_err("add_host k=%d error=%d\n", k, -ret);
6736 				break;
6737 			}
6738 		}
6739 	}
6740 	if (sdebug_verbose)
6741 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6742 
6743 	return 0;
6744 
6745 bus_unreg:
6746 	bus_unregister(&pseudo_lld_bus);
6747 dev_unreg:
6748 	root_device_unregister(pseudo_primary);
6749 free_vm:
6750 	sdebug_erase_store(idx, NULL);
6751 free_q_arr:
6752 	kfree(sdebug_q_arr);
6753 	return ret;
6754 }
6755 
6756 static void __exit scsi_debug_exit(void)
6757 {
6758 	int k = sdebug_num_hosts;
6759 
6760 	stop_all_queued();
6761 	for (; k; k--)
6762 		sdebug_do_remove_host(true);
6763 	free_all_queued();
6764 	driver_unregister(&sdebug_driverfs_driver);
6765 	bus_unregister(&pseudo_lld_bus);
6766 	root_device_unregister(pseudo_primary);
6767 
6768 	sdebug_erase_all_stores(false);
6769 	xa_destroy(per_store_ap);
6770 }
6771 
6772 device_initcall(scsi_debug_init);
6773 module_exit(scsi_debug_exit);
6774 
6775 static void sdebug_release_adapter(struct device *dev)
6776 {
6777 	struct sdebug_host_info *sdbg_host;
6778 
6779 	sdbg_host = to_sdebug_host(dev);
6780 	kfree(sdbg_host);
6781 }
6782 
/* idx must be valid; if sip is NULL it will be looked up using idx */
6784 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6785 {
6786 	if (idx < 0)
6787 		return;
6788 	if (!sip) {
6789 		if (xa_empty(per_store_ap))
6790 			return;
6791 		sip = xa_load(per_store_ap, idx);
6792 		if (!sip)
6793 			return;
6794 	}
6795 	vfree(sip->map_storep);
6796 	vfree(sip->dif_storep);
6797 	vfree(sip->storep);
6798 	xa_erase(per_store_ap, idx);
6799 	kfree(sip);
6800 }
6801 
/* Callers pass apart_from_first==false only in the shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	bool skip_first = apart_from_first;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (skip_first)
			skip_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
6817 
/*
 * Returns the xarray index (idx) of the new store element if >= 0, else a
 * negated errno. Limits the number of stores to 65536.
 */
6822 static int sdebug_add_store(void)
6823 {
6824 	int res;
6825 	u32 n_idx;
6826 	unsigned long iflags;
6827 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6828 	struct sdeb_store_info *sip = NULL;
6829 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6830 
6831 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6832 	if (!sip)
6833 		return -ENOMEM;
6834 
6835 	xa_lock_irqsave(per_store_ap, iflags);
6836 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6837 	if (unlikely(res < 0)) {
6838 		xa_unlock_irqrestore(per_store_ap, iflags);
6839 		kfree(sip);
6840 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6841 		return res;
6842 	}
6843 	sdeb_most_recent_idx = n_idx;
6844 	if (sdeb_first_idx < 0)
6845 		sdeb_first_idx = n_idx;
6846 	xa_unlock_irqrestore(per_store_ap, iflags);
6847 
6848 	res = -ENOMEM;
6849 	sip->storep = vzalloc(sz);
6850 	if (!sip->storep) {
6851 		pr_err("user data oom\n");
6852 		goto err;
6853 	}
6854 	if (sdebug_num_parts > 0)
6855 		sdebug_build_parts(sip->storep, sz);
6856 
6857 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6858 	if (sdebug_dix) {
6859 		int dif_size;
6860 
6861 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);
		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);
		memset(sip->dif_storep, 0xff, dif_size);
6872 	}
6873 	/* Logical Block Provisioning */
6874 	if (scsi_debug_lbp()) {
6875 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));
		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}
		pr_info("%lu provisioning blocks\n", map_size);
6885 
6886 		bitmap_zero(sip->map_storep, map_size);
6887 
6888 		/* Map first 1KB for partition table */
6889 		if (sdebug_num_parts)
6890 			map_region(sip, 0, 2);
6891 	}
6892 
6893 	rwlock_init(&sip->macc_lck);
6894 	return (int)n_idx;
6895 err:
6896 	sdebug_erase_store((int)n_idx, sip);
6897 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
6898 	return res;
6899 }
6900 
6901 static int sdebug_add_host_helper(int per_host_idx)
6902 {
6903 	int k, devs_per_host, idx;
6904 	int error = -ENOMEM;
6905 	struct sdebug_host_info *sdbg_host;
6906 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6907 
6908 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6909 	if (!sdbg_host)
6910 		return -ENOMEM;
6911 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6912 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6913 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6914 	sdbg_host->si_idx = idx;
6915 
6916 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6917 
6918 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6919 	for (k = 0; k < devs_per_host; k++) {
6920 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6921 		if (!sdbg_devinfo)
6922 			goto clean;
6923 	}
6924 
6925 	spin_lock(&sdebug_host_list_lock);
6926 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6927 	spin_unlock(&sdebug_host_list_lock);
6928 
6929 	sdbg_host->dev.bus = &pseudo_lld_bus;
6930 	sdbg_host->dev.parent = pseudo_primary;
6931 	sdbg_host->dev.release = &sdebug_release_adapter;
6932 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6933 
6934 	error = device_register(&sdbg_host->dev);
6935 	if (error)
6936 		goto clean;
6937 
6938 	++sdebug_num_hosts;
6939 	return 0;
6940 
6941 clean:
6942 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6943 				 dev_list) {
6944 		list_del(&sdbg_devinfo->dev_list);
6945 		kfree(sdbg_devinfo->zstate);
6946 		kfree(sdbg_devinfo);
6947 	}
6948 	kfree(sdbg_host);
6949 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
6950 	return error;
6951 }
6952 
6953 static int sdebug_do_add_host(bool mk_new_store)
6954 {
6955 	int ph_idx = sdeb_most_recent_idx;
6956 
6957 	if (mk_new_store) {
6958 		ph_idx = sdebug_add_store();
6959 		if (ph_idx < 0)
6960 			return ph_idx;
6961 	}
6962 	return sdebug_add_host_helper(ph_idx);
6963 }
6964 
6965 static void sdebug_do_remove_host(bool the_end)
6966 {
6967 	int idx = -1;
6968 	struct sdebug_host_info *sdbg_host = NULL;
6969 	struct sdebug_host_info *sdbg_host2;
6970 
6971 	spin_lock(&sdebug_host_list_lock);
6972 	if (!list_empty(&sdebug_host_list)) {
6973 		sdbg_host = list_entry(sdebug_host_list.prev,
6974 				       struct sdebug_host_info, host_list);
6975 		idx = sdbg_host->si_idx;
6976 	}
6977 	if (!the_end && idx >= 0) {
6978 		bool unique = true;
6979 
6980 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
6981 			if (sdbg_host2 == sdbg_host)
6982 				continue;
6983 			if (idx == sdbg_host2->si_idx) {
6984 				unique = false;
6985 				break;
6986 			}
6987 		}
6988 		if (unique) {
6989 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6990 			if (idx == sdeb_most_recent_idx)
6991 				--sdeb_most_recent_idx;
6992 		}
6993 	}
6994 	if (sdbg_host)
6995 		list_del(&sdbg_host->host_list);
6996 	spin_unlock(&sdebug_host_list_lock);
6997 
6998 	if (!sdbg_host)
6999 		return;
7000 
7001 	device_unregister(&sdbg_host->dev);
7002 	--sdebug_num_hosts;
7003 }
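
/*
 * Illustrative (editor's sketch; write semantics are assumed from
 * add_host_store(), defined earlier in this file): hosts can be added and
 * removed at run time through the add_host attribute, e.g.:
 *
 *   # echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   # echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * with positive writes ending up in sdebug_do_add_host() and negative
 * ones in sdebug_do_remove_host().
 */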
7004 
7005 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7006 {
7007 	int num_in_q = 0;
7008 	struct sdebug_dev_info *devip;
7009 
7010 	block_unblock_all_queues(true);
7011 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7012 	if (NULL == devip) {
7013 		block_unblock_all_queues(false);
7014 		return	-ENODEV;
7015 	}
7016 	num_in_q = atomic_read(&devip->num_in_q);
7017 
7018 	if (qdepth < 1)
7019 		qdepth = 1;
	/* allow qdepth to exceed the host's max qc_arr elements, for testing */
7021 	if (qdepth > SDEBUG_CANQUEUE + 10)
7022 		qdepth = SDEBUG_CANQUEUE + 10;
7023 	scsi_change_queue_depth(sdev, qdepth);
7024 
7025 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7026 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7027 			    __func__, qdepth, num_in_q);
7028 	}
7029 	block_unblock_all_queues(false);
7030 	return sdev->queue_depth;
7031 }
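
/*
 * Illustrative (editor's sketch; the H:C:T:L address is made up): the SCSI
 * midlayer invokes this callback when user space writes a device's generic
 * queue_depth attribute, e.g.:
 *
 *   # echo 8 > /sys/bus/scsi/devices/0:0:0:0/queue_depth
 *
 * Values are clamped above to the range [1, SDEBUG_CANQUEUE + 10].
 */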
7032 
7033 static bool fake_timeout(struct scsi_cmnd *scp)
7034 {
7035 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7036 		if (sdebug_every_nth < -1)
7037 			sdebug_every_nth = -1;
7038 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7039 			return true; /* ignore command causing timeout */
7040 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7041 			 scsi_medium_access_command(scp))
7042 			return true; /* time out reads and writes */
7043 	}
7044 	return false;
7045 }
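
/*
 * Illustrative (editor's note): with every_nth=100 and SDEBUG_OPT_TIMEOUT
 * set in opts, every 100th command is dropped without a response so that
 * upper-layer timeout and abort handling can be exercised.
 */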
7046 
static bool fake_host_busy(struct scsi_cmnd *scp)
{
	/* guard the modulus against division by zero when every_nth==0 */
	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) && sdebug_every_nth &&
		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
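
/*
 * Editor's note: when fake_host_busy() fires, queuecommand below returns
 * SCSI_MLQUEUE_HOST_BUSY, so the midlayer requeues the command and retries
 * it later.
 */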
7052 
7053 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7054 				   struct scsi_cmnd *scp)
7055 {
7056 	u8 sdeb_i;
7057 	struct scsi_device *sdp = scp->device;
7058 	const struct opcode_info_t *oip;
7059 	const struct opcode_info_t *r_oip;
7060 	struct sdebug_dev_info *devip;
7061 
7062 	u8 *cmd = scp->cmnd;
7063 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7064 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7065 	int k, na;
7066 	int errsts = 0;
7067 	u32 flags;
7068 	u16 sa;
7069 	u8 opcode = cmd[0];
7070 	bool has_wlun_rl;
7071 
7072 	scsi_set_resid(scp, 0);
7073 	if (sdebug_statistics)
7074 		atomic_inc(&sdebug_cmnd_count);
7075 	if (unlikely(sdebug_verbose &&
7076 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7077 		char b[120];
7078 		int n, len, sb;
7079 
7080 		len = scp->cmd_len;
7081 		sb = (int)sizeof(b);
7082 		if (len > 32)
7083 			strcpy(b, "too long, over 32 bytes");
7084 		else {
7085 			for (k = 0, n = 0; k < len && n < sb; ++k)
7086 				n += scnprintf(b + n, sb - n, "%02x ",
7087 					       (u32)cmd[k]);
7088 		}
7089 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7090 			    blk_mq_unique_tag(scp->request), b);
7091 	}
7092 	if (fake_host_busy(scp))
7093 		return SCSI_MLQUEUE_HOST_BUSY;
7094 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7095 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
7096 		goto err_out;
7097 
7098 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7099 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7100 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7101 	if (unlikely(!devip)) {
7102 		devip = find_build_dev_info(sdp);
7103 		if (NULL == devip)
7104 			goto err_out;
7105 	}
7106 	na = oip->num_attached;
7107 	r_pfp = oip->pfp;
7108 	if (na) {	/* multiple commands with this opcode */
7109 		r_oip = oip;
7110 		if (FF_SA & r_oip->flags) {
7111 			if (F_SA_LOW & oip->flags)
7112 				sa = 0x1f & cmd[1];
7113 			else
7114 				sa = get_unaligned_be16(cmd + 8);
7115 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7116 				if (opcode == oip->opcode && sa == oip->sa)
7117 					break;
7118 			}
		} else {   /* no service action, so only check the opcode */
7120 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7121 				if (opcode == oip->opcode)
7122 					break;
7123 			}
7124 		}
7125 		if (k > na) {
7126 			if (F_SA_LOW & r_oip->flags)
7127 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7128 			else if (F_SA_HIGH & r_oip->flags)
7129 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7130 			else
7131 				mk_sense_invalid_opcode(scp);
7132 			goto check_cond;
7133 		}
7134 	}	/* else (when na==0) we assume the oip is a match */
7135 	flags = oip->flags;
7136 	if (unlikely(F_INV_OP & flags)) {
7137 		mk_sense_invalid_opcode(scp);
7138 		goto check_cond;
7139 	}
7140 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
7144 		mk_sense_invalid_opcode(scp);
7145 		goto check_cond;
7146 	}
7147 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7148 		u8 rem;
7149 		int j;
7150 
7151 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7152 			rem = ~oip->len_mask[k] & cmd[k];
7153 			if (rem) {
7154 				for (j = 7; j >= 0; --j, rem <<= 1) {
7155 					if (0x80 & rem)
7156 						break;
7157 				}
7158 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7159 				goto check_cond;
7160 			}
7161 		}
7162 	}
7163 	if (unlikely(!(F_SKIP_UA & flags) &&
7164 		     find_first_bit(devip->uas_bm,
7165 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7166 		errsts = make_ua(scp, devip);
7167 		if (errsts)
7168 			goto check_cond;
7169 	}
7170 	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
7171 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s reports: Not ready: initializing command required\n",
				    my_name);
7176 		errsts = check_condition_result;
7177 		goto fini;
7178 	}
7179 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7180 		goto fini;
7181 	if (unlikely(sdebug_every_nth)) {
7182 		if (fake_timeout(scp))
7183 			return 0;	/* ignore command: make trouble */
7184 	}
7185 	if (likely(oip->pfp))
7186 		pfp = oip->pfp;	/* calls a resp_* function */
7187 	else
7188 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7189 
7190 fini:
7191 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7192 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7193 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7194 					    sdebug_ndelay > 10000)) {
7195 		/*
7196 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7197 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7198 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7199 		 * For Synchronize Cache want 1/20 of SSU's delay.
7200 		 */
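		/*
		 * Worked example (editor's note; HZ=250 and USER_HZ=100
		 * assumed): jdelay=2 with denom=1 gives
		 * mult_frac(100 * 2, 250, 100) = 500 jiffies, i.e. 2 seconds;
		 * with F_SYNC_DELAY, denom=20 yields 25 jiffies (0.1 s).
		 */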
7201 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7202 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7203 
7204 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7205 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7206 	} else
7207 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7208 				     sdebug_ndelay);
7209 check_cond:
7210 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7211 err_out:
7212 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7213 }
7214 
7215 static struct scsi_host_template sdebug_driver_template = {
7216 	.show_info =		scsi_debug_show_info,
7217 	.write_info =		scsi_debug_write_info,
7218 	.proc_name =		sdebug_proc_name,
7219 	.name =			"SCSI DEBUG",
7220 	.info =			scsi_debug_info,
7221 	.slave_alloc =		scsi_debug_slave_alloc,
7222 	.slave_configure =	scsi_debug_slave_configure,
7223 	.slave_destroy =	scsi_debug_slave_destroy,
7224 	.ioctl =		scsi_debug_ioctl,
7225 	.queuecommand =		scsi_debug_queuecommand,
7226 	.change_queue_depth =	sdebug_change_qdepth,
7227 	.eh_abort_handler =	scsi_debug_abort,
7228 	.eh_device_reset_handler = scsi_debug_device_reset,
7229 	.eh_target_reset_handler = scsi_debug_target_reset,
7230 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7231 	.eh_host_reset_handler = scsi_debug_host_reset,
7232 	.can_queue =		SDEBUG_CANQUEUE,
7233 	.this_id =		7,
7234 	.sg_tablesize =		SG_MAX_SEGMENTS,
7235 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7236 	.max_sectors =		-1U,
7237 	.max_segment_size =	-1U,
7238 	.module =		THIS_MODULE,
7239 	.track_queue_depth =	1,
7240 };
7241 
7242 static int sdebug_driver_probe(struct device *dev)
7243 {
7244 	int error = 0;
7245 	struct sdebug_host_info *sdbg_host;
7246 	struct Scsi_Host *hpnt;
7247 	int hprot;
7248 
7249 	sdbg_host = to_sdebug_host(dev);
7250 
7251 	sdebug_driver_template.can_queue = sdebug_max_queue;
7252 	if (!sdebug_clustering)
7253 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7254 
7255 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7256 	if (NULL == hpnt) {
7257 		pr_err("scsi_host_alloc failed\n");
7258 		error = -ENODEV;
7259 		return error;
7260 	}
7261 	if (submit_queues > nr_cpu_ids) {
7262 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7263 			my_name, submit_queues, nr_cpu_ids);
7264 		submit_queues = nr_cpu_ids;
7265 	}
	/*
	 * Decide whether to tell the scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
7268 	hpnt->nr_hw_queues = submit_queues;
7269 
7270 	sdbg_host->shost = hpnt;
7271 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7272 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7273 		hpnt->max_id = sdebug_num_tgts + 1;
7274 	else
7275 		hpnt->max_id = sdebug_num_tgts;
7276 	/* = sdebug_max_luns; */
7277 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7278 
7279 	hprot = 0;
7280 
7281 	switch (sdebug_dif) {
7282 
7283 	case T10_PI_TYPE1_PROTECTION:
7284 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7285 		if (sdebug_dix)
7286 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7287 		break;
7288 
7289 	case T10_PI_TYPE2_PROTECTION:
7290 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7291 		if (sdebug_dix)
7292 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7293 		break;
7294 
7295 	case T10_PI_TYPE3_PROTECTION:
7296 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7297 		if (sdebug_dix)
7298 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7299 		break;
7300 
7301 	default:
7302 		if (sdebug_dix)
7303 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7304 		break;
7305 	}
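
	/*
	 * Example (editor's note): loading with dif=1 dix=1 selects
	 * hprot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION,
	 * and the pr_info() below then reports "host protection DIF1 DIX1".
	 */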
7306 
7307 	scsi_host_set_prot(hpnt, hprot);
7308 
7309 	if (have_dif_prot || sdebug_dix)
7310 		pr_info("host protection%s%s%s%s%s%s%s\n",
7311 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7312 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7313 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7314 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7315 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7316 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7317 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7318 
7319 	if (sdebug_guard == 1)
7320 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7321 	else
7322 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7323 
7324 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7325 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7326 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7327 		sdebug_statistics = true;
7328 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7329 	if (error) {
7330 		pr_err("scsi_add_host failed\n");
7331 		error = -ENODEV;
7332 		scsi_host_put(hpnt);
7333 	} else {
7334 		scsi_scan_host(hpnt);
7335 	}
7336 
7337 	return error;
7338 }
7339 
7340 static int sdebug_driver_remove(struct device *dev)
7341 {
7342 	struct sdebug_host_info *sdbg_host;
7343 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7344 
7345 	sdbg_host = to_sdebug_host(dev);
7346 
7347 	if (!sdbg_host) {
7348 		pr_err("Unable to locate host info\n");
7349 		return -ENODEV;
7350 	}
7351 
7352 	scsi_remove_host(sdbg_host->shost);
7353 
7354 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7355 				 dev_list) {
7356 		list_del(&sdbg_devinfo->dev_list);
7357 		kfree(sdbg_devinfo->zstate);
7358 		kfree(sdbg_devinfo);
7359 	}
7360 
7361 	scsi_host_put(sdbg_host->shost);
7362 	return 0;
7363 }
7364 
7365 static int pseudo_lld_bus_match(struct device *dev,
7366 				struct device_driver *dev_driver)
7367 {
7368 	return 1;
7369 }
7370 
7371 static struct bus_type pseudo_lld_bus = {
7372 	.name = "pseudo",
7373 	.match = pseudo_lld_bus_match,
7374 	.probe = sdebug_driver_probe,
7375 	.remove = sdebug_driver_remove,
7376 	.drv_groups = sdebug_drv_groups,
7377 };
7378