xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 84905d34)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
/* NOTE(review): names ending in _ASCQ below appear to be qualifier values
 * used together with a nearby ASC (e.g. MICROCODE_CHANGED_ASCQ "with
 * TARGET_CHANGED_ASC"); the list mixes ASC and ASCQ constants. */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999	/* NOTE(review): sentinel, presumably marks jdelay as overridden (e.g. by ndelay) — confirm at use sites */
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
191 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
192 
193 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
194  * priority order. In the subset implemented here lower numbers have higher
195  * priority. The UA numbers should be a sequence starting from 0 with
196  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
197 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
198 #define SDEBUG_UA_BUS_RESET 1
199 #define SDEBUG_UA_MODE_CHANGED 2
200 #define SDEBUG_UA_CAPACITY_CHANGED 3
201 #define SDEBUG_UA_LUNS_CHANGED 4
202 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
203 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
204 #define SDEBUG_NUM_UAS 7
205 
206 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
207  * sector on read commands: */
208 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
209 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
210 
211 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
212  * or "peripheral device" addressing (value 0) */
213 #define SAM2_LUN_ADDRESS_METHOD 0
214 
215 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
216  * (for response) per submit queue at one time. Can be reduced by max_queue
217  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
218  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
219  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
220  * but cannot exceed SDEBUG_CANQUEUE .
221  */
222 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
223 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)	/* 192 on 64 bit, 96 on 32 bit archs */
224 #define DEF_CMD_PER_LUN  255
225 
226 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
/* Per-command flags OR-ed into opcode_info_t.flags (see struct below). */
227 #define F_D_IN			1	/* Data-in command (e.g. READ) */
228 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
229 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
230 #define F_D_UNKN		8
231 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
232 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
233 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
234 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
235 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
236 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
237 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
238 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
239 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
240 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
241 
242 /* Useful combinations of the above flags */
243 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
244 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
245 #define FF_SA (F_SA_HIGH | F_SA_LOW)
246 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
247 
248 #define SDEBUG_MAX_PARTS 4
249 
250 #define SDEBUG_MAX_CMD_LEN 32
251 
252 #define SDEB_XA_NOT_IN_USE XA_MARK_1	/* NOTE(review): xarray mark, presumably flags store entries not bound to a host — confirm */
253 
254 /* Zone types (zbcr05 table 25) */
255 enum sdebug_z_type {
256 	ZBC_ZONE_TYPE_CNV	= 0x1,	/* conventional */
257 	ZBC_ZONE_TYPE_SWR	= 0x2,	/* sequential write required */
258 	ZBC_ZONE_TYPE_SWP	= 0x3,	/* sequential write preferred */
259 };
260 
261 /* enumeration names taken from table 26, zbcr05 */
/* Per-zone condition codes; stored in sdeb_zone_state.z_cond below. */
262 enum sdebug_z_cond {
263 	ZBC_NOT_WRITE_POINTER	= 0x0,
264 	ZC1_EMPTY		= 0x1,
265 	ZC2_IMPLICIT_OPEN	= 0x2,
266 	ZC3_EXPLICIT_OPEN	= 0x3,
267 	ZC4_CLOSED		= 0x4,
268 	ZC6_READ_ONLY		= 0xd,
269 	ZC5_FULL		= 0xe,
270 	ZC7_OFFLINE		= 0xf,
271 };
272 
273 struct sdeb_zone_state {	/* ZBC: per zone state */
274 	enum sdebug_z_type z_type;
275 	enum sdebug_z_cond z_cond;
276 	bool z_non_seq_resource;
277 	unsigned int z_size;	/* NOTE(review): zone size — units (sectors?) not evident here, confirm */
278 	sector_t z_start;	/* first sector of this zone */
279 	sector_t z_wp;		/* write pointer position */
280 };
281 
/* Per logical unit (simulated device) state. */
282 struct sdebug_dev_info {
283 	struct list_head dev_list;
284 	unsigned int channel;
285 	unsigned int target;
286 	u64 lun;
287 	uuid_t lu_name;
288 	struct sdebug_host_info *sdbg_host;	/* owning simulated host */
289 	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* */
290 	atomic_t num_in_q;	/* NOTE(review): presumably count of commands in flight on this LU — confirm */
291 	atomic_t stopped;	/* NOTE(review): presumably non-zero after SSU stops the LU — confirm */
292 	bool used;
293 
294 	/* For ZBC devices */
295 	enum blk_zoned_model zmodel;
296 	unsigned int zsize;
297 	unsigned int zsize_shift;
298 	unsigned int nr_zones;
299 	unsigned int nr_conv_zones;
300 	unsigned int nr_imp_open;
301 	unsigned int nr_exp_open;
302 	unsigned int nr_closed;
303 	unsigned int max_open;
304 	struct sdeb_zone_state *zstate;	/* array of nr_zones per-zone states */
305 };
306 
/* State for one simulated SCSI host. */
307 struct sdebug_host_info {
308 	struct list_head host_list;
309 	int si_idx;	/* sdeb_store_info (per host) xarray index */
310 	struct Scsi_Host *shost;
311 	struct device dev;	/* embedded device; see to_sdebug_host() */
312 	struct list_head dev_info_list;	/* of struct sdebug_dev_info */
313 };
314 
315 /* There is an xarray of pointers to this struct's objects, one per host */
316 struct sdeb_store_info {
317 	rwlock_t macc_lck;	/* for atomic media access on this store */
318 	u8 *storep;		/* user data storage (ram) */
319 	struct t10_pi_tuple *dif_storep; /* protection info */
320 	void *map_storep;	/* provisioning map */
				/* NOTE(review): map_storep is presumably a bitmap of provisioned (mapped) blocks — confirm at use sites */
321 };
322 
/* Map an embedded struct device back to its owning sdebug_host_info. */
323 #define to_sdebug_host(d)	\
324 	container_of(d, struct sdebug_host_info, dev)
325 
/* How a command response is deferred: not at all, via hrtimer, or via
 * workqueue (cf. the hrt and ew members of struct sdebug_defer below). */
326 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
327 		      SDEB_DEFER_WQ = 2};
328 
/* State for one deferred (delayed) command completion. */
329 struct sdebug_defer {
330 	struct hrtimer hrt;	/* NOTE(review): presumably used when defer_t == SDEB_DEFER_HRT */
331 	struct execute_work ew;	/* NOTE(review): presumably used when defer_t == SDEB_DEFER_WQ */
332 	int sqa_idx;	/* index of sdebug_queue array */
333 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
334 	int hc_idx;	/* hostwide tag index */
335 	int issuing_cpu;	/* cpu at submission (cf. sdebug_miss_cpus) */
336 	bool init_hrt;	/* NOTE(review): presumably set once hrt is initialized */
337 	bool init_wq;	/* NOTE(review): presumably set once ew is initialized */
338 	bool aborted;	/* true when blk_abort_request() already called */
339 	enum sdeb_defer_type defer_t;
340 };
341 
/* One slot of a submit queue's qc_arr[]: a command awaiting completion. */
342 struct sdebug_queued_cmd {
343 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
344 	 * instance indicates this slot is in use.
345 	 */
346 	struct sdebug_defer *sd_dp;	/* deferred completion state */
347 	struct scsi_cmnd *a_cmnd;	/* the queued SCSI command */
348 };
349 
/* Per submit queue state (see SDEBUG_CANQUEUE comment above). */
350 struct sdebug_queue {
351 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* slot-in-use bitmap for qc_arr */
353 	spinlock_t qc_lock;	/* NOTE(review): presumably protects qc_arr and in_use_bm — confirm */
354 	atomic_t blocked;	/* to temporarily stop more being queued */
355 };
356 
/* Global statistics / bookkeeping counters. */
357 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
358 static atomic_t sdebug_completions;  /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending; /* NOTE(review): presumably set while an every_nth error injection is armed — confirm */
362 
/* Describes one supported cdb: how to validate it and how to respond. */
362 struct opcode_info_t {
363 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
364 				/* for terminating element */
365 	u8 opcode;		/* if num_attached > 0, preferred */
366 	u16 sa;			/* service action */
367 	u32 flags;		/* OR-ed set of F_* flags defined above */
368 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* "response" function; may be NULL */
369 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
370 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
371 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
372 };
374 
375 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* (opcode_ind_arr below performs the mapping; index 0, SDEB_I_INVALID_OPCODE,
 * is the default for unmapped opcodes) */
376 enum sdeb_opcode_index {
377 	SDEB_I_INVALID_OPCODE =	0,
378 	SDEB_I_INQUIRY = 1,
379 	SDEB_I_REPORT_LUNS = 2,
380 	SDEB_I_REQUEST_SENSE = 3,
381 	SDEB_I_TEST_UNIT_READY = 4,
382 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
383 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
384 	SDEB_I_LOG_SENSE = 7,
385 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
386 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
387 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
388 	SDEB_I_START_STOP = 11,
389 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
390 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
391 	SDEB_I_MAINT_IN = 14,
392 	SDEB_I_MAINT_OUT = 15,
393 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
394 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
395 	SDEB_I_RESERVE = 18,		/* 6, 10 */
396 	SDEB_I_RELEASE = 19,		/* 6, 10 */
397 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
398 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
399 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
400 	SDEB_I_SEND_DIAG = 23,
401 	SDEB_I_UNMAP = 24,
402 	SDEB_I_WRITE_BUFFER = 25,
403 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
404 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
405 	SDEB_I_COMP_WRITE = 28,
406 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
407 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
408 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
409 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
410 };
411 
412 
/* Maps cdb[0] (the SCSI opcode byte) to an SDEB_I_* index into
 * opcode_info_arr[]; 0 means SDEB_I_INVALID_OPCODE. */
413 static const unsigned char opcode_ind_arr[256] = {
414 /* 0x0; 0x0->0x1f: 6 byte cdbs */
415 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
416 	    0, 0, 0, 0,
417 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
418 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
419 	    SDEB_I_RELEASE,
420 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
421 	    SDEB_I_ALLOW_REMOVAL, 0,
422 /* 0x20; 0x20->0x3f: 10 byte cdbs */
423 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
425 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
426 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
427 /* 0x40; 0x40->0x5f: 10 byte cdbs */
428 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
429 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
430 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
431 	    SDEB_I_RELEASE,
432 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
433 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
434 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
435 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 	0, SDEB_I_VARIABLE_LEN,
437 /* 0x80; 0x80->0x9f: 16 byte cdbs */
438 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
439 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
440 	0, 0, 0, SDEB_I_VERIFY,
441 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
442 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
443 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
444 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
445 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
446 	     SDEB_I_MAINT_OUT, 0, 0, 0,
447 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
448 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
449 	0, 0, 0, 0, 0, 0, 0, 0,
450 	0, 0, 0, 0, 0, 0, 0, 0,
451 /* 0xc0; 0xc0->0xff: vendor specific */
452 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
453 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 };
457 
458 /*
459  * The following "response" functions return the SCSI mid-level's 4 byte
460  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
461  * command completion, they can mask their return value with
462  * SDEG_RES_IMMED_MASK .
463  */
464 #define SDEG_RES_IMMED_MASK 0x40000000
465 
/* Response function prototypes; these are wired into opcode_info_arr[] and
 * the *_iarr overflow tables below via opcode_info_t.pfp. */
466 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
467 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 
/* Host and backing-store management helpers, defined later in this file. */
496 static int sdebug_do_add_host(bool mk_new_store);
497 static int sdebug_add_host_helper(int per_host_idx);
498 static void sdebug_do_remove_host(bool the_end);
499 static int sdebug_add_store(void);
500 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
501 static void sdebug_erase_all_stores(bool apart_from_first);
502 
503 /*
504  * The following are overflow arrays for cdbs that "hit" the same index in
505  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
506  * should be placed in opcode_info_arr[], the others should be placed here.
507  */
508 static const struct opcode_info_t msense_iarr[] = {
509 	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
510 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
511 };
512 
513 static const struct opcode_info_t mselect_iarr[] = {
514 	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
515 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 };
517 
518 static const struct opcode_info_t read_iarr[] = {
519 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
520 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
521 	     0, 0, 0, 0} },
522 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
523 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
524 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
525 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
526 	     0xc7, 0, 0, 0, 0} },
527 };
528 
/* Overflow entries chained from opcode_info_arr[SDEB_I_WRITE] (WRITE(16)). */
529 static const struct opcode_info_t write_iarr[] = {
530 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
531 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
532 		   0, 0, 0, 0, 0, 0} },
533 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
534 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
535 		   0, 0, 0} },
536 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
537 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
538 		   0xbf, 0xc7, 0, 0, 0, 0} },
539 };
540 
541 static const struct opcode_info_t verify_iarr[] = {
542 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
543 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
544 		   0, 0, 0, 0, 0, 0} },
545 };
546 
547 static const struct opcode_info_t sa_in_16_iarr[] = {
548 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
549 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
550 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
551 };
552 
553 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
554 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
555 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
556 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
557 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
558 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
559 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
560 };
561 
562 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
563 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
564 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
565 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
566 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
567 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
568 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
569 };
570 
571 static const struct opcode_info_t write_same_iarr[] = {
572 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
573 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
574 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
575 };
576 
577 static const struct opcode_info_t reserve_iarr[] = {
578 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
579 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
580 };
581 
582 static const struct opcode_info_t release_iarr[] = {
583 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
584 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
585 };
586 
587 static const struct opcode_info_t sync_cache_iarr[] = {
588 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
589 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
590 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
591 };
592 
593 static const struct opcode_info_t pre_fetch_iarr[] = {
594 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
595 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
596 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
597 };
598 
599 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
600 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
601 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
602 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
603 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
604 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
606 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
607 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
608 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
609 };
610 
611 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	/* NOTE(review): sa 0x6 with a NULL response function, yet the comment
	 * says REPORT ZONES while opcode_info_arr maps REPORT ZONES at sa 0x0
	 * with resp_report_zones — confirm which service action is intended. */
612 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
613 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
614 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
615 };
616 
617 
618 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
619  * plus the terminating elements for logic that scans this table such as
620  * REPORT SUPPORTED OPERATION CODES. */
/* Entry layout (see struct opcode_info_t above):
 *   {num_attached, opcode, sa, flags, pfp, arrp, len_mask} */
621 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
622 /* 0 */
623 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
624 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
625 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
626 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
627 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
628 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
629 	     0, 0} },					/* REPORT LUNS */
630 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
631 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
633 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 /* 5 */
635 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
636 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
637 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
638 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
639 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
640 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
641 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
642 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
643 	     0, 0, 0} },
644 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
645 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
646 	     0, 0} },
647 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
648 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
649 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
650 /* 10 */
651 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
652 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
653 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
654 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
655 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
656 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
657 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
658 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
659 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
660 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
661 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
662 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
663 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
664 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
665 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
666 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
667 				0xff, 0, 0xc7, 0, 0, 0, 0} },
668 /* 15 */
669 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
670 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
671 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
672 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
673 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
674 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
675 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
676 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
677 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
678 	     0xff, 0xff} },
679 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
680 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
681 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
682 	     0} },
683 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
684 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
685 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
686 	     0} },
687 /* 20 */
688 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
689 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
690 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
691 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
693 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* fix: was {0, 0x1d, F_D_OUT, 0, ...} which put F_D_OUT (0x2) into the
	 * u16 sa field and left flags 0; F_D_OUT belongs in the flags field */
694 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
695 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
696 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
697 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
698 /* 25 */
699 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
700 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
701 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
702 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
703 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
704 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
705 		 0, 0, 0, 0, 0} },
706 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
707 	    resp_sync_cache, sync_cache_iarr,
708 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
709 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
710 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
711 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
712 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
713 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
714 	    resp_pre_fetch, pre_fetch_iarr,
715 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
717 
718 /* 30 */
719 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
720 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
721 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
722 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	/* NOTE(review): SDEB_I_ZONE_IN enum comment says "all have data-in",
	 * yet F_D_IN is not set on this entry — confirm whether intended */
723 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
724 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
725 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
726 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
727 /* sentinel */
728 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
729 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
730 };
731 
/*
 * Driver-wide state. The sdebug_* variables initialized from DEF_* macros
 * appear to back the module parameters declared later in this file —
 * TODO confirm against the module_param_named() table.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all simulated hosts; walked under sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* per-store (ramdisk) instances, indexed by xarray id */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* event/error counters, exposed via sysfs or logging elsewhere in file */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* canned scsi_cmnd result values used throughout this driver */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
853 
854 
855 /* Only do the extra work involved in logical block provisioning if one or
856  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
857  * real reads and writes (i.e. not skipping them for speed).
858  */
859 static inline bool scsi_debug_lbp(void)
860 {
861 	return 0 == sdebug_fake_rw &&
862 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
863 }
864 
/*
 * Map an LBA to its byte address inside the ramdisk backing store.
 * do_div() divides 'lba' in place and returns the remainder, so the
 * assignment leaves lba = lba % sdebug_store_sectors (wraps the LBA into
 * the store). If the caller supplied no store (or one without a data
 * area), fall back to the store at xarray index 0; the WARN flags that
 * this path is unexpected. NOTE(review): if xa_load(per_store_ap, 0)
 * ever returned NULL this would dereference it — the comment asserts it
 * cannot; confirm against store creation elsewhere in the file.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
877 
/*
 * Return the T10 PI (protection information) tuple for 'sector'.
 * sector_div() divides in place and returns the remainder, so this
 * wraps the sector into the store before indexing dif_storep.
 * Caller must ensure sip->dif_storep is non-NULL.
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
885 
886 static void sdebug_max_tgts_luns(void)
887 {
888 	struct sdebug_host_info *sdbg_host;
889 	struct Scsi_Host *hpnt;
890 
891 	spin_lock(&sdebug_host_list_lock);
892 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
893 		hpnt = sdbg_host->shost;
894 		if ((hpnt->this_id >= 0) &&
895 		    (sdebug_num_tgts > hpnt->this_id))
896 			hpnt->max_id = sdebug_num_tgts + 1;
897 		else
898 			hpnt->max_id = sdebug_num_tgts;
899 		/* sdebug_max_luns; */
900 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
901 	}
902 	spin_unlock(&sdebug_host_list_lock);
903 }
904 
/* Where the invalid field lives: data-out buffer (0) or the CDB (1) */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/*
 * Build an ILLEGAL REQUEST sense with a SENSE KEY SPECIFIC field
 * pinpointing the offending byte (in_byte) and optionally bit (in_bit).
 * Set in_bit to -1 to indicate no bit position of invalid field.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes: SKSV/C-D/BPV/bit + field ptr */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* ASC depends on whether the bad field was in the CDB or parameters */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer */
	if (sdebug_dsense) {
		/* descriptor format: append a type 2 (SKS) descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS field lives at bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
947 
948 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
949 {
950 	unsigned char *sbuff;
951 
952 	sbuff = scp->sense_buffer;
953 	if (!sbuff) {
954 		sdev_printk(KERN_ERR, scp->device,
955 			    "%s: sense_buffer is NULL\n", __func__);
956 		return;
957 	}
958 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
959 
960 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
961 
962 	if (sdebug_verbose)
963 		sdev_printk(KERN_INFO, scp->device,
964 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
965 			    my_name, key, asc, asq);
966 }
967 
/* Convenience wrapper: ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
972 
973 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
974 			    void __user *arg)
975 {
976 	if (sdebug_verbose) {
977 		if (0x1261 == cmd)
978 			sdev_printk(KERN_INFO, dev,
979 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
980 		else if (0x5331 == cmd)
981 			sdev_printk(KERN_INFO, dev,
982 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
983 				    __func__);
984 		else
985 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
986 				    __func__, cmd);
987 	}
988 	return -EINVAL;
989 	/* return -ENOTTY; // correct return but upsets fdisk */
990 }
991 
992 static void config_cdb_len(struct scsi_device *sdev)
993 {
994 	switch (sdebug_cdb_len) {
995 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
996 		sdev->use_10_for_rw = false;
997 		sdev->use_16_for_rw = false;
998 		sdev->use_10_for_ms = false;
999 		break;
1000 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1001 		sdev->use_10_for_rw = true;
1002 		sdev->use_16_for_rw = false;
1003 		sdev->use_10_for_ms = false;
1004 		break;
1005 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1006 		sdev->use_10_for_rw = true;
1007 		sdev->use_16_for_rw = false;
1008 		sdev->use_10_for_ms = true;
1009 		break;
1010 	case 16:
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = true;
1013 		sdev->use_10_for_ms = true;
1014 		break;
1015 	case 32: /* No knobs to suggest this so same as 16 for now */
1016 		sdev->use_10_for_rw = false;
1017 		sdev->use_16_for_rw = true;
1018 		sdev->use_10_for_ms = true;
1019 		break;
1020 	default:
1021 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1022 			sdebug_cdb_len);
1023 		sdev->use_10_for_rw = true;
1024 		sdev->use_16_for_rw = false;
1025 		sdev->use_10_for_ms = false;
1026 		sdebug_cdb_len = 10;
1027 		break;
1028 	}
1029 }
1030 
1031 static void all_config_cdb_len(void)
1032 {
1033 	struct sdebug_host_info *sdbg_host;
1034 	struct Scsi_Host *shost;
1035 	struct scsi_device *sdev;
1036 
1037 	spin_lock(&sdebug_host_list_lock);
1038 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1039 		shost = sdbg_host->shost;
1040 		shost_for_each_device(sdev, shost) {
1041 			config_cdb_len(sdev);
1042 		}
1043 	}
1044 	spin_unlock(&sdebug_host_list_lock);
1045 }
1046 
1047 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1048 {
1049 	struct sdebug_host_info *sdhp;
1050 	struct sdebug_dev_info *dp;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1054 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1055 			if ((devip->sdbg_host == dp->sdbg_host) &&
1056 			    (devip->target == dp->target))
1057 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1058 		}
1059 	}
1060 	spin_unlock(&sdebug_host_list_lock);
1061 }
1062 
/*
 * If a unit attention (UA) is pending on this device, consume the
 * lowest-numbered one: build the matching sense data, clear its bit and
 * return check_condition_result. Returns 0 when no UA is pending.
 * 'cp' is only assembled (and only used) in the verbose-logging path.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* consume this UA; remaining ones surface on later commands */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1142 
1143 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1144 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1145 				int arr_len)
1146 {
1147 	int act_len;
1148 	struct scsi_data_buffer *sdb = &scp->sdb;
1149 
1150 	if (!sdb->length)
1151 		return 0;
1152 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1153 		return DID_ERROR << 16;
1154 
1155 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1156 				      arr, arr_len);
1157 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1158 
1159 	return 0;
1160 }
1161 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	/* offset beyond the buffer: nothing to copy, not an error */
	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/*
	 * Residual is the untouched tail beyond this write; since calls may
	 * come in any offset order, only ever shrink the current resid.
	 */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
1188 
1189 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1190  * 'arr' or -1 if error.
1191  */
1192 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1193 			       int arr_len)
1194 {
1195 	if (!scsi_bufflen(scp))
1196 		return 0;
1197 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1198 		return -1;
1199 
1200 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1201 }
1202 
1203 
/* INQUIRY strings; arrays are one byte larger than the INQUIRY field
 * widths (8/16/4) to hold the C string NUL terminator. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1211 
/*
 * Device identification VPD page (83h). Returns number of bytes placed
 * in arr. Emits, in order: a T10 vendor-id designator; for real LUNs
 * (dev_id_num >= 0) a logical unit designator (locally assigned UUID
 * when sdebug_uuid_ctl, otherwise NAA-3) plus a relative target port
 * designator; then target-port NAA-3, target-port-group, target-device
 * NAA-3 and a "naa." SCSI name string designator. A negative dev_id_num
 * (w-lun) skips the per-LU designators.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* length of first designator's body */
	num += 4;	/* account for the 4-byte designator header */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1299 
/* Canned payload for VPD page 84h; three 6-byte identifiers */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1305 
1306 /*  Software interface identification VPD page */
1307 static int inquiry_vpd_84(unsigned char *arr)
1308 {
1309 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1310 	return sizeof(vpd84_data);
1311 }
1312 
/*
 * Append one network-services descriptor at offset 'num' in 'arr':
 * a 4-byte header (association/service type, length) followed by the
 * URL, NUL terminated and zero padded to a 4-byte multiple.
 * Returns the new offset.
 */
static int sdeb_net_svc_desc(unsigned char *arr, int num, int assoc_svc,
			     const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;	/* +1 for the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[num++] = assoc_svc;	/* association + service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/*
 * Management network addresses VPD page (85h). Returns the number of
 * bytes placed in arr. The two descriptors were previously built with
 * duplicated inline padding logic; factored into sdeb_net_svc_desc().
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: logical unit association, storage configuration service */
	num = sdeb_net_svc_desc(arr, num, 0x1,
				"https://www.kernel.org/config");
	/* 0x4: logical unit association, logging service */
	num = sdeb_net_svc_desc(arr, num, 0x4,
				"http://www.kernel.org/log");

	return num;
}
1347 
1348 /* SCSI ports VPD page */
1349 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1350 {
1351 	int num = 0;
1352 	int port_a, port_b;
1353 
1354 	port_a = target_dev_id + 1;
1355 	port_b = port_a + 1;
1356 	arr[num++] = 0x0;	/* reserved */
1357 	arr[num++] = 0x0;	/* reserved */
1358 	arr[num++] = 0x0;
1359 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1360 	memset(arr + num, 0, 6);
1361 	num += 6;
1362 	arr[num++] = 0x0;
1363 	arr[num++] = 12;	/* length tp descriptor */
1364 	/* naa-5 target port identifier (A) */
1365 	arr[num++] = 0x61;	/* proto=sas, binary */
1366 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1367 	arr[num++] = 0x0;	/* reserved */
1368 	arr[num++] = 0x8;	/* length */
1369 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1370 	num += 8;
1371 	arr[num++] = 0x0;	/* reserved */
1372 	arr[num++] = 0x0;	/* reserved */
1373 	arr[num++] = 0x0;
1374 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1375 	memset(arr + num, 0, 6);
1376 	num += 6;
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 12;	/* length tp descriptor */
1379 	/* naa-5 target port identifier (B) */
1380 	arr[num++] = 0x61;	/* proto=sas, binary */
1381 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x8;	/* length */
1384 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1385 	num += 8;
1386 
1387 	return num;
1388 }
1389 
1390 
/* Canned 512-byte ATA IDENTIFY-style payload for VPD page 89h */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1434 
1435 /* ATA Information VPD page */
1436 static int inquiry_vpd_89(unsigned char *arr)
1437 {
1438 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1439 	return sizeof(vpd89_data);
1440 }
1441 
1442 
/* Baseline Block Limits page; fields overwritten by inquiry_vpd_b0() */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1449 
1450 /* Block limits VPD page (SBC-3) */
1451 static int inquiry_vpd_b0(unsigned char *arr)
1452 {
1453 	unsigned int gran;
1454 
1455 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1456 
1457 	/* Optimal transfer length granularity */
1458 	if (sdebug_opt_xferlen_exp != 0 &&
1459 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1460 		gran = 1 << sdebug_opt_xferlen_exp;
1461 	else
1462 		gran = 1 << sdebug_physblk_exp;
1463 	put_unaligned_be16(gran, arr + 2);
1464 
1465 	/* Maximum Transfer Length */
1466 	if (sdebug_store_sectors > 0x400)
1467 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1468 
1469 	/* Optimal Transfer Length */
1470 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1471 
1472 	if (sdebug_lbpu) {
1473 		/* Maximum Unmap LBA Count */
1474 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1475 
1476 		/* Maximum Unmap Block Descriptor Count */
1477 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1478 	}
1479 
1480 	/* Unmap Granularity Alignment */
1481 	if (sdebug_unmap_alignment) {
1482 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1483 		arr[28] |= 0x80; /* UGAVALID */
1484 	}
1485 
1486 	/* Optimal Unmap Granularity */
1487 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1488 
1489 	/* Maximum WRITE SAME Length */
1490 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1491 
1492 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1493 
1494 	return sizeof(vpdb0_data);
1495 }
1496 
1497 /* Block device characteristics VPD page (SBC-3) */
1498 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1499 {
1500 	memset(arr, 0, 0x3c);
1501 	arr[0] = 0;
1502 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1503 	arr[2] = 0;
1504 	arr[3] = 5;	/* less than 1.8" */
1505 	if (devip->zmodel == BLK_ZONED_HA)
1506 		arr[4] = 1 << 4;	/* zoned field = 01b */
1507 
1508 	return 0x3c;
1509 }
1510 
1511 /* Logical block provisioning VPD page (SBC-4) */
1512 static int inquiry_vpd_b2(unsigned char *arr)
1513 {
1514 	memset(arr, 0, 0x4);
1515 	arr[0] = 0;			/* threshold exponent */
1516 	if (sdebug_lbpu)
1517 		arr[1] = 1 << 7;
1518 	if (sdebug_lbpws)
1519 		arr[1] |= 1 << 6;
1520 	if (sdebug_lbpws10)
1521 		arr[1] |= 1 << 5;
1522 	if (sdebug_lbprz && scsi_debug_lbp())
1523 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1524 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1525 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1526 	/* threshold_percentage=0 */
1527 	return 0x4;
1528 }
1529 
1530 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1531 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x3c);
1534 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1535 	/*
1536 	 * Set Optimal number of open sequential write preferred zones and
1537 	 * Optimal number of non-sequentially written sequential write
1538 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1539 	 * fields set to zero, apart from Max. number of open swrz_s field.
1540 	 */
1541 	put_unaligned_be32(0xffffffff, &arr[4]);
1542 	put_unaligned_be32(0xffffffff, &arr[8]);
1543 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1544 		put_unaligned_be32(devip->max_open, &arr[12]);
1545 	else
1546 		put_unaligned_be32(0xffffffff, &arr[12]);
1547 	return 0x3c;
1548 }
1549 
1550 #define SDEBUG_LONG_INQ_SZ 96
1551 #define SDEBUG_MAX_INQ_ARR_SZ 584
1552 
1553 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1554 {
1555 	unsigned char pq_pdt;
1556 	unsigned char *arr;
1557 	unsigned char *cmd = scp->cmnd;
1558 	int alloc_len, n, ret;
1559 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1560 
1561 	alloc_len = get_unaligned_be16(cmd + 3);
1562 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1563 	if (! arr)
1564 		return DID_REQUEUE << 16;
1565 	is_disk = (sdebug_ptype == TYPE_DISK);
1566 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1567 	is_disk_zbc = (is_disk || is_zbc);
1568 	have_wlun = scsi_is_wlun(scp->device->lun);
1569 	if (have_wlun)
1570 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1571 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1572 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1573 	else
1574 		pq_pdt = (sdebug_ptype & 0x1f);
1575 	arr[0] = pq_pdt;
1576 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1577 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1578 		kfree(arr);
1579 		return check_condition_result;
1580 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1581 		int lu_id_num, port_group_id, target_dev_id, len;
1582 		char lu_id_str[6];
1583 		int host_no = devip->sdbg_host->shost->host_no;
1584 
1585 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1586 		    (devip->channel & 0x7f);
1587 		if (sdebug_vpd_use_hostno == 0)
1588 			host_no = 0;
1589 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1590 			    (devip->target * 1000) + devip->lun);
1591 		target_dev_id = ((host_no + 1) * 2000) +
1592 				 (devip->target * 1000) - 3;
1593 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1594 		if (0 == cmd[2]) { /* supported vital product data pages */
1595 			arr[1] = cmd[2];	/*sanity */
1596 			n = 4;
1597 			arr[n++] = 0x0;   /* this page */
1598 			arr[n++] = 0x80;  /* unit serial number */
1599 			arr[n++] = 0x83;  /* device identification */
1600 			arr[n++] = 0x84;  /* software interface ident. */
1601 			arr[n++] = 0x85;  /* management network addresses */
1602 			arr[n++] = 0x86;  /* extended inquiry */
1603 			arr[n++] = 0x87;  /* mode page policy */
1604 			arr[n++] = 0x88;  /* SCSI ports */
1605 			if (is_disk_zbc) {	  /* SBC or ZBC */
1606 				arr[n++] = 0x89;  /* ATA information */
1607 				arr[n++] = 0xb0;  /* Block limits */
1608 				arr[n++] = 0xb1;  /* Block characteristics */
1609 				if (is_disk)
1610 					arr[n++] = 0xb2;  /* LB Provisioning */
1611 				if (is_zbc)
1612 					arr[n++] = 0xb6;  /* ZB dev. char. */
1613 			}
1614 			arr[3] = n - 4;	  /* number of supported VPD pages */
1615 		} else if (0x80 == cmd[2]) { /* unit serial number */
1616 			arr[1] = cmd[2];	/*sanity */
1617 			arr[3] = len;
1618 			memcpy(&arr[4], lu_id_str, len);
1619 		} else if (0x83 == cmd[2]) { /* device identification */
1620 			arr[1] = cmd[2];	/*sanity */
1621 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1622 						target_dev_id, lu_id_num,
1623 						lu_id_str, len,
1624 						&devip->lu_name);
1625 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1626 			arr[1] = cmd[2];	/*sanity */
1627 			arr[3] = inquiry_vpd_84(&arr[4]);
1628 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1629 			arr[1] = cmd[2];	/*sanity */
1630 			arr[3] = inquiry_vpd_85(&arr[4]);
1631 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1632 			arr[1] = cmd[2];	/*sanity */
1633 			arr[3] = 0x3c;	/* number of following entries */
1634 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1635 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1636 			else if (have_dif_prot)
1637 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1638 			else
1639 				arr[4] = 0x0;   /* no protection stuff */
1640 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1641 		} else if (0x87 == cmd[2]) { /* mode page policy */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = 0x8;	/* number of following entries */
1644 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1645 			arr[6] = 0x80;	/* mlus, shared */
1646 			arr[8] = 0x18;	 /* protocol specific lu */
1647 			arr[10] = 0x82;	 /* mlus, per initiator port */
1648 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1649 			arr[1] = cmd[2];	/*sanity */
1650 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1651 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1652 			arr[1] = cmd[2];        /*sanity */
1653 			n = inquiry_vpd_89(&arr[4]);
1654 			put_unaligned_be16(n, arr + 2);
1655 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1656 			arr[1] = cmd[2];        /*sanity */
1657 			arr[3] = inquiry_vpd_b0(&arr[4]);
1658 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1659 			arr[1] = cmd[2];        /*sanity */
1660 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1661 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			arr[3] = inquiry_vpd_b2(&arr[4]);
1664 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1667 		} else {
1668 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1669 			kfree(arr);
1670 			return check_condition_result;
1671 		}
1672 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1673 		ret = fill_from_dev_buffer(scp, arr,
1674 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1675 		kfree(arr);
1676 		return ret;
1677 	}
1678 	/* drops through here for a standard inquiry */
1679 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1680 	arr[2] = sdebug_scsi_level;
1681 	arr[3] = 2;    /* response_data_format==2 */
1682 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1683 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1684 	if (sdebug_vpd_use_hostno == 0)
1685 		arr[5] |= 0x10; /* claim: implicit TPGS */
1686 	arr[6] = 0x10; /* claim: MultiP */
1687 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1688 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1689 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1690 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1691 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1692 	/* Use Vendor Specific area to place driver date in ASCII hex */
1693 	memcpy(&arr[36], sdebug_version_date, 8);
1694 	/* version descriptors (2 bytes each) follow */
1695 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1696 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1697 	n = 62;
1698 	if (is_disk) {		/* SBC-4 no version claimed */
1699 		put_unaligned_be16(0x600, arr + n);
1700 		n += 2;
1701 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1702 		put_unaligned_be16(0x525, arr + n);
1703 		n += 2;
1704 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1705 		put_unaligned_be16(0x624, arr + n);
1706 		n += 2;
1707 	}
1708 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1709 	ret = fill_from_dev_buffer(scp, arr,
1710 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1711 	kfree(arr);
1712 	return ret;
1713 }
1714 
/*
 * Current values of the Informational Exceptions control mode page (0x1c).
 * Byte 2 bit 0x4 is the TEST bit and the low nibble of byte 3 is the MRIE
 * field; resp_requests() fakes a THRESHOLD EXCEEDED report when TEST=1 and
 * MRIE=6. Reported by resp_iec_m_pg() and writable via resp_mode_select().
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1718 
/*
 * Respond to the REQUEST SENSE command. Three cases, in priority order:
 * "pollable" progress data while the unit is stopped, a faked THRESHOLD
 * EXCEEDED report when the IE control mode page has TEST=1 and MRIE=6,
 * otherwise an all-zero "no sense" response. Descriptor format (8 bytes)
 * is used when the DESC bit of the CDB is set, else fixed format (18).
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format wanted */
	int alloc_len = cmd[4];		/* ALLOCATION LENGTH (1 byte in CDB) */
	int len = 18;			/* fixed format response length */
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;	/* descriptor format, current */
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;	/* fixed format, current */
			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
}
1772 
1773 static int resp_start_stop(struct scsi_cmnd *scp,
1774 			   struct sdebug_dev_info *devip)
1775 {
1776 	unsigned char *cmd = scp->cmnd;
1777 	int power_cond, stop;
1778 	bool changing;
1779 
1780 	power_cond = (cmd[4] & 0xf0) >> 4;
1781 	if (power_cond) {
1782 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1783 		return check_condition_result;
1784 	}
1785 	stop = !(cmd[4] & 1);
1786 	changing = atomic_read(&devip->stopped) == !stop;
1787 	atomic_xchg(&devip->stopped, stop);
1788 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1789 		return SDEG_RES_IMMED_MASK;
1790 	else
1791 		return 0;
1792 }
1793 
1794 static sector_t get_sdebug_capacity(void)
1795 {
1796 	static const unsigned int gibibyte = 1073741824;
1797 
1798 	if (sdebug_virtual_gb > 0)
1799 		return (sector_t)sdebug_virtual_gb *
1800 			(gibibyte / sdebug_sector_size);
1801 	else
1802 		return sdebug_store_sectors;
1803 }
1804 
1805 #define SDEBUG_READCAP_ARR_SZ 8
1806 static int resp_readcap(struct scsi_cmnd *scp,
1807 			struct sdebug_dev_info *devip)
1808 {
1809 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1810 	unsigned int capac;
1811 
1812 	/* following just in case virtual_gb changed */
1813 	sdebug_capacity = get_sdebug_capacity();
1814 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1815 	if (sdebug_capacity < 0xffffffff) {
1816 		capac = (unsigned int)sdebug_capacity - 1;
1817 		put_unaligned_be32(capac, arr + 0);
1818 	} else
1819 		put_unaligned_be32(0xffffffff, arr + 0);
1820 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1821 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1822 }
1823 
1824 #define SDEBUG_READCAP16_ARR_SZ 32
1825 static int resp_readcap16(struct scsi_cmnd *scp,
1826 			  struct sdebug_dev_info *devip)
1827 {
1828 	unsigned char *cmd = scp->cmnd;
1829 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1830 	int alloc_len;
1831 
1832 	alloc_len = get_unaligned_be32(cmd + 10);
1833 	/* following just in case virtual_gb changed */
1834 	sdebug_capacity = get_sdebug_capacity();
1835 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1836 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1837 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1838 	arr[13] = sdebug_physblk_exp & 0xf;
1839 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1840 
1841 	if (scsi_debug_lbp()) {
1842 		arr[14] |= 0x80; /* LBPME */
1843 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1844 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1845 		 * in the wider field maps to 0 in this field.
1846 		 */
1847 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1848 			arr[14] |= 0x40;
1849 	}
1850 
1851 	arr[15] = sdebug_lowest_aligned & 0xff;
1852 
1853 	if (have_dif_prot) {
1854 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1855 		arr[12] |= 1; /* PROT_EN */
1856 	}
1857 
1858 	return fill_from_dev_buffer(scp, arr,
1859 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1860 }
1861 
1862 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1863 
1864 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1865 			      struct sdebug_dev_info *devip)
1866 {
1867 	unsigned char *cmd = scp->cmnd;
1868 	unsigned char *arr;
1869 	int host_no = devip->sdbg_host->shost->host_no;
1870 	int n, ret, alen, rlen;
1871 	int port_group_a, port_group_b, port_a, port_b;
1872 
1873 	alen = get_unaligned_be32(cmd + 6);
1874 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1875 	if (! arr)
1876 		return DID_REQUEUE << 16;
1877 	/*
1878 	 * EVPD page 0x88 states we have two ports, one
1879 	 * real and a fake port with no device connected.
1880 	 * So we create two port groups with one port each
1881 	 * and set the group with port B to unavailable.
1882 	 */
1883 	port_a = 0x1; /* relative port A */
1884 	port_b = 0x2; /* relative port B */
1885 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1886 			(devip->channel & 0x7f);
1887 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1888 			(devip->channel & 0x7f) + 0x80;
1889 
1890 	/*
1891 	 * The asymmetric access state is cycled according to the host_id.
1892 	 */
1893 	n = 4;
1894 	if (sdebug_vpd_use_hostno == 0) {
1895 		arr[n++] = host_no % 3; /* Asymm access state */
1896 		arr[n++] = 0x0F; /* claim: all states are supported */
1897 	} else {
1898 		arr[n++] = 0x0; /* Active/Optimized path */
1899 		arr[n++] = 0x01; /* only support active/optimized paths */
1900 	}
1901 	put_unaligned_be16(port_group_a, arr + n);
1902 	n += 2;
1903 	arr[n++] = 0;    /* Reserved */
1904 	arr[n++] = 0;    /* Status code */
1905 	arr[n++] = 0;    /* Vendor unique */
1906 	arr[n++] = 0x1;  /* One port per group */
1907 	arr[n++] = 0;    /* Reserved */
1908 	arr[n++] = 0;    /* Reserved */
1909 	put_unaligned_be16(port_a, arr + n);
1910 	n += 2;
1911 	arr[n++] = 3;    /* Port unavailable */
1912 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1913 	put_unaligned_be16(port_group_b, arr + n);
1914 	n += 2;
1915 	arr[n++] = 0;    /* Reserved */
1916 	arr[n++] = 0;    /* Status code */
1917 	arr[n++] = 0;    /* Vendor unique */
1918 	arr[n++] = 0x1;  /* One port per group */
1919 	arr[n++] = 0;    /* Reserved */
1920 	arr[n++] = 0;    /* Reserved */
1921 	put_unaligned_be16(port_b, arr + n);
1922 	n += 2;
1923 
1924 	rlen = n - 4;
1925 	put_unaligned_be32(rlen, arr + 0);
1926 
1927 	/*
1928 	 * Return the smallest value of either
1929 	 * - The allocated length
1930 	 * - The constructed command length
1931 	 * - The maximum array size
1932 	 */
1933 	rlen = min_t(int, alen, n);
1934 	ret = fill_from_dev_buffer(scp, arr,
1935 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1936 	kfree(arr);
1937 	return ret;
1938 }
1939 
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN). Walks the driver's
 * opcode_info_arr table. REPORTING OPTIONS 0 lists every supported command
 * (8 byte descriptors, 20 with RCTD); options 1-3 describe one command,
 * returning its CDB usage mask from len_mask[].
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* RCTD: include timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);	/* REQUESTED SERVICE ACTION */
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)	/* cap the working buffer at 8 KiB */
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* bytes per command descriptor */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;	/* remember row; inner loop re-points oip */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* SUPPORT: not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* SUPPORT: supported, standard */
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage mask for this command */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to buffer size, then to the requested allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2090 
2091 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2092 			  struct sdebug_dev_info *devip)
2093 {
2094 	bool repd;
2095 	u32 alloc_len, len;
2096 	u8 arr[16];
2097 	u8 *cmd = scp->cmnd;
2098 
2099 	memset(arr, 0, sizeof(arr));
2100 	repd = !!(cmd[2] & 0x80);
2101 	alloc_len = get_unaligned_be32(cmd + 6);
2102 	if (alloc_len < 4) {
2103 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2104 		return check_condition_result;
2105 	}
2106 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2107 	arr[1] = 0x1;		/* ITNRS */
2108 	if (repd) {
2109 		arr[3] = 0xc;
2110 		len = 16;
2111 	} else
2112 		len = 4;
2113 
2114 	len = (len < alloc_len) ? len : alloc_len;
2115 	return fill_from_dev_buffer(scp, arr, len);
2116 }
2117 
2118 /* <<Following mode page info copied from ST318451LW>> */
2119 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: report all zero */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2130 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0, 0, 0,
						      0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: report all zero */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2141 
2142 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2143 {       /* Format device page for mode_sense */
2144 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2145 				     0, 0, 0, 0, 0, 0, 0, 0,
2146 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2147 
2148 	memcpy(p, format_pg, sizeof(format_pg));
2149 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2150 	put_unaligned_be16(sdebug_sector_size, p + 12);
2151 	if (sdebug_removable)
2152 		p[20] |= 0x20; /* should agree with INQUIRY */
2153 	if (1 == pcontrol)
2154 		memset(p + 2, 0, sizeof(format_pg) - 2);
2155 	return sizeof(format_pg);
2156 }
2157 
/*
 * Current values of the Caching mode page (0x8); byte 2 bit 0x4 is WCE.
 * Reported by resp_caching_pg() and writable via resp_mode_select().
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2161 
2162 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2163 { 	/* Caching page for mode_sense */
2164 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2165 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2166 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2167 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2168 
2169 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2170 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2171 	memcpy(p, caching_pg, sizeof(caching_pg));
2172 	if (1 == pcontrol)
2173 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2174 	else if (2 == pcontrol)
2175 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2176 	return sizeof(caching_pg);
2177 }
2178 
/*
 * Current values of the Control mode page (0xa); byte 2 bit 0x4 is D_SENSE.
 * Kept in sync with sdebug_dsense/sdebug_ato by resp_ctrl_m_pg() and
 * writable via resp_mode_select().
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2181 
2182 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2183 { 	/* Control mode page for mode_sense */
2184 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2185 					0, 0, 0, 0};
2186 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2187 				     0, 0, 0x2, 0x4b};
2188 
2189 	if (sdebug_dsense)
2190 		ctrl_m_pg[2] |= 0x4;
2191 	else
2192 		ctrl_m_pg[2] &= ~0x4;
2193 
2194 	if (sdebug_ato)
2195 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2196 
2197 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2198 	if (1 == pcontrol)
2199 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2200 	else if (2 == pcontrol)
2201 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2202 	return sizeof(ctrl_m_pg);
2203 }
2204 
2205 
2206 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2207 {	/* Informational Exceptions control mode page for mode_sense */
2208 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2209 				       0, 0, 0x0, 0x0};
2210 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2211 				      0, 0, 0x0, 0x0};
2212 
2213 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2214 	if (1 == pcontrol)
2215 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2216 	else if (2 == pcontrol)
2217 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2218 	return sizeof(iec_m_pg);
2219 }
2220 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP protocol-specific port mode page, short format (0x19) */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: report all zero */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2231 
2232 
/*
 * SAS phy control and discover mode subpage (page 0x19, subpage 0x1) for
 * MODE SENSE. Reports two phys; the SAS addresses are patched into the
 * template from naa3_comp_a/naa3_comp_c, and the attached device names
 * derive from target_dev_id. pcontrol==1 (changeable values) zeros
 * everything after the 4 byte subpage header.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch SAS addresses into both phy descriptors before the copy */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* attached device names for phy A and phy B */
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: zero after subpage header */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2265 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc,
						     0, 0x6, 0x10, 0,
						     0, 0, 0, 0,
						     0, 0, 0, 0};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: zero after subpage header */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2277 
2278 #define SDEBUG_MAX_MSENSE_SZ 256
2279 
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). Builds a mode parameter
 * header, an optional block descriptor (8 bytes, or 16 when LLBAA is set
 * on the 10 byte variant) and then the requested mode page(s), and
 * returns the result via fill_from_dev_buffer().
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* requested page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short block descriptor: 32 bit count + block length */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long block descriptor: 64 bit count + 32 bit block length */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself: 1 byte in MS(6), 2 in MS(10) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2443 
2444 #define SDEBUG_MAX_MSELECT_SZ 512
2445 
2446 static int resp_mode_select(struct scsi_cmnd *scp,
2447 			    struct sdebug_dev_info *devip)
2448 {
2449 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2450 	int param_len, res, mpage;
2451 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2452 	unsigned char *cmd = scp->cmnd;
2453 	int mselect6 = (MODE_SELECT == cmd[0]);
2454 
2455 	memset(arr, 0, sizeof(arr));
2456 	pf = cmd[1] & 0x10;
2457 	sp = cmd[1] & 0x1;
2458 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2459 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2460 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2461 		return check_condition_result;
2462 	}
2463 	res = fetch_to_dev_buffer(scp, arr, param_len);
2464 	if (-1 == res)
2465 		return DID_ERROR << 16;
2466 	else if (sdebug_verbose && (res < param_len))
2467 		sdev_printk(KERN_INFO, scp->device,
2468 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2469 			    __func__, param_len, res);
2470 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2471 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2472 	if (md_len > 2) {
2473 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2474 		return check_condition_result;
2475 	}
2476 	off = bd_len + (mselect6 ? 4 : 8);
2477 	mpage = arr[off] & 0x3f;
2478 	ps = !!(arr[off] & 0x80);
2479 	if (ps) {
2480 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2481 		return check_condition_result;
2482 	}
2483 	spf = !!(arr[off] & 0x40);
2484 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2485 		       (arr[off + 1] + 2);
2486 	if ((pg_len + off) > param_len) {
2487 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2488 				PARAMETER_LIST_LENGTH_ERR, 0);
2489 		return check_condition_result;
2490 	}
2491 	switch (mpage) {
2492 	case 0x8:      /* Caching Mode page */
2493 		if (caching_pg[1] == arr[off + 1]) {
2494 			memcpy(caching_pg + 2, arr + off + 2,
2495 			       sizeof(caching_pg) - 2);
2496 			goto set_mode_changed_ua;
2497 		}
2498 		break;
2499 	case 0xa:      /* Control Mode page */
2500 		if (ctrl_m_pg[1] == arr[off + 1]) {
2501 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2502 			       sizeof(ctrl_m_pg) - 2);
2503 			if (ctrl_m_pg[4] & 0x8)
2504 				sdebug_wp = true;
2505 			else
2506 				sdebug_wp = false;
2507 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2508 			goto set_mode_changed_ua;
2509 		}
2510 		break;
2511 	case 0x1c:      /* Informational Exceptions Mode page */
2512 		if (iec_m_pg[1] == arr[off + 1]) {
2513 			memcpy(iec_m_pg + 2, arr + off + 2,
2514 			       sizeof(iec_m_pg) - 2);
2515 			goto set_mode_changed_ua;
2516 		}
2517 		break;
2518 	default:
2519 		break;
2520 	}
2521 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2522 	return check_condition_result;
2523 set_mode_changed_ua:
2524 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2525 	return 0;
2526 }
2527 
/* Build the Temperature log page (0xd) payload in arr; returns its length. */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature: 38 C */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature: 65 C */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2537 
2538 static int resp_ie_l_pg(unsigned char *arr)
2539 {
2540 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2541 		};
2542 
2543 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2544 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2545 		arr[4] = THRESHOLD_EXCEEDED;
2546 		arr[5] = 0xff;
2547 	}
2548 	return sizeof(ie_l_pg);
2549 }
2550 
2551 #define SDEBUG_MAX_LSENSE_SZ 512
2552 
2553 static int resp_log_sense(struct scsi_cmnd *scp,
2554 			  struct sdebug_dev_info *devip)
2555 {
2556 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2557 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2558 	unsigned char *cmd = scp->cmnd;
2559 
2560 	memset(arr, 0, sizeof(arr));
2561 	ppc = cmd[1] & 0x2;
2562 	sp = cmd[1] & 0x1;
2563 	if (ppc || sp) {
2564 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2565 		return check_condition_result;
2566 	}
2567 	pcode = cmd[2] & 0x3f;
2568 	subpcode = cmd[3] & 0xff;
2569 	alloc_len = get_unaligned_be16(cmd + 7);
2570 	arr[0] = pcode;
2571 	if (0 == subpcode) {
2572 		switch (pcode) {
2573 		case 0x0:	/* Supported log pages log page */
2574 			n = 4;
2575 			arr[n++] = 0x0;		/* this page */
2576 			arr[n++] = 0xd;		/* Temperature */
2577 			arr[n++] = 0x2f;	/* Informational exceptions */
2578 			arr[3] = n - 4;
2579 			break;
2580 		case 0xd:	/* Temperature log page */
2581 			arr[3] = resp_temp_l_pg(arr + 4);
2582 			break;
2583 		case 0x2f:	/* Informational exceptions log page */
2584 			arr[3] = resp_ie_l_pg(arr + 4);
2585 			break;
2586 		default:
2587 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2588 			return check_condition_result;
2589 		}
2590 	} else if (0xff == subpcode) {
2591 		arr[0] |= 0x40;
2592 		arr[1] = subpcode;
2593 		switch (pcode) {
2594 		case 0x0:	/* Supported log pages and subpages log page */
2595 			n = 4;
2596 			arr[n++] = 0x0;
2597 			arr[n++] = 0x0;		/* 0,0 page */
2598 			arr[n++] = 0x0;
2599 			arr[n++] = 0xff;	/* this page */
2600 			arr[n++] = 0xd;
2601 			arr[n++] = 0x0;		/* Temperature */
2602 			arr[n++] = 0x2f;
2603 			arr[n++] = 0x0;	/* Informational exceptions */
2604 			arr[3] = n - 4;
2605 			break;
2606 		case 0xd:	/* Temperature subpages */
2607 			n = 4;
2608 			arr[n++] = 0xd;
2609 			arr[n++] = 0x0;		/* Temperature */
2610 			arr[3] = n - 4;
2611 			break;
2612 		case 0x2f:	/* Informational exceptions subpages */
2613 			n = 4;
2614 			arr[n++] = 0x2f;
2615 			arr[n++] = 0x0;		/* Informational exceptions */
2616 			arr[3] = n - 4;
2617 			break;
2618 		default:
2619 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2620 			return check_condition_result;
2621 		}
2622 	} else {
2623 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2624 		return check_condition_result;
2625 	}
2626 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2627 	return fill_from_dev_buffer(scp, arr,
2628 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2629 }
2630 
2631 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2632 {
2633 	return devip->nr_zones != 0;
2634 }
2635 
2636 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2637 					unsigned long long lba)
2638 {
2639 	return &devip->zstate[lba >> devip->zsize_shift];
2640 }
2641 
2642 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2643 {
2644 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2645 }
2646 
2647 static void zbc_close_zone(struct sdebug_dev_info *devip,
2648 			   struct sdeb_zone_state *zsp)
2649 {
2650 	enum sdebug_z_cond zc;
2651 
2652 	if (zbc_zone_is_conv(zsp))
2653 		return;
2654 
2655 	zc = zsp->z_cond;
2656 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2657 		return;
2658 
2659 	if (zc == ZC2_IMPLICIT_OPEN)
2660 		devip->nr_imp_open--;
2661 	else
2662 		devip->nr_exp_open--;
2663 
2664 	if (zsp->z_wp == zsp->z_start) {
2665 		zsp->z_cond = ZC1_EMPTY;
2666 	} else {
2667 		zsp->z_cond = ZC4_CLOSED;
2668 		devip->nr_closed++;
2669 	}
2670 }
2671 
2672 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2673 {
2674 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2675 	unsigned int i;
2676 
2677 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2678 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2679 			zbc_close_zone(devip, zsp);
2680 			return;
2681 		}
2682 	}
2683 }
2684 
2685 static void zbc_open_zone(struct sdebug_dev_info *devip,
2686 			  struct sdeb_zone_state *zsp, bool explicit)
2687 {
2688 	enum sdebug_z_cond zc;
2689 
2690 	if (zbc_zone_is_conv(zsp))
2691 		return;
2692 
2693 	zc = zsp->z_cond;
2694 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2695 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2696 		return;
2697 
2698 	/* Close an implicit open zone if necessary */
2699 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2700 		zbc_close_zone(devip, zsp);
2701 	else if (devip->max_open &&
2702 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2703 		zbc_close_imp_open_zone(devip);
2704 
2705 	if (zsp->z_cond == ZC4_CLOSED)
2706 		devip->nr_closed--;
2707 	if (explicit) {
2708 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2709 		devip->nr_exp_open++;
2710 	} else {
2711 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2712 		devip->nr_imp_open++;
2713 	}
2714 }
2715 
/*
 * Advance the write pointer of the zone containing lba by num blocks
 * after a successful write. Sequential-write-required zones simply move
 * the WP forward (the access checks already enforced WP alignment).
 * For other write-pointer zones the write may land anywhere, so the WP
 * only advances when the write extends past it, and a write that does
 * not start at the WP marks the zone as using a non-sequential-write
 * resource. A zone whose WP reaches its end becomes FULL; the loop
 * handles writes spanning several zones.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	/* Conventional zones have no write pointer */
	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Crosses the zone boundary: consume up to the
			 * boundary, then continue in the next zone */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* Entirely below the WP: WP unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2757 
/*
 * Enforce ZBC access rules for a read or write of num blocks starting
 * at lba. Returns 0 when the access is permitted, otherwise sets sense
 * data and returns check_condition_result. A permitted write to an
 * EMPTY or CLOSED write-pointer zone implicitly opens it.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware: no read restrictions */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ...but they must not spill into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2829 
/*
 * Validate lba/num for a media access command: range check against the
 * device capacity, transfer length check against the store size, write
 * protection, and (for zoned devices) the ZBC access rules. Returns 0
 * when the access is allowed, else a check condition with sense set.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
2856 
2857 /*
2858  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2859  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2860  * that access any of the "stores" in struct sdeb_store_info should call this
2861  * function with bug_if_fake_rw set to true.
2862  */
2863 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2864 						bool bug_if_fake_rw)
2865 {
2866 	if (sdebug_fake_rw) {
2867 		BUG_ON(bug_if_fake_rw);	/* See note above */
2868 		return NULL;
2869 	}
2870 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2871 }
2872 
/*
 * Copy num sectors at lba between the command's scatter-gather list and
 * the backing store, wrapping around the end of the store when the
 * emulated capacity exceeds the store size. sg_skip bytes of the sgl
 * are skipped first. Returns the number of bytes copied, 0 for an empty
 * transfer or missing store, or -1 if the command's data direction does
 * not match do_write.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* block = lba % store sectors; do_div() leaves the quotient in lba */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Wrapped: copy the remainder from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2915 
2916 /* Returns number of bytes copied or -1 if error. */
2917 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2918 {
2919 	struct scsi_data_buffer *sdb = &scp->sdb;
2920 
2921 	if (!sdb->length)
2922 		return 0;
2923 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2924 		return -1;
2925 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2926 			      num * sdebug_sector_size, 0, true);
2927 }
2928 
2929 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2930  * arr into sip->storep+lba and return true. If comparison fails then
2931  * return false. */
2932 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2933 			      const u8 *arr, bool compare_only)
2934 {
2935 	bool res;
2936 	u64 block, rest = 0;
2937 	u32 store_blks = sdebug_store_sectors;
2938 	u32 lb_size = sdebug_sector_size;
2939 	u8 *fsp = sip->storep;
2940 
2941 	block = do_div(lba, store_blks);
2942 	if (block + num > store_blks)
2943 		rest = block + num - store_blks;
2944 
2945 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2946 	if (!res)
2947 		return res;
2948 	if (rest)
2949 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2950 			     rest * lb_size);
2951 	if (!res)
2952 		return res;
2953 	if (compare_only)
2954 		return true;
2955 	arr += num * lb_size;
2956 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2957 	if (rest)
2958 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2959 	return res;
2960 }
2961 
2962 static __be16 dif_compute_csum(const void *buf, int len)
2963 {
2964 	__be16 csum;
2965 
2966 	if (sdebug_guard)
2967 		csum = (__force __be16)ip_compute_csum(buf, len);
2968 	else
2969 		csum = cpu_to_be16(crc_t10dif(buf, len));
2970 
2971 	return csum;
2972 }
2973 
/*
 * Verify one protection information tuple (sdt) against the sector's
 * data. Returns 0 on success, 0x01 for a guard tag mismatch or 0x03
 * for a reference tag mismatch (sense-key specific values used by the
 * callers when building ABORTED COMMAND sense data).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the LBA */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3000 
/*
 * Copy protection information tuples for 'sectors' sectors starting at
 * 'sector' between the command's protection sglist and the dif store
 * (direction selected by 'read'), wrapping around the end of the dif
 * store when necessary.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part of this chunk that wraps past the
		 * end of the dif store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3046 
3047 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3048 			    unsigned int sectors, u32 ei_lba)
3049 {
3050 	unsigned int i;
3051 	sector_t sector;
3052 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3053 						scp->device->hostdata, true);
3054 	struct t10_pi_tuple *sdt;
3055 
3056 	for (i = 0; i < sectors; i++, ei_lba++) {
3057 		int ret;
3058 
3059 		sector = start_sec + i;
3060 		sdt = dif_store(sip, sector);
3061 
3062 		if (sdt->app_tag == cpu_to_be16(0xffff))
3063 			continue;
3064 
3065 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3066 				 ei_lba);
3067 		if (ret) {
3068 			dif_errors++;
3069 			return ret;
3070 		}
3071 	}
3072 
3073 	dif_copy_prot(scp, start_sec, sectors, true);
3074 	dix_reads++;
3075 
3076 	return 0;
3077 }
3078 
/*
 * Respond to READ(6/10/12/16/32) and XDWRITEREAD(10): copy data from
 * the backing store into the command's buffer. Optionally verifies
 * T10 PI protection data, and injects medium or DIF/DIX errors when
 * the corresponding sdebug_opts flags are armed.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and (READ(32) only) the expected
	 * initial logical block reference tag from the cdb */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Armed short-transfer injection: halve the transfer length */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Medium error injection for reads overlapping the configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Armed DIF/DIX error injection after a successful transfer */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3205 
/* Hex/ASCII dump of a sector's contents to the log, 16 bytes per line. */
static void dump_sector(unsigned char *buf, int len)
{
	int i;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char b[128];
		int j, n = 0;

		for (j = 0; j < 16; j++) {
			unsigned char c = buf[i + j];

			/* printable ASCII is shown as " c ", else hex */
			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i + j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i + j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
3227 
/*
 * Verify the protection information accompanying a write: iterate the
 * protection sglist and the data sglist in lockstep, checking each
 * tuple against its sector with dif_verify(). On success the tuples
 * are copied into the dif store. Returns 0 on success or a sense-key
 * specific code; the failing sector is dumped to the log.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3299 
/*
 * Convert an LBA to its index in the provisioning bit map, taking the
 * configured unmap alignment offset into account. Note sector_div()
 * divides lba in place and the quotient is returned through lba.
 */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
3307 
/*
 * Inverse of lba_to_map_index(): the first LBA of the map chunk at
 * 'index', adjusted for the unmap alignment offset.
 */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3316 
3317 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3318 			      unsigned int *num)
3319 {
3320 	sector_t end;
3321 	unsigned int mapped;
3322 	unsigned long index;
3323 	unsigned long next;
3324 
3325 	index = lba_to_map_index(lba);
3326 	mapped = test_bit(index, sip->map_storep);
3327 
3328 	if (mapped)
3329 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3330 	else
3331 		next = find_next_bit(sip->map_storep, map_size, index);
3332 
3333 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3334 	*num = end - lba;
3335 	return mapped;
3336 }
3337 
3338 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3339 		       unsigned int len)
3340 {
3341 	sector_t end = lba + len;
3342 
3343 	while (lba < end) {
3344 		unsigned long index = lba_to_map_index(lba);
3345 
3346 		if (index < map_size)
3347 			set_bit(index, sip->map_storep);
3348 
3349 		lba = map_index_to_lba(index + 1);
3350 	}
3351 }
3352 
/*
 * Mark map-granularity chunks lying entirely inside [lba, lba+len) as
 * unmapped (partially covered chunks are left mapped). When LBPRZ is
 * enabled the backing blocks are overwritten (zeros for LBPRZ=1, 0xff
 * bytes otherwise) and any protection info for those blocks is set to
 * all 0xff bytes, which the verify paths treat as "do not check".
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3381 
/*
 * Respond to WRITE(6/10/12/16/32) and XDWRITEREAD(10): copy the
 * command's data-out buffer into the backing store, update the
 * provisioning map and any ZBC zone write pointer, and optionally
 * verify T10 PI protection data. DIF/DIX error injection honours the
 * SDEBUG_OPT_RECOV_DIF_DIX option flags.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	/* NOTE(review): resp_read_dt0() guards against sip == NULL here;
	 * presumably safe because bug_if_fake_rw is true — confirm. */
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and (WRITE(32) only) the expected
	 * initial logical block reference tag from the cdb */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Armed DIF/DIX error injection after a successful transfer */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3497 
3498 /*
3499  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3500  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3501  */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* holds parameter list header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;	/* lbdof: LB Data Offset; num_lrd: descriptor count */
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		/* protection checks only done for the 16 byte cdb variant */
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* The header+descriptor area occupies the first lbdof logical blocks
	 * of the data-out buffer; the write data follows at sg_off. */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* Walk the LBA range descriptors; the first lrd_size bytes are the
	 * parameter list header, so descriptors start at lrdp + lrd_size. */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;	/* empty descriptor: skip, not an error */
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* running total of blocks must not exceed Buffer xfer Length */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional one-shot error injection, armed via sdeb_inject_pending */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3664 
3665 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3666 			   u32 ei_lba, bool unmap, bool ndob)
3667 {
3668 	struct scsi_device *sdp = scp->device;
3669 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3670 	unsigned long long i;
3671 	u64 block, lbaa;
3672 	u32 lb_size = sdebug_sector_size;
3673 	int ret;
3674 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3675 						scp->device->hostdata, true);
3676 	rwlock_t *macc_lckp = &sip->macc_lck;
3677 	u8 *fs1p;
3678 	u8 *fsp;
3679 
3680 	write_lock(macc_lckp);
3681 
3682 	ret = check_device_access_params(scp, lba, num, true);
3683 	if (ret) {
3684 		write_unlock(macc_lckp);
3685 		return ret;
3686 	}
3687 
3688 	if (unmap && scsi_debug_lbp()) {
3689 		unmap_region(sip, lba, num);
3690 		goto out;
3691 	}
3692 	lbaa = lba;
3693 	block = do_div(lbaa, sdebug_store_sectors);
3694 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3695 	fsp = sip->storep;
3696 	fs1p = fsp + (block * lb_size);
3697 	if (ndob) {
3698 		memset(fs1p, 0, lb_size);
3699 		ret = 0;
3700 	} else
3701 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3702 
3703 	if (-1 == ret) {
3704 		write_unlock(&sip->macc_lck);
3705 		return DID_ERROR << 16;
3706 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3707 		sdev_printk(KERN_INFO, scp->device,
3708 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3709 			    my_name, "write same", lb_size, ret);
3710 
3711 	/* Copy first sector to remaining blocks */
3712 	for (i = 1 ; i < num ; i++) {
3713 		lbaa = lba + i;
3714 		block = do_div(lbaa, sdebug_store_sectors);
3715 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3716 	}
3717 	if (scsi_debug_lbp())
3718 		map_region(sip, lba, num);
3719 	/* If ZBC zone then bump its write pointer */
3720 	if (sdebug_dev_is_zoned(devip))
3721 		zbc_inc_wp(devip, lba, num);
3722 out:
3723 	write_unlock(macc_lckp);
3724 
3725 	return 0;
3726 }
3727 
3728 static int resp_write_same_10(struct scsi_cmnd *scp,
3729 			      struct sdebug_dev_info *devip)
3730 {
3731 	u8 *cmd = scp->cmnd;
3732 	u32 lba;
3733 	u16 num;
3734 	u32 ei_lba = 0;
3735 	bool unmap = false;
3736 
3737 	if (cmd[1] & 0x8) {
3738 		if (sdebug_lbpws10 == 0) {
3739 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3740 			return check_condition_result;
3741 		} else
3742 			unmap = true;
3743 	}
3744 	lba = get_unaligned_be32(cmd + 2);
3745 	num = get_unaligned_be16(cmd + 7);
3746 	if (num > sdebug_write_same_length) {
3747 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3748 		return check_condition_result;
3749 	}
3750 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3751 }
3752 
3753 static int resp_write_same_16(struct scsi_cmnd *scp,
3754 			      struct sdebug_dev_info *devip)
3755 {
3756 	u8 *cmd = scp->cmnd;
3757 	u64 lba;
3758 	u32 num;
3759 	u32 ei_lba = 0;
3760 	bool unmap = false;
3761 	bool ndob = false;
3762 
3763 	if (cmd[1] & 0x8) {	/* UNMAP */
3764 		if (sdebug_lbpws == 0) {
3765 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3766 			return check_condition_result;
3767 		} else
3768 			unmap = true;
3769 	}
3770 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3771 		ndob = true;
3772 	lba = get_unaligned_be64(cmd + 2);
3773 	num = get_unaligned_be32(cmd + 10);
3774 	if (num > sdebug_write_same_length) {
3775 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3776 		return check_condition_result;
3777 	}
3778 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3779 }
3780 
3781 /* Note the mode field is in the same position as the (lower) service action
3782  * field. For the Report supported operation codes command, SPC-4 suggests
3783  * each mode of this command should be reported separately; for future. */
3784 static int resp_write_buffer(struct scsi_cmnd *scp,
3785 			     struct sdebug_dev_info *devip)
3786 {
3787 	u8 *cmd = scp->cmnd;
3788 	struct scsi_device *sdp = scp->device;
3789 	struct sdebug_dev_info *dp;
3790 	u8 mode;
3791 
3792 	mode = cmd[1] & 0x1f;
3793 	switch (mode) {
3794 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3795 		/* set UAs on this device only */
3796 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3797 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3798 		break;
3799 	case 0x5:	/* download MC, save and ACT */
3800 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3801 		break;
3802 	case 0x6:	/* download MC with offsets and ACT */
3803 		/* set UAs on most devices (LUs) in this target */
3804 		list_for_each_entry(dp,
3805 				    &devip->sdbg_host->dev_info_list,
3806 				    dev_list)
3807 			if (dp->target == sdp->id) {
3808 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3809 				if (devip != dp)
3810 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3811 						dp->uas_bm);
3812 			}
3813 		break;
3814 	case 0x7:	/* download MC with offsets, save, and ACT */
3815 		/* set UA on all devices (LUs) in this target */
3816 		list_for_each_entry(dp,
3817 				    &devip->sdbg_host->dev_info_list,
3818 				    dev_list)
3819 			if (dp->target == sdp->id)
3820 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3821 					dp->uas_bm);
3822 		break;
3823 	default:
3824 		/* do nothing for this command for other mode values */
3825 		break;
3826 	}
3827 	return 0;
3828 }
3829 
/*
 * COMPARE AND WRITE(16). The data-out buffer carries 2*num blocks: first the
 * compare data, then the write data. The write is performed (and the region
 * mapped) only when the compare half matches the store.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u64 lba;
	u32 dnum;		/* number of blocks in the data-out buffer (2*num) */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* write lock: compare and (conditional) write must be atomic */
	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* comp_write_worker() returns false on miscompare */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3891 
/* One 16 byte LBA-range descriptor from an UNMAP parameter list (SBC) */
struct unmap_block_desc {
	__be64	lba;		/* starting logical block address */
	__be32	blocks;		/* number of logical blocks to unmap */
	__be32	__reserved;
};
3897 
3898 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3899 {
3900 	unsigned char *buf;
3901 	struct unmap_block_desc *desc;
3902 	struct sdeb_store_info *sip = devip2sip(devip, true);
3903 	rwlock_t *macc_lckp = &sip->macc_lck;
3904 	unsigned int i, payload_len, descriptors;
3905 	int ret;
3906 
3907 	if (!scsi_debug_lbp())
3908 		return 0;	/* fib and say its done */
3909 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3910 	BUG_ON(scsi_bufflen(scp) != payload_len);
3911 
3912 	descriptors = (payload_len - 8) / 16;
3913 	if (descriptors > sdebug_unmap_max_desc) {
3914 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3915 		return check_condition_result;
3916 	}
3917 
3918 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3919 	if (!buf) {
3920 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3921 				INSUFF_RES_ASCQ);
3922 		return check_condition_result;
3923 	}
3924 
3925 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3926 
3927 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3928 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3929 
3930 	desc = (void *)&buf[8];
3931 
3932 	write_lock(macc_lckp);
3933 
3934 	for (i = 0 ; i < descriptors ; i++) {
3935 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3936 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3937 
3938 		ret = check_device_access_params(scp, lba, num, true);
3939 		if (ret)
3940 			goto out;
3941 
3942 		unmap_region(sip, lba, num);
3943 	}
3944 
3945 	ret = 0;
3946 
3947 out:
3948 	write_unlock(macc_lckp);
3949 	kfree(buf);
3950 
3951 	return ret;
3952 }
3953 
3954 #define SDEBUG_GET_LBA_STATUS_LEN 32
3955 
3956 static int resp_get_lba_status(struct scsi_cmnd *scp,
3957 			       struct sdebug_dev_info *devip)
3958 {
3959 	u8 *cmd = scp->cmnd;
3960 	u64 lba;
3961 	u32 alloc_len, mapped, num;
3962 	int ret;
3963 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3964 
3965 	lba = get_unaligned_be64(cmd + 2);
3966 	alloc_len = get_unaligned_be32(cmd + 10);
3967 
3968 	if (alloc_len < 24)
3969 		return 0;
3970 
3971 	ret = check_device_access_params(scp, lba, 1, false);
3972 	if (ret)
3973 		return ret;
3974 
3975 	if (scsi_debug_lbp()) {
3976 		struct sdeb_store_info *sip = devip2sip(devip, true);
3977 
3978 		mapped = map_state(sip, lba, &num);
3979 	} else {
3980 		mapped = 1;
3981 		/* following just in case virtual_gb changed */
3982 		sdebug_capacity = get_sdebug_capacity();
3983 		if (sdebug_capacity - lba <= 0xffffffff)
3984 			num = sdebug_capacity - lba;
3985 		else
3986 			num = 0xffffffff;
3987 	}
3988 
3989 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3990 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3991 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3992 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3993 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3994 
3995 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3996 }
3997 
3998 static int resp_sync_cache(struct scsi_cmnd *scp,
3999 			   struct sdebug_dev_info *devip)
4000 {
4001 	int res = 0;
4002 	u64 lba;
4003 	u32 num_blocks;
4004 	u8 *cmd = scp->cmnd;
4005 
4006 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4007 		lba = get_unaligned_be32(cmd + 2);
4008 		num_blocks = get_unaligned_be16(cmd + 7);
4009 	} else {				/* SYNCHRONIZE_CACHE(16) */
4010 		lba = get_unaligned_be64(cmd + 2);
4011 		num_blocks = get_unaligned_be32(cmd + 10);
4012 	}
4013 	if (lba + num_blocks > sdebug_capacity) {
4014 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4015 		return check_condition_result;
4016 	}
4017 	if (!write_since_sync || cmd[1] & 0x2)
4018 		res = SDEG_RES_IMMED_MASK;
4019 	else		/* delay if write_since_sync and IMMED clear */
4020 		write_since_sync = false;
4021 	return res;
4022 }
4023 
4024 /*
4025  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4026  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4027  * a GOOD status otherwise. Model a disk with a big cache and yield
4028  * CONDITION MET. Actually tries to bring range in main memory into the
4029  * cache associated with the CPU(s).
4030  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: sectors wrapping past end of store */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* no backing store; nothing to prefetch */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() leaves the quotient in lba and returns the remainder */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)	/* wrapped portion starts at the beginning of the store */
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4073 
4074 #define RL_BUCKET_ELEMS 8
4075 
4076 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4077  * (W-LUN), the normal Linux scanning logic does not associate it with a
4078  * device (e.g. /dev/sg7). The following magic will make that association:
4079  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4080  * where <n> is a host number. If there are multiple targets in a host then
4081  * the above will associate a W-LUN to each target. To only get a W-LUN
4082  * for target 2, then use "echo '- 2 49409' > scan" .
4083  */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response built one "bucket" (RL_BUCKET_ELEMS entries) at a time */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {	/* SPC requires at least 4 bytes */
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* Outer loop emits one bucket per iteration; k=0 bucket starts with
	 * the 8 byte response header (hence j starts at 1 there). */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)	/* last (partial) bucket */
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN (if requested) to the final partial bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4174 
4175 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4176 {
4177 	bool is_bytchk3 = false;
4178 	u8 bytchk;
4179 	int ret, j;
4180 	u32 vnum, a_num, off;
4181 	const u32 lb_size = sdebug_sector_size;
4182 	u64 lba;
4183 	u8 *arr;
4184 	u8 *cmd = scp->cmnd;
4185 	struct sdeb_store_info *sip = devip2sip(devip, true);
4186 	rwlock_t *macc_lckp = &sip->macc_lck;
4187 
4188 	bytchk = (cmd[1] >> 1) & 0x3;
4189 	if (bytchk == 0) {
4190 		return 0;	/* always claim internal verify okay */
4191 	} else if (bytchk == 2) {
4192 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4193 		return check_condition_result;
4194 	} else if (bytchk == 3) {
4195 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4196 	}
4197 	switch (cmd[0]) {
4198 	case VERIFY_16:
4199 		lba = get_unaligned_be64(cmd + 2);
4200 		vnum = get_unaligned_be32(cmd + 10);
4201 		break;
4202 	case VERIFY:		/* is VERIFY(10) */
4203 		lba = get_unaligned_be32(cmd + 2);
4204 		vnum = get_unaligned_be16(cmd + 7);
4205 		break;
4206 	default:
4207 		mk_sense_invalid_opcode(scp);
4208 		return check_condition_result;
4209 	}
4210 	a_num = is_bytchk3 ? 1 : vnum;
4211 	/* Treat following check like one for read (i.e. no write) access */
4212 	ret = check_device_access_params(scp, lba, a_num, false);
4213 	if (ret)
4214 		return ret;
4215 
4216 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4217 	if (!arr) {
4218 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4219 				INSUFF_RES_ASCQ);
4220 		return check_condition_result;
4221 	}
4222 	/* Not changing store, so only need read access */
4223 	read_lock(macc_lckp);
4224 
4225 	ret = do_dout_fetch(scp, a_num, arr);
4226 	if (ret == -1) {
4227 		ret = DID_ERROR << 16;
4228 		goto cleanup;
4229 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4230 		sdev_printk(KERN_INFO, scp->device,
4231 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4232 			    my_name, __func__, a_num * lb_size, ret);
4233 	}
4234 	if (is_bytchk3) {
4235 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4236 			memcpy(arr + off, arr, lb_size);
4237 	}
4238 	ret = 0;
4239 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4240 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4241 		ret = check_condition_result;
4242 		goto cleanup;
4243 	}
4244 cleanup:
4245 	read_unlock(macc_lckp);
4246 	kfree(arr);
4247 	return ret;
4248 }
4249 
4250 #define RZONES_DESC_HD 64
4251 
4252 /* Report zones depending on start LBA nad reporting options */
4253 static int resp_report_zones(struct scsi_cmnd *scp,
4254 			     struct sdebug_dev_info *devip)
4255 {
4256 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4257 	int ret = 0;
4258 	u32 alloc_len, rep_opts, rep_len;
4259 	bool partial;
4260 	u64 lba, zs_lba;
4261 	u8 *arr = NULL, *desc;
4262 	u8 *cmd = scp->cmnd;
4263 	struct sdeb_zone_state *zsp;
4264 	struct sdeb_store_info *sip = devip2sip(devip, false);
4265 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4266 
4267 	if (!sdebug_dev_is_zoned(devip)) {
4268 		mk_sense_invalid_opcode(scp);
4269 		return check_condition_result;
4270 	}
4271 	zs_lba = get_unaligned_be64(cmd + 2);
4272 	alloc_len = get_unaligned_be32(cmd + 10);
4273 	rep_opts = cmd[14] & 0x3f;
4274 	partial = cmd[14] & 0x80;
4275 
4276 	if (zs_lba >= sdebug_capacity) {
4277 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4278 		return check_condition_result;
4279 	}
4280 
4281 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4282 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4283 			    max_zones);
4284 
4285 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4286 	if (!arr) {
4287 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4288 				INSUFF_RES_ASCQ);
4289 		return check_condition_result;
4290 	}
4291 
4292 	read_lock(macc_lckp);
4293 
4294 	desc = arr + 64;
4295 	for (i = 0; i < max_zones; i++) {
4296 		lba = zs_lba + devip->zsize * i;
4297 		if (lba > sdebug_capacity)
4298 			break;
4299 		zsp = zbc_zone(devip, lba);
4300 		switch (rep_opts) {
4301 		case 0x00:
4302 			/* All zones */
4303 			break;
4304 		case 0x01:
4305 			/* Empty zones */
4306 			if (zsp->z_cond != ZC1_EMPTY)
4307 				continue;
4308 			break;
4309 		case 0x02:
4310 			/* Implicit open zones */
4311 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4312 				continue;
4313 			break;
4314 		case 0x03:
4315 			/* Explicit open zones */
4316 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4317 				continue;
4318 			break;
4319 		case 0x04:
4320 			/* Closed zones */
4321 			if (zsp->z_cond != ZC4_CLOSED)
4322 				continue;
4323 			break;
4324 		case 0x05:
4325 			/* Full zones */
4326 			if (zsp->z_cond != ZC5_FULL)
4327 				continue;
4328 			break;
4329 		case 0x06:
4330 		case 0x07:
4331 		case 0x10:
4332 			/*
4333 			 * Read-only, offline, reset WP recommended are
4334 			 * not emulated: no zones to report;
4335 			 */
4336 			continue;
4337 		case 0x11:
4338 			/* non-seq-resource set */
4339 			if (!zsp->z_non_seq_resource)
4340 				continue;
4341 			break;
4342 		case 0x3f:
4343 			/* Not write pointer (conventional) zones */
4344 			if (!zbc_zone_is_conv(zsp))
4345 				continue;
4346 			break;
4347 		default:
4348 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4349 					INVALID_FIELD_IN_CDB, 0);
4350 			ret = check_condition_result;
4351 			goto fini;
4352 		}
4353 
4354 		if (nrz < rep_max_zones) {
4355 			/* Fill zone descriptor */
4356 			desc[0] = zsp->z_type;
4357 			desc[1] = zsp->z_cond << 4;
4358 			if (zsp->z_non_seq_resource)
4359 				desc[1] |= 1 << 1;
4360 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4361 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4362 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4363 			desc += 64;
4364 		}
4365 
4366 		if (partial && nrz >= rep_max_zones)
4367 			break;
4368 
4369 		nrz++;
4370 	}
4371 
4372 	/* Report header */
4373 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4374 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4375 
4376 	rep_len = (unsigned long)desc - (unsigned long)arr;
4377 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4378 
4379 fini:
4380 	read_unlock(macc_lckp);
4381 	kfree(arr);
4382 	return ret;
4383 }
4384 
4385 /* Logic transplanted from tcmu-runner, file_zbc.c */
4386 static void zbc_open_all(struct sdebug_dev_info *devip)
4387 {
4388 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4389 	unsigned int i;
4390 
4391 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4392 		if (zsp->z_cond == ZC4_CLOSED)
4393 			zbc_open_zone(devip, &devip->zstate[i], true);
4394 	}
4395 }
4396 
/* OPEN ZONE: explicitly open one zone, or (ALL bit) every closed zone */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;	/* ALL bit: act on every zone */
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	/* zone id must be the start LBA of a zone */
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	/* conventional zones cannot be opened */
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* already explicitly open or full: success, nothing to do */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* respect the maximum number of explicitly open zones */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	/* implicitly open -> close first, then reopen explicitly */
	if (zc == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
4467 
4468 static void zbc_close_all(struct sdebug_dev_info *devip)
4469 {
4470 	unsigned int i;
4471 
4472 	for (i = 0; i < devip->nr_zones; i++)
4473 		zbc_close_zone(devip, &devip->zstate[i]);
4474 }
4475 
4476 static int resp_close_zone(struct scsi_cmnd *scp,
4477 			   struct sdebug_dev_info *devip)
4478 {
4479 	int res = 0;
4480 	u64 z_id;
4481 	u8 *cmd = scp->cmnd;
4482 	struct sdeb_zone_state *zsp;
4483 	bool all = cmd[14] & 0x01;
4484 	struct sdeb_store_info *sip = devip2sip(devip, false);
4485 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4486 
4487 	if (!sdebug_dev_is_zoned(devip)) {
4488 		mk_sense_invalid_opcode(scp);
4489 		return check_condition_result;
4490 	}
4491 
4492 	write_lock(macc_lckp);
4493 
4494 	if (all) {
4495 		zbc_close_all(devip);
4496 		goto fini;
4497 	}
4498 
4499 	/* Close specified zone */
4500 	z_id = get_unaligned_be64(cmd + 2);
4501 	if (z_id >= sdebug_capacity) {
4502 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4503 		res = check_condition_result;
4504 		goto fini;
4505 	}
4506 
4507 	zsp = zbc_zone(devip, z_id);
4508 	if (z_id != zsp->z_start) {
4509 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4510 		res = check_condition_result;
4511 		goto fini;
4512 	}
4513 	if (zbc_zone_is_conv(zsp)) {
4514 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4515 		res = check_condition_result;
4516 		goto fini;
4517 	}
4518 
4519 	zbc_close_zone(devip, zsp);
4520 fini:
4521 	write_unlock(macc_lckp);
4522 	return res;
4523 }
4524 
4525 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4526 			    struct sdeb_zone_state *zsp, bool empty)
4527 {
4528 	enum sdebug_z_cond zc = zsp->z_cond;
4529 
4530 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4531 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4532 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4533 			zbc_close_zone(devip, zsp);
4534 		if (zsp->z_cond == ZC4_CLOSED)
4535 			devip->nr_closed--;
4536 		zsp->z_wp = zsp->z_start + zsp->z_size;
4537 		zsp->z_cond = ZC5_FULL;
4538 	}
4539 }
4540 
4541 static void zbc_finish_all(struct sdebug_dev_info *devip)
4542 {
4543 	unsigned int i;
4544 
4545 	for (i = 0; i < devip->nr_zones; i++)
4546 		zbc_finish_zone(devip, &devip->zstate[i], false);
4547 }
4548 
4549 static int resp_finish_zone(struct scsi_cmnd *scp,
4550 			    struct sdebug_dev_info *devip)
4551 {
4552 	struct sdeb_zone_state *zsp;
4553 	int res = 0;
4554 	u64 z_id;
4555 	u8 *cmd = scp->cmnd;
4556 	bool all = cmd[14] & 0x01;
4557 	struct sdeb_store_info *sip = devip2sip(devip, false);
4558 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4559 
4560 	if (!sdebug_dev_is_zoned(devip)) {
4561 		mk_sense_invalid_opcode(scp);
4562 		return check_condition_result;
4563 	}
4564 
4565 	write_lock(macc_lckp);
4566 
4567 	if (all) {
4568 		zbc_finish_all(devip);
4569 		goto fini;
4570 	}
4571 
4572 	/* Finish the specified zone */
4573 	z_id = get_unaligned_be64(cmd + 2);
4574 	if (z_id >= sdebug_capacity) {
4575 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4576 		res = check_condition_result;
4577 		goto fini;
4578 	}
4579 
4580 	zsp = zbc_zone(devip, z_id);
4581 	if (z_id != zsp->z_start) {
4582 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4583 		res = check_condition_result;
4584 		goto fini;
4585 	}
4586 	if (zbc_zone_is_conv(zsp)) {
4587 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4588 		res = check_condition_result;
4589 		goto fini;
4590 	}
4591 
4592 	zbc_finish_zone(devip, zsp, true);
4593 fini:
4594 	write_unlock(macc_lckp);
4595 	return res;
4596 }
4597 
4598 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4599 			 struct sdeb_zone_state *zsp)
4600 {
4601 	enum sdebug_z_cond zc;
4602 
4603 	if (zbc_zone_is_conv(zsp))
4604 		return;
4605 
4606 	zc = zsp->z_cond;
4607 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4608 		zbc_close_zone(devip, zsp);
4609 
4610 	if (zsp->z_cond == ZC4_CLOSED)
4611 		devip->nr_closed--;
4612 
4613 	zsp->z_non_seq_resource = false;
4614 	zsp->z_wp = zsp->z_start;
4615 	zsp->z_cond = ZC1_EMPTY;
4616 }
4617 
4618 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4619 {
4620 	unsigned int i;
4621 
4622 	for (i = 0; i < devip->nr_zones; i++)
4623 		zbc_rwp_zone(devip, &devip->zstate[i]);
4624 }
4625 
4626 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4627 {
4628 	struct sdeb_zone_state *zsp;
4629 	int res = 0;
4630 	u64 z_id;
4631 	u8 *cmd = scp->cmnd;
4632 	bool all = cmd[14] & 0x01;
4633 	struct sdeb_store_info *sip = devip2sip(devip, false);
4634 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4635 
4636 	if (!sdebug_dev_is_zoned(devip)) {
4637 		mk_sense_invalid_opcode(scp);
4638 		return check_condition_result;
4639 	}
4640 
4641 	write_lock(macc_lckp);
4642 
4643 	if (all) {
4644 		zbc_rwp_all(devip);
4645 		goto fini;
4646 	}
4647 
4648 	z_id = get_unaligned_be64(cmd + 2);
4649 	if (z_id >= sdebug_capacity) {
4650 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4651 		res = check_condition_result;
4652 		goto fini;
4653 	}
4654 
4655 	zsp = zbc_zone(devip, z_id);
4656 	if (z_id != zsp->z_start) {
4657 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4658 		res = check_condition_result;
4659 		goto fini;
4660 	}
4661 	if (zbc_zone_is_conv(zsp)) {
4662 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4663 		res = check_condition_result;
4664 		goto fini;
4665 	}
4666 
4667 	zbc_rwp_zone(devip, zsp);
4668 fini:
4669 	write_unlock(macc_lckp);
4670 	return res;
4671 }
4672 
4673 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4674 {
4675 	u16 hwq;
4676 
4677 	if (sdebug_host_max_queue) {
4678 		/* Provide a simple method to choose the hwq */
4679 		hwq = smp_processor_id() % submit_queues;
4680 	} else {
4681 		u32 tag = blk_mq_unique_tag(cmnd->request);
4682 
4683 		hwq = blk_mq_unique_tag_to_hwq(tag);
4684 
4685 		pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4686 		if (WARN_ON_ONCE(hwq >= submit_queues))
4687 			hwq = 0;
4688 	}
4689 	return sdebug_q_arr + hwq;
4690 }
4691 
4692 static u32 get_tag(struct scsi_cmnd *cmnd)
4693 {
4694 	return blk_mq_unique_tag(cmnd->request);
4695 }
4696 
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;	/* snapshot; flag is cleared below */
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* mark the deferred work as no longer pending */
	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* track completions running on a CPU other than the issuer */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity-check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means the user lowered max_queue */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink/clear the retirement window as retired slots drain */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		/* an injected abort is reported via blk_abort_request()
		 * instead of a normal completion */
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4770 
4771 /* When high resolution timer goes off this function is called. */
4772 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4773 {
4774 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4775 						  hrt);
4776 	sdebug_q_cmd_complete(sd_dp);
4777 	return HRTIMER_NORESTART;
4778 }
4779 
4780 /* When work queue schedules work, it calls this function. */
4781 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4782 {
4783 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4784 						  ew.work);
4785 	sdebug_q_cmd_complete(sd_dp);
4786 }
4787 
/* LU name shared by all devices when sdebug_uuid_ctl == 2; generated
 * lazily on first device creation (see sdebug_device_create()). */
static bool got_shared_uuid;
static uuid_t shared_uuid;
4790 
/*
 * Build the per-device zone state array (devip->zstate) for a zoned
 * (ZBC) device: derive the zone size, number of zones and max-open
 * limit from the module parameters, then lay out conventional zones
 * followed by sequential-write zones. Returns 0 on success or a
 * negative errno on invalid parameters / allocation failure.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	/* round up so a final, possibly smaller, zone covers the tail */
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			/* conventional zones have no write pointer */
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			/* host-managed: sequential-write-required zones;
			 * otherwise sequential-write-preferred */
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		/* last zone may be a runt covering the remaining capacity */
		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
4876 
4877 static struct sdebug_dev_info *sdebug_device_create(
4878 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4879 {
4880 	struct sdebug_dev_info *devip;
4881 
4882 	devip = kzalloc(sizeof(*devip), flags);
4883 	if (devip) {
4884 		if (sdebug_uuid_ctl == 1)
4885 			uuid_gen(&devip->lu_name);
4886 		else if (sdebug_uuid_ctl == 2) {
4887 			if (got_shared_uuid)
4888 				devip->lu_name = shared_uuid;
4889 			else {
4890 				uuid_gen(&shared_uuid);
4891 				got_shared_uuid = true;
4892 				devip->lu_name = shared_uuid;
4893 			}
4894 		}
4895 		devip->sdbg_host = sdbg_host;
4896 		if (sdeb_zbc_in_use) {
4897 			devip->zmodel = sdeb_zbc_model;
4898 			if (sdebug_device_create_zones(devip)) {
4899 				kfree(devip);
4900 				return NULL;
4901 			}
4902 		} else {
4903 			devip->zmodel = BLK_ZONED_NONE;
4904 		}
4905 		devip->sdbg_host = sdbg_host;
4906 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4907 	}
4908 	return devip;
4909 }
4910 
4911 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4912 {
4913 	struct sdebug_host_info *sdbg_host;
4914 	struct sdebug_dev_info *open_devip = NULL;
4915 	struct sdebug_dev_info *devip;
4916 
4917 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4918 	if (!sdbg_host) {
4919 		pr_err("Host info NULL\n");
4920 		return NULL;
4921 	}
4922 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4923 		if ((devip->used) && (devip->channel == sdev->channel) &&
4924 		    (devip->target == sdev->id) &&
4925 		    (devip->lun == sdev->lun))
4926 			return devip;
4927 		else {
4928 			if ((!devip->used) && (!open_devip))
4929 				open_devip = devip;
4930 		}
4931 	}
4932 	if (!open_devip) { /* try and make a new one */
4933 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4934 		if (!open_devip) {
4935 			pr_err("out of memory at line %d\n", __LINE__);
4936 			return NULL;
4937 		}
4938 	}
4939 
4940 	open_devip->channel = sdev->channel;
4941 	open_devip->target = sdev->id;
4942 	open_devip->lun = sdev->lun;
4943 	open_devip->sdbg_host = sdbg_host;
4944 	atomic_set(&open_devip->num_in_q, 0);
4945 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4946 	open_devip->used = true;
4947 	return open_devip;
4948 }
4949 
4950 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4951 {
4952 	if (sdebug_verbose)
4953 		pr_info("slave_alloc <%u %u %u %llu>\n",
4954 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4955 	return 0;
4956 }
4957 
4958 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4959 {
4960 	struct sdebug_dev_info *devip =
4961 			(struct sdebug_dev_info *)sdp->hostdata;
4962 
4963 	if (sdebug_verbose)
4964 		pr_info("slave_configure <%u %u %u %llu>\n",
4965 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4966 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4967 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4968 	if (devip == NULL) {
4969 		devip = find_build_dev_info(sdp);
4970 		if (devip == NULL)
4971 			return 1;  /* no resources, will be marked offline */
4972 	}
4973 	sdp->hostdata = devip;
4974 	if (sdebug_no_uld)
4975 		sdp->no_uld_attach = 1;
4976 	config_cdb_len(sdp);
4977 	return 0;
4978 }
4979 
4980 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4981 {
4982 	struct sdebug_dev_info *devip =
4983 		(struct sdebug_dev_info *)sdp->hostdata;
4984 
4985 	if (sdebug_verbose)
4986 		pr_info("slave_destroy <%u %u %u %llu>\n",
4987 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4988 	if (devip) {
4989 		/* make this slot available for re-use */
4990 		devip->used = false;
4991 		sdp->hostdata = NULL;
4992 	}
4993 }
4994 
4995 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4996 			   enum sdeb_defer_type defer_t)
4997 {
4998 	if (!sd_dp)
4999 		return;
5000 	if (defer_t == SDEB_DEFER_HRT)
5001 		hrtimer_cancel(&sd_dp->hrt);
5002 	else if (defer_t == SDEB_DEFER_WQ)
5003 		cancel_work_sync(&sd_dp->ew.work);
5004 }
5005 
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue looking for @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* if max_queue was recently reduced, scan retired slots too */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling: the
				 * completion callback itself takes qc_lock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5051 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling: the
				 * completion callback itself takes qc_lock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				/* reacquire to continue scanning this queue */
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5090 
5091 /* Free queued command memory on heap */
5092 static void free_all_queued(void)
5093 {
5094 	int j, k;
5095 	struct sdebug_queue *sqp;
5096 	struct sdebug_queued_cmd *sqcp;
5097 
5098 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5099 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5100 			sqcp = &sqp->qc_arr[k];
5101 			kfree(sqcp->sd_dp);
5102 			sqcp->sd_dp = NULL;
5103 		}
5104 	}
5105 }
5106 
5107 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5108 {
5109 	bool ok;
5110 
5111 	++num_aborts;
5112 	if (SCpnt) {
5113 		ok = stop_queued_cmnd(SCpnt);
5114 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5115 			sdev_printk(KERN_INFO, SCpnt->device,
5116 				    "%s: command%s found\n", __func__,
5117 				    ok ? "" : " not");
5118 	}
5119 	return SUCCESS;
5120 }
5121 
5122 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5123 {
5124 	++num_dev_resets;
5125 	if (SCpnt && SCpnt->device) {
5126 		struct scsi_device *sdp = SCpnt->device;
5127 		struct sdebug_dev_info *devip =
5128 				(struct sdebug_dev_info *)sdp->hostdata;
5129 
5130 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5131 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5132 		if (devip)
5133 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5134 	}
5135 	return SUCCESS;
5136 }
5137 
5138 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5139 {
5140 	struct sdebug_host_info *sdbg_host;
5141 	struct sdebug_dev_info *devip;
5142 	struct scsi_device *sdp;
5143 	struct Scsi_Host *hp;
5144 	int k = 0;
5145 
5146 	++num_target_resets;
5147 	if (!SCpnt)
5148 		goto lie;
5149 	sdp = SCpnt->device;
5150 	if (!sdp)
5151 		goto lie;
5152 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5153 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5154 	hp = sdp->host;
5155 	if (!hp)
5156 		goto lie;
5157 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5158 	if (sdbg_host) {
5159 		list_for_each_entry(devip,
5160 				    &sdbg_host->dev_info_list,
5161 				    dev_list)
5162 			if (devip->target == sdp->id) {
5163 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5164 				++k;
5165 			}
5166 	}
5167 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5168 		sdev_printk(KERN_INFO, sdp,
5169 			    "%s: %d device(s) found in target\n", __func__, k);
5170 lie:
5171 	return SUCCESS;
5172 }
5173 
5174 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5175 {
5176 	struct sdebug_host_info *sdbg_host;
5177 	struct sdebug_dev_info *devip;
5178 	struct scsi_device *sdp;
5179 	struct Scsi_Host *hp;
5180 	int k = 0;
5181 
5182 	++num_bus_resets;
5183 	if (!(SCpnt && SCpnt->device))
5184 		goto lie;
5185 	sdp = SCpnt->device;
5186 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5187 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5188 	hp = sdp->host;
5189 	if (hp) {
5190 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5191 		if (sdbg_host) {
5192 			list_for_each_entry(devip,
5193 					    &sdbg_host->dev_info_list,
5194 					    dev_list) {
5195 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5196 				++k;
5197 			}
5198 		}
5199 	}
5200 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5201 		sdev_printk(KERN_INFO, sdp,
5202 			    "%s: %d device(s) found in host\n", __func__, k);
5203 lie:
5204 	return SUCCESS;
5205 }
5206 
5207 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5208 {
5209 	struct sdebug_host_info *sdbg_host;
5210 	struct sdebug_dev_info *devip;
5211 	int k = 0;
5212 
5213 	++num_host_resets;
5214 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5215 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5216 	spin_lock(&sdebug_host_list_lock);
5217 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5218 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5219 				    dev_list) {
5220 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5221 			++k;
5222 		}
5223 	}
5224 	spin_unlock(&sdebug_host_list_lock);
5225 	stop_all_queued();
5226 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5227 		sdev_printk(KERN_INFO, SCpnt->device,
5228 			    "%s: %d device(s) found\n", __func__, k);
5229 	return SUCCESS;
5230 }
5231 
/*
 * Write a DOS/MBR partition table into the first sector of the ram
 * store so the emulated disk appears pre-partitioned: sdebug_num_parts
 * roughly equal, cylinder-aligned primary partitions of type 0x83
 * (Linux). No-op when num_parts < 1 or the store is under 1 MiB.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* the first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* loop terminator below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS address of the partition's first sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the partition's last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5280 
5281 static void block_unblock_all_queues(bool block)
5282 {
5283 	int j;
5284 	struct sdebug_queue *sqp;
5285 
5286 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5287 		atomic_set(&sqp->blocked, (int)block);
5288 }
5289 
5290 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5291  * commands will be processed normally before triggers occur.
5292  */
5293 static void tweak_cmnd_count(void)
5294 {
5295 	int count, modulo;
5296 
5297 	modulo = abs(sdebug_every_nth);
5298 	if (modulo < 2)
5299 		return;
5300 	block_unblock_all_queues(true);
5301 	count = atomic_read(&sdebug_cmnd_count);
5302 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5303 	block_unblock_all_queues(false);
5304 }
5305 
5306 static void clear_queue_stats(void)
5307 {
5308 	atomic_set(&sdebug_cmnd_count, 0);
5309 	atomic_set(&sdebug_completions, 0);
5310 	atomic_set(&sdebug_miss_cpus, 0);
5311 	atomic_set(&sdebug_a_tsf, 0);
5312 }
5313 
5314 static bool inject_on_this_cmd(void)
5315 {
5316 	if (sdebug_every_nth == 0)
5317 		return false;
5318 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5319 }
5320 
#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* zero delay: complete synchronously in the caller's thread */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* occasionally inject TASK SET FULL near the qdepth limit */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	/* lazily allocate the deferred-completion state for this slot */
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	/* timestamp so short ndelays can account for processing time */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			/* inject a one-shot transport error */
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* optionally randomize the delay up to ns */
			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		/* one-time hrtimer setup for this slot */
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		/* one-time work item setup for this slot */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		/* optionally inject a command abort via the block layer */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
			blk_abort_request(cmnd->request);
			atomic_set(&sdeb_inject_pending, 0);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
5528 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Parameters are kept in alphabetical order. Entries with S_IWUSR may be
 * changed at run time via the sysfs files noted above; the rest are
 * load-time only. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One line description per parameter, displayed by modinfo(8). */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5667 
#define SDEBUG_INFO_LEN 256
/* shared buffer returned by scsi_debug_info(); diagnostic text only */
static char sdebug_info[SDEBUG_INFO_LEN];

/*
 * Build a short driver identification string for the SCSI mid-layer.
 * Writes into the file-scope sdebug_info[] buffer, so concurrent callers
 * would share the same storage -- NOTE(review): presumably acceptable for
 * this diagnostic-only hook; confirm if callers change.
 */
static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	/* first line: driver name, version and version date */
	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))	/* buffer already full, stop here */
		return sdebug_info;
	/* second line: a few key module parameter values */
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
5685 
5686 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5687 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5688 				 int length)
5689 {
5690 	char arr[16];
5691 	int opts;
5692 	int minLen = length > 15 ? 15 : length;
5693 
5694 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5695 		return -EACCES;
5696 	memcpy(arr, buffer, minLen);
5697 	arr[minLen] = '\0';
5698 	if (1 != sscanf(arr, "%d", &opts))
5699 		return -EINVAL;
5700 	sdebug_opts = opts;
5701 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5702 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5703 	if (sdebug_every_nth != 0)
5704 		tweak_cmnd_count();
5705 	return length;
5706 }
5707 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* driver identity plus the main tunable parameter values */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per submit queue: range of busy slots in its in-use bitmap;
	 * find_first_bit() == sdebug_max_queue means the queue is idle */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* when backing stores exist, dump host -> store-index mapping and
	 * the per-store array with its not-in-use marks */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5781 
5782 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5783 {
5784 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5785 }
5786 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5787  * of delay is jiffies.
5788  */
5789 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5790 			   size_t count)
5791 {
5792 	int jdelay, res;
5793 
5794 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5795 		res = count;
5796 		if (sdebug_jdelay != jdelay) {
5797 			int j, k;
5798 			struct sdebug_queue *sqp;
5799 
5800 			block_unblock_all_queues(true);
5801 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5802 			     ++j, ++sqp) {
5803 				k = find_first_bit(sqp->in_use_bm,
5804 						   sdebug_max_queue);
5805 				if (k != sdebug_max_queue) {
5806 					res = -EBUSY;   /* queued commands */
5807 					break;
5808 				}
5809 			}
5810 			if (res > 0) {
5811 				sdebug_jdelay = jdelay;
5812 				sdebug_ndelay = 0;
5813 			}
5814 			block_unblock_all_queues(false);
5815 		}
5816 		return res;
5817 	}
5818 	return -EINVAL;
5819 }
5820 static DRIVER_ATTR_RW(delay);
5821 
5822 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5823 {
5824 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5825 }
5826 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5827 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5828 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5829 			    size_t count)
5830 {
5831 	int ndelay, res;
5832 
5833 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5834 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5835 		res = count;
5836 		if (sdebug_ndelay != ndelay) {
5837 			int j, k;
5838 			struct sdebug_queue *sqp;
5839 
5840 			block_unblock_all_queues(true);
5841 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5842 			     ++j, ++sqp) {
5843 				k = find_first_bit(sqp->in_use_bm,
5844 						   sdebug_max_queue);
5845 				if (k != sdebug_max_queue) {
5846 					res = -EBUSY;   /* queued commands */
5847 					break;
5848 				}
5849 			}
5850 			if (res > 0) {
5851 				sdebug_ndelay = ndelay;
5852 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5853 							: DEF_JDELAY;
5854 			}
5855 			block_unblock_all_queues(false);
5856 		}
5857 		return res;
5858 	}
5859 	return -EINVAL;
5860 }
5861 static DRIVER_ATTR_RW(ndelay);
5862 
5863 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5864 {
5865 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5866 }
5867 
5868 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5869 			  size_t count)
5870 {
5871 	int opts;
5872 	char work[20];
5873 
5874 	if (sscanf(buf, "%10s", work) == 1) {
5875 		if (strncasecmp(work, "0x", 2) == 0) {
5876 			if (kstrtoint(work + 2, 16, &opts) == 0)
5877 				goto opts_done;
5878 		} else {
5879 			if (kstrtoint(work, 10, &opts) == 0)
5880 				goto opts_done;
5881 		}
5882 	}
5883 	return -EINVAL;
5884 opts_done:
5885 	sdebug_opts = opts;
5886 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5887 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5888 	tweak_cmnd_count();
5889 	return count;
5890 }
5891 static DRIVER_ATTR_RW(opts);
5892 
5893 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5894 {
5895 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5896 }
5897 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5898 			   size_t count)
5899 {
5900 	int n;
5901 
5902 	/* Cannot change from or to TYPE_ZBC with sysfs */
5903 	if (sdebug_ptype == TYPE_ZBC)
5904 		return -EINVAL;
5905 
5906 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5907 		if (n == TYPE_ZBC)
5908 			return -EINVAL;
5909 		sdebug_ptype = n;
5910 		return count;
5911 	}
5912 	return -EINVAL;
5913 }
5914 static DRIVER_ATTR_RW(ptype);
5915 
5916 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5917 {
5918 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5919 }
5920 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5921 			    size_t count)
5922 {
5923 	int n;
5924 
5925 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5926 		sdebug_dsense = n;
5927 		return count;
5928 	}
5929 	return -EINVAL;
5930 }
5931 static DRIVER_ATTR_RW(dsense);
5932 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggling fake_rw also manages the backing store: switching 1 -> 0 needs a
 * real store (re-used or freshly allocated) and points every host at it;
 * switching 0 -> 1 erases all stores apart from the first. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);	/* fake_rw=0 means a store is needed */
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize old and new values to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				/* no store yet: allocate the first one */
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* re-activate the (kept) first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5980 
5981 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5982 {
5983 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5984 }
5985 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5986 			      size_t count)
5987 {
5988 	int n;
5989 
5990 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5991 		sdebug_no_lun_0 = n;
5992 		return count;
5993 	}
5994 	return -EINVAL;
5995 }
5996 static DRIVER_ATTR_RW(no_lun_0);
5997 
5998 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
5999 {
6000 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6001 }
6002 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6003 			      size_t count)
6004 {
6005 	int n;
6006 
6007 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6008 		sdebug_num_tgts = n;
6009 		sdebug_max_tgts_luns();
6010 		return count;
6011 	}
6012 	return -EINVAL;
6013 }
6014 static DRIVER_ATTR_RW(num_tgts);
6015 
/* Store size in MiB; settable only at load time via the dev_size_mb
 * module parameter, hence read-only here. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
6021 
6022 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6023 {
6024 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6025 }
6026 
6027 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6028 				    size_t count)
6029 {
6030 	bool v;
6031 
6032 	if (kstrtobool(buf, &v))
6033 		return -EINVAL;
6034 
6035 	sdebug_per_host_store = v;
6036 	return count;
6037 }
6038 static DRIVER_ATTR_RW(per_host_store);
6039 
/* Partitions created per device; load-time only (num_parts parameter). */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
6045 
6046 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6047 {
6048 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6049 }
6050 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6051 			       size_t count)
6052 {
6053 	int nth;
6054 	char work[20];
6055 
6056 	if (sscanf(buf, "%10s", work) == 1) {
6057 		if (strncasecmp(work, "0x", 2) == 0) {
6058 			if (kstrtoint(work + 2, 16, &nth) == 0)
6059 				goto every_nth_done;
6060 		} else {
6061 			if (kstrtoint(work, 10, &nth) == 0)
6062 				goto every_nth_done;
6063 		}
6064 	}
6065 	return -EINVAL;
6066 
6067 every_nth_done:
6068 	sdebug_every_nth = nth;
6069 	if (nth && !sdebug_statistics) {
6070 		pr_info("every_nth needs statistics=1, set it\n");
6071 		sdebug_statistics = true;
6072 	}
6073 	tweak_cmnd_count();
6074 	return count;
6075 }
6076 static DRIVER_ATTR_RW(every_nth);
6077 
6078 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6079 {
6080 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6081 }
6082 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6083 			      size_t count)
6084 {
6085 	int n;
6086 	bool changed;
6087 
6088 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6089 		if (n > 256) {
6090 			pr_warn("max_luns can be no more than 256\n");
6091 			return -EINVAL;
6092 		}
6093 		changed = (sdebug_max_luns != n);
6094 		sdebug_max_luns = n;
6095 		sdebug_max_tgts_luns();
6096 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6097 			struct sdebug_host_info *sdhp;
6098 			struct sdebug_dev_info *dp;
6099 
6100 			spin_lock(&sdebug_host_list_lock);
6101 			list_for_each_entry(sdhp, &sdebug_host_list,
6102 					    host_list) {
6103 				list_for_each_entry(dp, &sdhp->dev_info_list,
6104 						    dev_list) {
6105 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6106 						dp->uas_bm);
6107 				}
6108 			}
6109 			spin_unlock(&sdebug_host_list_lock);
6110 		}
6111 		return count;
6112 	}
6113 	return -EINVAL;
6114 }
6115 static DRIVER_ATTR_RW(max_luns);
6116 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* rejected when host_max_queue is non-zero (queue size then fixed) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k := highest in-use slot index over all submit queues;
		 * NOTE(review): find_last_bit() returns SDEBUG_CANQUEUE for
		 * an empty bitmap, so any idle queue forces k to the max */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)	/* commands in flight above the new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6153 
/* Per-host queued command limit; load-time only (see comment below). */
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);
6164 
/* Whether upper-level driver (e.g. sd) attachment is suppressed; load-time
 * only. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Simulated SCSI standard level; load-time only. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
6176 
6177 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6178 {
6179 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6180 }
6181 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6182 				size_t count)
6183 {
6184 	int n;
6185 	bool changed;
6186 
6187 	/* Ignore capacity change for ZBC drives for now */
6188 	if (sdeb_zbc_in_use)
6189 		return -ENOTSUPP;
6190 
6191 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6192 		changed = (sdebug_virtual_gb != n);
6193 		sdebug_virtual_gb = n;
6194 		sdebug_capacity = get_sdebug_capacity();
6195 		if (changed) {
6196 			struct sdebug_host_info *sdhp;
6197 			struct sdebug_dev_info *dp;
6198 
6199 			spin_lock(&sdebug_host_list_lock);
6200 			list_for_each_entry(sdhp, &sdebug_host_list,
6201 					    host_list) {
6202 				list_for_each_entry(dp, &sdhp->dev_info_list,
6203 						    dev_list) {
6204 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6205 						dp->uas_bm);
6206 				}
6207 			}
6208 			spin_unlock(&sdebug_host_list_lock);
6209 		}
6210 		return count;
6211 	}
6212 	return -EINVAL;
6213 }
6214 static DRIVER_ATTR_RW(virtual_gb);
6215 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Positive value: add that many hosts, re-using stores marked not-in-use
 * when per-host stores are active; negative value: remove that many. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	/* per-host stores only apply when not faking reads/writes */
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* look for an idle store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6259 
6260 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6261 {
6262 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6263 }
6264 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6265 				    size_t count)
6266 {
6267 	int n;
6268 
6269 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6270 		sdebug_vpd_use_hostno = n;
6271 		return count;
6272 	}
6273 	return -EINVAL;
6274 }
6275 static DRIVER_ATTR_RW(vpd_use_hostno);
6276 
6277 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6278 {
6279 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6280 }
6281 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6282 				size_t count)
6283 {
6284 	int n;
6285 
6286 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6287 		if (n > 0)
6288 			sdebug_statistics = true;
6289 		else {
6290 			clear_queue_stats();
6291 			sdebug_statistics = false;
6292 		}
6293 		return count;
6294 	}
6295 	return -EINVAL;
6296 }
6297 static DRIVER_ATTR_RW(statistics);
6298 
/* Logical block size in bytes; load-time only. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

/* Number of block-mq submit queues; load-time only. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

/* Data integrity extensions mask; load-time only. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* Data integrity field (protection) type; load-time only. */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* Protection guard checksum selector; load-time only. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* Application tag ownership; load-time only. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
6334 
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning the whole range is "mapped" */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* print the first store's provisioning bitmap as a block list */
	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above leaves room for the trailing newline */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6356 
6357 static ssize_t random_show(struct device_driver *ddp, char *buf)
6358 {
6359 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6360 }
6361 
6362 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6363 			    size_t count)
6364 {
6365 	bool v;
6366 
6367 	if (kstrtobool(buf, &v))
6368 		return -EINVAL;
6369 
6370 	sdebug_random = v;
6371 	return count;
6372 }
6373 static DRIVER_ATTR_RW(random);
6374 
6375 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6376 {
6377 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6378 }
6379 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6380 			       size_t count)
6381 {
6382 	int n;
6383 
6384 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6385 		sdebug_removable = (n > 0);
6386 		return count;
6387 	}
6388 	return -EINVAL;
6389 }
6390 static DRIVER_ATTR_RW(removable);
6391 
6392 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6393 {
6394 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6395 }
6396 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6397 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6398 			       size_t count)
6399 {
6400 	int n;
6401 
6402 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6403 		sdebug_host_lock = (n > 0);
6404 		return count;
6405 	}
6406 	return -EINVAL;
6407 }
6408 static DRIVER_ATTR_RW(host_lock);
6409 
6410 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6411 {
6412 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6413 }
6414 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6415 			    size_t count)
6416 {
6417 	int n;
6418 
6419 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6420 		sdebug_strict = (n > 0);
6421 		return count;
6422 	}
6423 	return -EINVAL;
6424 }
6425 static DRIVER_ATTR_RW(strict);
6426 
6427 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6428 {
6429 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6430 }
6431 static DRIVER_ATTR_RO(uuid_ctl);
6432 
6433 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6434 {
6435 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6436 }
6437 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6438 			     size_t count)
6439 {
6440 	int ret, n;
6441 
6442 	ret = kstrtoint(buf, 0, &n);
6443 	if (ret)
6444 		return ret;
6445 	sdebug_cdb_len = n;
6446 	all_config_cdb_len();
6447 	return count;
6448 }
6449 static DRIVER_ATTR_RW(cdb_len);
6450 
/*
 * Three alternate spellings accepted for the "zbc" module/driver parameter;
 * sdeb_zbc_model_str() tries each table in turn.  All are indexed by the
 * BLK_ZONED_* enum so a matched index IS the zoned model value.
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Short-form spellings. */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric spellings. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6468 
6469 static int sdeb_zbc_model_str(const char *cp)
6470 {
6471 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6472 
6473 	if (res < 0) {
6474 		res = sysfs_match_string(zbc_model_strs_b, cp);
6475 		if (res < 0) {
6476 			res = sysfs_match_string(zbc_model_strs_c, cp);
6477 			if (res < 0)
6478 				return -EINVAL;
6479 		}
6480 	}
6481 	return res;
6482 }
6483 
6484 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6485 {
6486 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6487 			 zbc_model_strs_a[sdeb_zbc_model]);
6488 }
6489 static DRIVER_ATTR_RO(zbc);
6490 
6491 /* Note: The following array creates attribute files in the
6492    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6493    files (over those found in the /sys/module/scsi_debug/parameters
6494    directory) is that auxiliary actions can be triggered when an attribute
6495    is changed. For example see: add_host_store() above.
6496  */
6497 
/* Driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(sdebug_drv);
6537 
6538 static struct device *pseudo_primary;
6539 
6540 static int __init scsi_debug_init(void)
6541 {
6542 	bool want_store = (sdebug_fake_rw == 0);
6543 	unsigned long sz;
6544 	int k, ret, hosts_to_add;
6545 	int idx = -1;
6546 
6547 	ramdisk_lck_a[0] = &atomic_rw;
6548 	ramdisk_lck_a[1] = &atomic_rw2;
6549 	atomic_set(&retired_max_queue, 0);
6550 
6551 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6552 		pr_warn("ndelay must be less than 1 second, ignored\n");
6553 		sdebug_ndelay = 0;
6554 	} else if (sdebug_ndelay > 0)
6555 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6556 
6557 	switch (sdebug_sector_size) {
6558 	case  512:
6559 	case 1024:
6560 	case 2048:
6561 	case 4096:
6562 		break;
6563 	default:
6564 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6565 		return -EINVAL;
6566 	}
6567 
6568 	switch (sdebug_dif) {
6569 	case T10_PI_TYPE0_PROTECTION:
6570 		break;
6571 	case T10_PI_TYPE1_PROTECTION:
6572 	case T10_PI_TYPE2_PROTECTION:
6573 	case T10_PI_TYPE3_PROTECTION:
6574 		have_dif_prot = true;
6575 		break;
6576 
6577 	default:
6578 		pr_err("dif must be 0, 1, 2 or 3\n");
6579 		return -EINVAL;
6580 	}
6581 
6582 	if (sdebug_num_tgts < 0) {
6583 		pr_err("num_tgts must be >= 0\n");
6584 		return -EINVAL;
6585 	}
6586 
6587 	if (sdebug_guard > 1) {
6588 		pr_err("guard must be 0 or 1\n");
6589 		return -EINVAL;
6590 	}
6591 
6592 	if (sdebug_ato > 1) {
6593 		pr_err("ato must be 0 or 1\n");
6594 		return -EINVAL;
6595 	}
6596 
6597 	if (sdebug_physblk_exp > 15) {
6598 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6599 		return -EINVAL;
6600 	}
6601 	if (sdebug_max_luns > 256) {
6602 		pr_warn("max_luns can be no more than 256, use default\n");
6603 		sdebug_max_luns = DEF_MAX_LUNS;
6604 	}
6605 
6606 	if (sdebug_lowest_aligned > 0x3fff) {
6607 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6608 		return -EINVAL;
6609 	}
6610 
6611 	if (submit_queues < 1) {
6612 		pr_err("submit_queues must be 1 or more\n");
6613 		return -EINVAL;
6614 	}
6615 
6616 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6617 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6618 		return -EINVAL;
6619 	}
6620 
6621 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6622 	    (sdebug_host_max_queue < 0)) {
6623 		pr_err("host_max_queue must be in range [0 %d]\n",
6624 		       SDEBUG_CANQUEUE);
6625 		return -EINVAL;
6626 	}
6627 
6628 	if (sdebug_host_max_queue &&
6629 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6630 		sdebug_max_queue = sdebug_host_max_queue;
6631 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6632 			sdebug_max_queue);
6633 	}
6634 
6635 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6636 			       GFP_KERNEL);
6637 	if (sdebug_q_arr == NULL)
6638 		return -ENOMEM;
6639 	for (k = 0; k < submit_queues; ++k)
6640 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6641 
6642 	/*
6643 	 * check for host managed zoned block device specified with
6644 	 * ptype=0x14 or zbc=XXX.
6645 	 */
6646 	if (sdebug_ptype == TYPE_ZBC) {
6647 		sdeb_zbc_model = BLK_ZONED_HM;
6648 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6649 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6650 		if (k < 0) {
6651 			ret = k;
6652 			goto free_vm;
6653 		}
6654 		sdeb_zbc_model = k;
6655 		switch (sdeb_zbc_model) {
6656 		case BLK_ZONED_NONE:
6657 		case BLK_ZONED_HA:
6658 			sdebug_ptype = TYPE_DISK;
6659 			break;
6660 		case BLK_ZONED_HM:
6661 			sdebug_ptype = TYPE_ZBC;
6662 			break;
6663 		default:
6664 			pr_err("Invalid ZBC model\n");
6665 			return -EINVAL;
6666 		}
6667 	}
6668 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6669 		sdeb_zbc_in_use = true;
6670 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6671 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6672 	}
6673 
6674 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6675 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6676 	if (sdebug_dev_size_mb < 1)
6677 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6678 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6679 	sdebug_store_sectors = sz / sdebug_sector_size;
6680 	sdebug_capacity = get_sdebug_capacity();
6681 
6682 	/* play around with geometry, don't waste too much on track 0 */
6683 	sdebug_heads = 8;
6684 	sdebug_sectors_per = 32;
6685 	if (sdebug_dev_size_mb >= 256)
6686 		sdebug_heads = 64;
6687 	else if (sdebug_dev_size_mb >= 16)
6688 		sdebug_heads = 32;
6689 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6690 			       (sdebug_sectors_per * sdebug_heads);
6691 	if (sdebug_cylinders_per >= 1024) {
6692 		/* other LLDs do this; implies >= 1GB ram disk ... */
6693 		sdebug_heads = 255;
6694 		sdebug_sectors_per = 63;
6695 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6696 			       (sdebug_sectors_per * sdebug_heads);
6697 	}
6698 	if (scsi_debug_lbp()) {
6699 		sdebug_unmap_max_blocks =
6700 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6701 
6702 		sdebug_unmap_max_desc =
6703 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6704 
6705 		sdebug_unmap_granularity =
6706 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6707 
6708 		if (sdebug_unmap_alignment &&
6709 		    sdebug_unmap_granularity <=
6710 		    sdebug_unmap_alignment) {
6711 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6712 			ret = -EINVAL;
6713 			goto free_q_arr;
6714 		}
6715 	}
6716 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6717 	if (want_store) {
6718 		idx = sdebug_add_store();
6719 		if (idx < 0) {
6720 			ret = idx;
6721 			goto free_q_arr;
6722 		}
6723 	}
6724 
6725 	pseudo_primary = root_device_register("pseudo_0");
6726 	if (IS_ERR(pseudo_primary)) {
6727 		pr_warn("root_device_register() error\n");
6728 		ret = PTR_ERR(pseudo_primary);
6729 		goto free_vm;
6730 	}
6731 	ret = bus_register(&pseudo_lld_bus);
6732 	if (ret < 0) {
6733 		pr_warn("bus_register error: %d\n", ret);
6734 		goto dev_unreg;
6735 	}
6736 	ret = driver_register(&sdebug_driverfs_driver);
6737 	if (ret < 0) {
6738 		pr_warn("driver_register error: %d\n", ret);
6739 		goto bus_unreg;
6740 	}
6741 
6742 	hosts_to_add = sdebug_add_host;
6743 	sdebug_add_host = 0;
6744 
6745 	for (k = 0; k < hosts_to_add; k++) {
6746 		if (want_store && k == 0) {
6747 			ret = sdebug_add_host_helper(idx);
6748 			if (ret < 0) {
6749 				pr_err("add_host_helper k=%d, error=%d\n",
6750 				       k, -ret);
6751 				break;
6752 			}
6753 		} else {
6754 			ret = sdebug_do_add_host(want_store &&
6755 						 sdebug_per_host_store);
6756 			if (ret < 0) {
6757 				pr_err("add_host k=%d error=%d\n", k, -ret);
6758 				break;
6759 			}
6760 		}
6761 	}
6762 	if (sdebug_verbose)
6763 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6764 
6765 	return 0;
6766 
6767 bus_unreg:
6768 	bus_unregister(&pseudo_lld_bus);
6769 dev_unreg:
6770 	root_device_unregister(pseudo_primary);
6771 free_vm:
6772 	sdebug_erase_store(idx, NULL);
6773 free_q_arr:
6774 	kfree(sdebug_q_arr);
6775 	return ret;
6776 }
6777 
6778 static void __exit scsi_debug_exit(void)
6779 {
6780 	int k = sdebug_num_hosts;
6781 
6782 	stop_all_queued();
6783 	for (; k; k--)
6784 		sdebug_do_remove_host(true);
6785 	free_all_queued();
6786 	driver_unregister(&sdebug_driverfs_driver);
6787 	bus_unregister(&pseudo_lld_bus);
6788 	root_device_unregister(pseudo_primary);
6789 
6790 	sdebug_erase_all_stores(false);
6791 	xa_destroy(per_store_ap);
6792 }
6793 
/* device_initcall (not module_init) so built-in init runs after driver core */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
6796 
/*
 * Device-core release callback for an emulated adapter: invoked when the
 * last reference to the embedded struct device is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6804 
6805 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6806 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6807 {
6808 	if (idx < 0)
6809 		return;
6810 	if (!sip) {
6811 		if (xa_empty(per_store_ap))
6812 			return;
6813 		sip = xa_load(per_store_ap, idx);
6814 		if (!sip)
6815 			return;
6816 	}
6817 	vfree(sip->map_storep);
6818 	vfree(sip->dif_storep);
6819 	vfree(sip->storep);
6820 	xa_erase(per_store_ap, idx);
6821 	kfree(sip);
6822 }
6823 
/*
 * Erase every backing store, optionally preserving the first one found.
 * Assume apart_from_first==false only in shutdown case.
 */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			/* skip only the first entry visited, erase the rest */
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/* still true here means the loop never ran, i.e. xarray was empty */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
6839 
/*
 * Allocate a new backing store (ramdisk plus optional PI and LBP bitmaps)
 * and insert it into the per_store_ap xarray.
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536 (xal.max is an inclusive index bound).
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* insert first so the index is known before the big allocations */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* frees sip and any sub-allocations, and erases the xarray slot */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
6922 
6923 static int sdebug_add_host_helper(int per_host_idx)
6924 {
6925 	int k, devs_per_host, idx;
6926 	int error = -ENOMEM;
6927 	struct sdebug_host_info *sdbg_host;
6928 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6929 
6930 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6931 	if (!sdbg_host)
6932 		return -ENOMEM;
6933 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6934 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6935 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6936 	sdbg_host->si_idx = idx;
6937 
6938 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6939 
6940 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6941 	for (k = 0; k < devs_per_host; k++) {
6942 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6943 		if (!sdbg_devinfo)
6944 			goto clean;
6945 	}
6946 
6947 	spin_lock(&sdebug_host_list_lock);
6948 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6949 	spin_unlock(&sdebug_host_list_lock);
6950 
6951 	sdbg_host->dev.bus = &pseudo_lld_bus;
6952 	sdbg_host->dev.parent = pseudo_primary;
6953 	sdbg_host->dev.release = &sdebug_release_adapter;
6954 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6955 
6956 	error = device_register(&sdbg_host->dev);
6957 	if (error)
6958 		goto clean;
6959 
6960 	++sdebug_num_hosts;
6961 	return 0;
6962 
6963 clean:
6964 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6965 				 dev_list) {
6966 		list_del(&sdbg_devinfo->dev_list);
6967 		kfree(sdbg_devinfo->zstate);
6968 		kfree(sdbg_devinfo);
6969 	}
6970 	kfree(sdbg_host);
6971 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
6972 	return error;
6973 }
6974 
6975 static int sdebug_do_add_host(bool mk_new_store)
6976 {
6977 	int ph_idx = sdeb_most_recent_idx;
6978 
6979 	if (mk_new_store) {
6980 		ph_idx = sdebug_add_store();
6981 		if (ph_idx < 0)
6982 			return ph_idx;
6983 	}
6984 	return sdebug_add_host_helper(ph_idx);
6985 }
6986 
/*
 * Remove the most recently added host.  When not shutting down (the_end
 * false), mark its backing store as unused if no other host shares it so
 * a later add can reclaim the slot.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the last (most recently added) host on the list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host using the same backing store index? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* drops the device-core reference; release callback frees the host */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7026 
/*
 * .change_queue_depth handler.  Clamps qdepth to [1, SDEBUG_CANQUEUE+10]
 * (deliberately allowed to exceed qc_arr for testing) and applies it while
 * all submit queues are blocked.  Returns the resulting queue depth or
 * -ENODEV if the device has no sdebug private data.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
7054 
7055 static bool fake_timeout(struct scsi_cmnd *scp)
7056 {
7057 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7058 		if (sdebug_every_nth < -1)
7059 			sdebug_every_nth = -1;
7060 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7061 			return true; /* ignore command causing timeout */
7062 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7063 			 scsi_medium_access_command(scp))
7064 			return true; /* time out reads and writes */
7065 	}
7066 	return false;
7067 }
7068 
/*
 * queuecommand() entry point for the emulated host.  Looks up the CDB in
 * opcode_info_arr (resolving service actions when an opcode has several
 * variants), applies the option-driven checks (unit attentions, strict CDB
 * mask, stopped state, error injection) and schedules the response with
 * the configured delay.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* when verbose, hex-dump the CDB (up to 32 bytes) to the log */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action (low or high form) */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest offending bit for sense info */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the command skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7237 
/* SCSI mid-layer host template for the emulated adapters. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* may be overridden in probe */
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7264 
/*
 * Bus probe: allocate and register a Scsi_Host for one emulated adapter,
 * configuring queue counts and T10 PI (DIF/DIX) capabilities from the
 * module parameters, then kick off a scan.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	if (sdebug_host_max_queue)
		sdebug_driver_template.can_queue = sdebug_host_max_queue;
	else
		sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/* hostdata holds just a pointer back to sdbg_host (hence sizeof ptr) */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host. If the host
	 * has a limit of hostwide max commands, then do not set.
	 */
	if (!sdebug_host_max_queue)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate the dif/dix module parameters into SHOST_* prot bits */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7369 
7370 static int sdebug_driver_remove(struct device *dev)
7371 {
7372 	struct sdebug_host_info *sdbg_host;
7373 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7374 
7375 	sdbg_host = to_sdebug_host(dev);
7376 
7377 	if (!sdbg_host) {
7378 		pr_err("Unable to locate host info\n");
7379 		return -ENODEV;
7380 	}
7381 
7382 	scsi_remove_host(sdbg_host->shost);
7383 
7384 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7385 				 dev_list) {
7386 		list_del(&sdbg_devinfo->dev_list);
7387 		kfree(sdbg_devinfo->zstate);
7388 		kfree(sdbg_devinfo);
7389 	}
7390 
7391 	scsi_host_put(sdbg_host->shost);
7392 	return 0;
7393 }
7394 
/* Every device on the pseudo bus matches its single driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7400 
/* The "pseudo" bus that hosts the emulated adapters. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
7408