xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision fc772314)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__	/* prefix pr_*() output with module and function name */
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";	/* yyyymmdd of last version bump */
65 
66 #define MY_NAME "scsi_debug"	/* NOTE(review): presumably used for log/sysfs identification -- confirm at call sites */
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e	/* presumably paired with TARGET_CHANGED_ASC */
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3	/* paired with INSUFF_RES_ASC */
85 #define POWER_ON_RESET_ASCQ 0x0	/* presumably paired with UA_RESET_ASC */
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4	/* presumably paired with WRITE_ERROR_ASC */
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1	/* NOTE(review): likely DIF application-tag related -- confirm */
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999	/* sentinel: jdelay superseded (presumably by ndelay) -- confirm */
157 
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB	128
160 #define DEF_ZBC_MAX_OPEN_ZONES	8
161 #define DEF_ZBC_NR_CONV_ZONES	1
162 
163 #define SDEBUG_LUN_0_VAL 0
164 
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE		1
167 #define SDEBUG_OPT_MEDIUM_ERR		2
168 #define SDEBUG_OPT_TIMEOUT		4
169 #define SDEBUG_OPT_RECOVERED_ERR	8
170 #define SDEBUG_OPT_TRANSPORT_ERR	16
171 #define SDEBUG_OPT_DIF_ERR		32
172 #define SDEBUG_OPT_DIX_ERR		64
173 #define SDEBUG_OPT_MAC_TIMEOUT		128
174 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
175 #define SDEBUG_OPT_Q_NOISE		0x200
176 #define SDEBUG_OPT_ALL_TSF		0x400
177 #define SDEBUG_OPT_RARE_TSF		0x800
178 #define SDEBUG_OPT_N_WCE		0x1000
179 #define SDEBUG_OPT_RESET_NOISE		0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
181 #define SDEBUG_OPT_HOST_BUSY		0x8000
182 #define SDEBUG_OPT_CMD_ABORT		0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 			      SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 				  SDEBUG_OPT_TRANSPORT_ERR | \
187 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 				  SDEBUG_OPT_SHORT_TRANSFER | \
189 				  SDEBUG_OPT_HOST_BUSY | \
190 				  SDEBUG_OPT_CMD_ABORT)	/* the error-injecting opts */
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
193 
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195  * priority order. In the subset implemented here lower numbers have higher
196  * priority. The UA numbers should be a sequence starting from 0 with
197  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
206 
207 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
208  * sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
211 
212 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
213  * or "peripheral device" addressing (value 0) */
214 #define SAM2_LUN_ADDRESS_METHOD 0
215 
216 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
217  * (for response) per submit queue at one time. Can be reduced by max_queue
218  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
219  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
220  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
221  * but cannot exceed SDEBUG_CANQUEUE .
222  */
223 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
224 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
225 #define DEF_CMD_PER_LUN  255
226 
227 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
228 #define F_D_IN			1	/* Data-in command (e.g. READ) */
229 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
230 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
231 #define F_D_UNKN		8
232 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
233 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
234 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
235 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
236 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
237 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
238 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
239 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
240 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
241 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
242 
243 /* Useful combinations of the above flags */
244 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
245 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
246 #define FF_SA (F_SA_HIGH | F_SA_LOW)
247 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
248 
249 #define SDEBUG_MAX_PARTS 4
250 
251 #define SDEBUG_MAX_CMD_LEN 32
252 
253 #define SDEB_XA_NOT_IN_USE XA_MARK_1	/* xarray mark flagging entries not currently in use */
254 
255 /* Zone types (zbcr05 table 25) */
256 enum sdebug_z_type {
257 	ZBC_ZONE_TYPE_CNV	= 0x1,	/* conventional */
258 	ZBC_ZONE_TYPE_SWR	= 0x2,	/* sequential write required */
259 	ZBC_ZONE_TYPE_SWP	= 0x3,	/* sequential write preferred */
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,	/* note: ZC5/ZC6 values are not in ZC-number order */
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;	/* conventional or sequential-write (see above) */
276 	enum sdebug_z_cond z_cond;	/* current zone condition (see above) */
277 	bool z_non_seq_resource;	/* NOTE(review): presumably ZBC non-sequential resources attribute -- confirm */
278 	unsigned int z_size;		/* zone size; NOTE(review): units (sectors?) not evident here -- confirm */
279 	sector_t z_start;		/* first sector of the zone */
280 	sector_t z_wp;			/* write pointer position */
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;	/* presumably linked on sdbg_host->dev_info_list */
285 	unsigned int channel;		/* SCSI address of this pseudo device ... */
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;			/* logical unit name (uuid) */
289 	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
290 	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* */
291 	atomic_t num_in_q;
292 	atomic_t stopped;	/* 1: by SSU, 2: device start */
293 	bool used;		/* presumably: this slot holds a live device */
294 
295 	/* For ZBC devices */
296 	enum blk_zoned_model zmodel;
297 	unsigned int zsize;		/* zone size */
298 	unsigned int zsize_shift;	/* log2 of zsize (assumed; confirm at init code) */
299 	unsigned int nr_zones;
300 	unsigned int nr_conv_zones;	/* leading conventional zones */
301 	unsigned int nr_imp_open;	/* zones implicitly open */
302 	unsigned int nr_exp_open;	/* zones explicitly open */
303 	unsigned int nr_closed;
304 	unsigned int max_open;		/* cap on open zones (DEF_ZBC_MAX_OPEN_ZONES default) */
305 	ktime_t create_ts;	/* time since bootup that this device was created */
306 	struct sdeb_zone_state *zstate;	/* per-zone state array, nr_zones entries (assumed) */
307 };
308 
309 struct sdebug_host_info {
310 	struct list_head host_list;	/* NOTE(review): presumably node in a driver-wide host list -- confirm */
311 	int si_idx;	/* sdeb_store_info (per host) xarray index */
312 	struct Scsi_Host *shost;	/* associated SCSI mid-level host */
313 	struct device dev;		/* embedded device; recovered via to_sdebug_host() */
314 	struct list_head dev_info_list;	/* list of struct sdebug_dev_info */
315 };
316 
317 /* There is an xarray of pointers to this struct's objects, one per host */
318 struct sdeb_store_info {
319 	rwlock_t macc_lck;	/* for atomic media access on this store */
320 	u8 *storep;		/* user data storage (ram) */
321 	struct t10_pi_tuple *dif_storep; /* protection info */
322 	void *map_storep;	/* provisioning map */
323 };
324 
/* convert an embedded 'dev' member back to its enclosing sdebug_host_info */
325 #define to_sdebug_host(d)	\
326 	container_of(d, struct sdebug_host_info, dev)
327 
328 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
329 		      SDEB_DEFER_WQ = 2};	/* how a response is deferred: not at all, hrtimer, or workqueue */
330 
331 struct sdebug_defer {
332 	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
333 	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
334 	int sqa_idx;	/* index of sdebug_queue array */
335 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
336 	int hc_idx;	/* hostwide tag index */
337 	int issuing_cpu;	/* submission cpu; compared at completion (see sdebug_miss_cpus) */
338 	bool init_hrt;	/* hrt has been initialized */
339 	bool init_wq;	/* ew has been initialized */
340 	bool aborted;	/* true when blk_abort_request() already called */
341 	enum sdeb_defer_type defer_t;
342 };
343 
344 struct sdebug_queued_cmd {
345 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
346 	 * instance indicates this slot is in use.
347 	 */
348 	struct sdebug_defer *sd_dp;	/* deferred-completion state, if any */
349 	struct scsi_cmnd *a_cmnd;	/* the mid-level command being serviced */
350 };
351 
352 struct sdebug_queue {
353 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
354 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* set bit <--> qc_arr slot busy */
355 	spinlock_t qc_lock;	/* NOTE(review): presumably guards qc_arr/in_use_bm -- confirm */
356 	atomic_t blocked;	/* to temporarily stop more being queued */
357 };
358 
359 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
360 static atomic_t sdebug_completions;  /* count of deferred completions */
361 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
362 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
363 static atomic_t sdeb_inject_pending; /* NOTE(review): presumably set when an error injection is armed -- confirm */
364 
365 struct opcode_info_t {
366 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
367 				/* for terminating element */
368 	u8 opcode;		/* if num_attached > 0, preferred */
369 	u16 sa;			/* service action; meaningful when F_SA_LOW or F_SA_HIGH set in flags */
370 	u32 flags;		/* OR-ed set of SDEB_F_* */
371 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response handler */
372 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
373 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
374 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
375 };
376 
377 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * Index 0 (SDEB_I_INVALID_OPCODE) doubles as the catch-all for opcodes that
 * have no entry in opcode_ind_arr[] below. */
378 enum sdeb_opcode_index {
379 	SDEB_I_INVALID_OPCODE =	0,
380 	SDEB_I_INQUIRY = 1,
381 	SDEB_I_REPORT_LUNS = 2,
382 	SDEB_I_REQUEST_SENSE = 3,
383 	SDEB_I_TEST_UNIT_READY = 4,
384 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
385 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
386 	SDEB_I_LOG_SENSE = 7,
387 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
388 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
389 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
390 	SDEB_I_START_STOP = 11,
391 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
392 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
393 	SDEB_I_MAINT_IN = 14,
394 	SDEB_I_MAINT_OUT = 15,
395 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
396 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
397 	SDEB_I_RESERVE = 18,		/* 6, 10 */
398 	SDEB_I_RELEASE = 19,		/* 6, 10 */
399 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
400 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
401 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
402 	SDEB_I_SEND_DIAG = 23,
403 	SDEB_I_UNMAP = 24,
404 	SDEB_I_WRITE_BUFFER = 25,
405 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
406 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
407 	SDEB_I_COMP_WRITE = 28,
408 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
409 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
410 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
411 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
412 };
413 
414 
415 static const unsigned char opcode_ind_arr[256] = {	/* 0 => SDEB_I_INVALID_OPCODE */
416 /* 0x0; 0x0->0x1f: 6 byte cdbs */
417 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
418 	    0, 0, 0, 0,
419 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
420 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
421 	    SDEB_I_RELEASE,
422 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
423 	    SDEB_I_ALLOW_REMOVAL, 0,
424 /* 0x20; 0x20->0x3f: 10 byte cdbs */
425 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
426 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
427 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
428 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
429 /* 0x40; 0x40->0x5f: 10 byte cdbs */
430 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
431 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
432 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
433 	    SDEB_I_RELEASE,
434 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
435 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
436 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
438 	0, SDEB_I_VARIABLE_LEN,
439 /* 0x80; 0x80->0x9f: 16 byte cdbs */
440 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
441 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
442 	0, 0, 0, SDEB_I_VERIFY,
443 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
444 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
445 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
446 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
447 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
448 	     SDEB_I_MAINT_OUT, 0, 0, 0,
449 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
450 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0,
452 	0, 0, 0, 0, 0, 0, 0, 0,
453 /* 0xc0; 0xc0->0xff: vendor specific */
454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 };
459 
460 /*
461  * The following "response" functions return the SCSI mid-level's 4 byte
462  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
463  * command completion, they can mask their return value with
464  * SDEG_RES_IMMED_MASK .
465  */
466 #define SDEG_RES_IMMED_MASK 0x40000000
467 
468 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);	/* handlers wired into opcode tables below */
469 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 
498 static int sdebug_do_add_host(bool mk_new_store);	/* host/store management helpers */
499 static int sdebug_add_host_helper(int per_host_idx);
500 static void sdebug_do_remove_host(bool the_end);
501 static int sdebug_add_store(void);
502 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
503 static void sdebug_erase_all_stores(bool apart_from_first);
504 
505 /*
506  * The following are overflow arrays for cdbs that "hit" the same index in
507  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
508  * should be placed in opcode_info_arr[], the others should be placed here.
509  */
510 static const struct opcode_info_t msense_iarr[] = {	/* MODE SENSE(6); chained from MODE SENSE(10) entry */
511 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
512 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
513 };
514 
515 static const struct opcode_info_t mselect_iarr[] = {	/* MODE SELECT(6); chained from MODE SELECT(10) entry */
516 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
517 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t read_iarr[] = {	/* chained from READ(16) entry */
521 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
522 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
523 	     0, 0, 0, 0} },
524 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
525 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
527 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
528 	     0xc7, 0, 0, 0, 0} },
529 };
530 
531 static const struct opcode_info_t write_iarr[] = {	/* chained from WRITE(16) entry */
532 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
533 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
534 		   0, 0, 0, 0, 0, 0} },
535 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
536 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
537 		   0, 0, 0} },
538 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
539 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
540 		   0xbf, 0xc7, 0, 0, 0, 0} },
541 };
542 
543 static const struct opcode_info_t verify_iarr[] = {	/* chained from VERIFY(16) entry */
544 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
545 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
546 		   0, 0, 0, 0, 0, 0} },
547 };
548 
549 static const struct opcode_info_t sa_in_16_iarr[] = {	/* chained from SA_IN(16)/READ CAPACITY(16) entry */
550 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
551 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
552 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
553 };
554 
555 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH; chained from the READ(32) entry */
556 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
557 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
558 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
559 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
560 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
561 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
562 };
563 
564 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN; chained from REPORT TARGET PORT GROUPS entry */
565 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
566 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
567 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
568 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
569 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
570 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
571 };
572 
573 static const struct opcode_info_t write_same_iarr[] = {	/* chained from WRITE SAME(10) entry */
574 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
575 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
576 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
577 };
578 
579 static const struct opcode_info_t reserve_iarr[] = {	/* chained from RESERVE(10) entry */
580 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
581 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
582 };
583 
584 static const struct opcode_info_t release_iarr[] = {	/* chained from RELEASE(10) entry */
585 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
586 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t sync_cache_iarr[] = {	/* chained from SYNC_CACHE(10) entry */
590 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
591 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
592 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
593 };
594 
595 static const struct opcode_info_t pre_fetch_iarr[] = {	/* chained from PRE-FETCH(10) entry */
596 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
597 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
598 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
599 };
600 
601 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16); chained from OPEN ZONE entry */
602 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
603 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
604 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
605 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
606 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
607 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
608 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
609 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
610 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
611 };
612 
613 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16); chained from REPORT ZONES entry */
614 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
615 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
616 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
617 };
618 
619 
620 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
621  * plus the terminating elements for logic that scans this table such as
622  * REPORT SUPPORTED OPERATION CODES. */
623 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
624 /* 0 */
625 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
626 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
627 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
628 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
629 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
630 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
631 	     0, 0} },					/* REPORT LUNS */
632 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
633 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
635 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 /* 5 */
637 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
638 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
639 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
640 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
641 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
642 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
643 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
644 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
645 	     0, 0, 0} },
646 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
647 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
648 	     0, 0} },
649 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
650 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
651 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
652 /* 10 */
653 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
654 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
655 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
656 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
658 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
659 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
660 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
661 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
662 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
663 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
664 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
665 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
666 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
667 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
668 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
669 				0xff, 0, 0xc7, 0, 0, 0, 0} },
670 /* 15 */
671 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
672 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
673 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
674 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
675 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
676 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
677 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
678 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
679 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
680 	     0xff, 0xff} },
681 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
682 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
683 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 	     0} },
685 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
686 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
687 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
688 	     0} },
689 /* 20 */
690 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
691 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
692 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
693 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
694 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
695 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* NOTE(review): fixed transposed initializers.  The original read
	 * {0, 0x1d, F_D_OUT, 0, ...}, which placed the F_D_OUT flag value
	 * into the u16 'sa' field and 0 into 'flags' (field order is
	 * num_attached, opcode, sa, flags).  SEND DIAGNOSTIC carries no
	 * service action here, and F_D_OUT (data-out) belongs in 'flags',
	 * matching every sibling entry in this table. */
696 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
697 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
698 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
699 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 /* 25 */
701 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
702 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
703 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
704 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
705 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
706 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
707 		 0, 0, 0, 0, 0} },
708 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
709 	    resp_sync_cache, sync_cache_iarr,
710 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
711 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
712 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
713 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
714 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
715 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
716 	    resp_pre_fetch, pre_fetch_iarr,
717 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
718 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
719 
720 /* 30 */
721 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
722 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
723 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
724 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
725 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
726 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
727 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
728 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
729 /* sentinel */
730 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
731 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
732 };
733 
/*
 * Driver-wide tunable state. The DEF_* initializers below are compile-time
 * defaults defined earlier in this file; most of these are read-mostly
 * after driver initialization.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* non-zero: use descriptor sense format */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* logical block provisioning knobs (see scsi_debug_lbp()) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
793 
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* backing stores are kept in an xarray, indexed by store index */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* counters; NOTE(review): updated by handlers elsewhere in this file */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* pre-built scsi_cmnd result values combining driver/host/SAM status bytes */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
857 
858 /* Only do the extra work involved in logical block provisioning if one or
859  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
860  * real reads and writes (i.e. not skipping them for speed).
861  */
862 static inline bool scsi_debug_lbp(void)
863 {
864 	return 0 == sdebug_fake_rw &&
865 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
866 }
867 
868 static void *lba2fake_store(struct sdeb_store_info *sip,
869 			    unsigned long long lba)
870 {
871 	struct sdeb_store_info *lsip = sip;
872 
873 	lba = do_div(lba, sdebug_store_sectors);
874 	if (!sip || !sip->storep) {
875 		WARN_ON_ONCE(true);
876 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
877 	}
878 	return lsip->storep + lba * sdebug_sector_size;
879 }
880 
881 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
882 				      sector_t sector)
883 {
884 	sector = sector_div(sector, sdebug_store_sectors);
885 
886 	return sip->dif_storep + sector;
887 }
888 
889 static void sdebug_max_tgts_luns(void)
890 {
891 	struct sdebug_host_info *sdbg_host;
892 	struct Scsi_Host *hpnt;
893 
894 	spin_lock(&sdebug_host_list_lock);
895 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
896 		hpnt = sdbg_host->shost;
897 		if ((hpnt->this_id >= 0) &&
898 		    (sdebug_num_tgts > hpnt->this_id))
899 			hpnt->max_id = sdebug_num_tgts + 1;
900 		else
901 			hpnt->max_id = sdebug_num_tgts;
902 		/* sdebug_max_luns; */
903 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
904 	}
905 	spin_unlock(&sdebug_host_list_lock);
906 }
907 
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build an ILLEGAL REQUEST sense response in scp->sense_buffer, with a
 * SENSE-KEY SPECIFIC field pointer locating the offending byte/bit.
 * c_d selects whether the bad field was in the CDB (SDEB_IN_CDB) or in
 * the data-out parameter list (SDEB_IN_DATA); that also selects the ASC.
 * Honors sdebug_dsense (descriptor vs fixed sense format).
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key specific bytes are valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format SKS offset */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
950 
951 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
952 {
953 	unsigned char *sbuff;
954 
955 	sbuff = scp->sense_buffer;
956 	if (!sbuff) {
957 		sdev_printk(KERN_ERR, scp->device,
958 			    "%s: sense_buffer is NULL\n", __func__);
959 		return;
960 	}
961 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
962 
963 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
964 
965 	if (sdebug_verbose)
966 		sdev_printk(KERN_INFO, scp->device,
967 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
968 			    my_name, key, asc, asq);
969 }
970 
/* Set ILLEGAL REQUEST with INVALID OPCODE additional sense in scp. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
975 
976 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
977 			    void __user *arg)
978 {
979 	if (sdebug_verbose) {
980 		if (0x1261 == cmd)
981 			sdev_printk(KERN_INFO, dev,
982 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
983 		else if (0x5331 == cmd)
984 			sdev_printk(KERN_INFO, dev,
985 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
986 				    __func__);
987 		else
988 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
989 				    __func__, cmd);
990 	}
991 	return -EINVAL;
992 	/* return -ENOTTY; // correct return but upsets fdisk */
993 }
994 
995 static void config_cdb_len(struct scsi_device *sdev)
996 {
997 	switch (sdebug_cdb_len) {
998 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
999 		sdev->use_10_for_rw = false;
1000 		sdev->use_16_for_rw = false;
1001 		sdev->use_10_for_ms = false;
1002 		break;
1003 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1004 		sdev->use_10_for_rw = true;
1005 		sdev->use_16_for_rw = false;
1006 		sdev->use_10_for_ms = false;
1007 		break;
1008 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1009 		sdev->use_10_for_rw = true;
1010 		sdev->use_16_for_rw = false;
1011 		sdev->use_10_for_ms = true;
1012 		break;
1013 	case 16:
1014 		sdev->use_10_for_rw = false;
1015 		sdev->use_16_for_rw = true;
1016 		sdev->use_10_for_ms = true;
1017 		break;
1018 	case 32: /* No knobs to suggest this so same as 16 for now */
1019 		sdev->use_10_for_rw = false;
1020 		sdev->use_16_for_rw = true;
1021 		sdev->use_10_for_ms = true;
1022 		break;
1023 	default:
1024 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1025 			sdebug_cdb_len);
1026 		sdev->use_10_for_rw = true;
1027 		sdev->use_16_for_rw = false;
1028 		sdev->use_10_for_ms = false;
1029 		sdebug_cdb_len = 10;
1030 		break;
1031 	}
1032 }
1033 
1034 static void all_config_cdb_len(void)
1035 {
1036 	struct sdebug_host_info *sdbg_host;
1037 	struct Scsi_Host *shost;
1038 	struct scsi_device *sdev;
1039 
1040 	spin_lock(&sdebug_host_list_lock);
1041 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1042 		shost = sdbg_host->shost;
1043 		shost_for_each_device(sdev, shost) {
1044 			config_cdb_len(sdev);
1045 		}
1046 	}
1047 	spin_unlock(&sdebug_host_list_lock);
1048 }
1049 
1050 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1051 {
1052 	struct sdebug_host_info *sdhp;
1053 	struct sdebug_dev_info *dp;
1054 
1055 	spin_lock(&sdebug_host_list_lock);
1056 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1057 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1058 			if ((devip->sdbg_host == dp->sdbg_host) &&
1059 			    (devip->target == dp->target))
1060 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1061 		}
1062 	}
1063 	spin_unlock(&sdebug_host_list_lock);
1064 }
1065 
/*
 * Report the first pending unit attention (UA) for this device, if any.
 * Builds the matching sense data in scp, clears that UA bit in
 * devip->uas_bm and returns check_condition_result; returns 0 when no
 * UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* UA is reported once: consume it */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1145 
1146 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1147 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1148 				int arr_len)
1149 {
1150 	int act_len;
1151 	struct scsi_data_buffer *sdb = &scp->sdb;
1152 
1153 	if (!sdb->length)
1154 		return 0;
1155 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1156 		return DID_ERROR << 16;
1157 
1158 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1159 				      arr, arr_len);
1160 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1161 
1162 	return 0;
1163 }
1164 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset is beyond the buffer: nothing to copy */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* only shrink resid, so out-of-order writes never grow it again */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
1191 
1192 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1193  * 'arr' or -1 if error.
1194  */
1195 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1196 			       int arr_len)
1197 {
1198 	if (!scsi_bufflen(scp))
1199 		return 0;
1200 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1201 		return -1;
1202 
1203 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1204 }
1205 
1206 
/* INQUIRY identity strings: 8 byte vendor, 16 byte product, 4 byte rev */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1213 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1214 
/* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits a sequence of identification descriptors: T10 vendor id,
 * logical unit id (UUID or NAA-3 depending on sdebug_uuid_ctl),
 * relative target port, target port NAA-3, target port group,
 * target device NAA-3 and a SCSI name string.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {	/* skipped for well-known LUs */
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad the name string */
	num += 4;
	return num;
}
1302 
/* Canned payload for the Software interface identification VPD page (0x84) */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1308 
1309 /*  Software interface identification VPD page */
1310 static int inquiry_vpd_84(unsigned char *arr)
1311 {
1312 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1313 	return sizeof(vpd84_data);
1314 }
1315 
/*
 * Append one network services descriptor for the 0x85 VPD page:
 * a 4 byte header (association/service type, 2 reserved bytes, length)
 * followed by the URL, null terminated and zero padded to a multiple of
 * 4 bytes. Returns the number of bytes written.
 */
static int put_net_addr_desc(unsigned char *arr, int assoc_svc, const char *url)
{
	int olen, plen;

	olen = strlen(url);
	plen = olen + 1;		/* room for the null terminator */
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[0] = assoc_svc;
	arr[1] = 0x0;	/* reserved */
	arr[2] = 0x0;
	arr[3] = plen;	/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns number of bytes in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: lu association, storage configuration service */
	num += put_net_addr_desc(arr + num, 0x1,
				 "https://www.kernel.org/config");
	/* 0x4: lu association, logging */
	num += put_net_addr_desc(arr + num, 0x4,
				 "http://www.kernel.org/log");
	return num;
}
1350 
1351 /* SCSI ports VPD page */
1352 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1353 {
1354 	int num = 0;
1355 	int port_a, port_b;
1356 
1357 	port_a = target_dev_id + 1;
1358 	port_b = port_a + 1;
1359 	arr[num++] = 0x0;	/* reserved */
1360 	arr[num++] = 0x0;	/* reserved */
1361 	arr[num++] = 0x0;
1362 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1363 	memset(arr + num, 0, 6);
1364 	num += 6;
1365 	arr[num++] = 0x0;
1366 	arr[num++] = 12;	/* length tp descriptor */
1367 	/* naa-5 target port identifier (A) */
1368 	arr[num++] = 0x61;	/* proto=sas, binary */
1369 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1370 	arr[num++] = 0x0;	/* reserved */
1371 	arr[num++] = 0x8;	/* length */
1372 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1373 	num += 8;
1374 	arr[num++] = 0x0;	/* reserved */
1375 	arr[num++] = 0x0;	/* reserved */
1376 	arr[num++] = 0x0;
1377 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1378 	memset(arr + num, 0, 6);
1379 	num += 6;
1380 	arr[num++] = 0x0;
1381 	arr[num++] = 12;	/* length tp descriptor */
1382 	/* naa-5 target port identifier (B) */
1383 	arr[num++] = 0x61;	/* proto=sas, binary */
1384 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1385 	arr[num++] = 0x0;	/* reserved */
1386 	arr[num++] = 0x8;	/* length */
1387 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1388 	num += 8;
1389 
1390 	return num;
1391 }
1392 
1393 
/* Canned payload for the ATA Information VPD page (0x89); copied verbatim
 * by inquiry_vpd_89(). Data starts at the page's 4th byte.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1437 
1438 /* ATA Information VPD page */
1439 static int inquiry_vpd_89(unsigned char *arr)
1440 {
1441 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1442 	return sizeof(vpd89_data);
1443 }
1444 
1445 
/* Template for the Block limits VPD page (0xb0); most fields are then
 * overwritten by inquiry_vpd_b0().
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1452 
1453 /* Block limits VPD page (SBC-3) */
1454 static int inquiry_vpd_b0(unsigned char *arr)
1455 {
1456 	unsigned int gran;
1457 
1458 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1459 
1460 	/* Optimal transfer length granularity */
1461 	if (sdebug_opt_xferlen_exp != 0 &&
1462 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1463 		gran = 1 << sdebug_opt_xferlen_exp;
1464 	else
1465 		gran = 1 << sdebug_physblk_exp;
1466 	put_unaligned_be16(gran, arr + 2);
1467 
1468 	/* Maximum Transfer Length */
1469 	if (sdebug_store_sectors > 0x400)
1470 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1471 
1472 	/* Optimal Transfer Length */
1473 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1474 
1475 	if (sdebug_lbpu) {
1476 		/* Maximum Unmap LBA Count */
1477 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1478 
1479 		/* Maximum Unmap Block Descriptor Count */
1480 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1481 	}
1482 
1483 	/* Unmap Granularity Alignment */
1484 	if (sdebug_unmap_alignment) {
1485 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1486 		arr[28] |= 0x80; /* UGAVALID */
1487 	}
1488 
1489 	/* Optimal Unmap Granularity */
1490 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1491 
1492 	/* Maximum WRITE SAME Length */
1493 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1494 
1495 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1496 
1497 	return sizeof(vpdb0_data);
1498 }
1499 
1500 /* Block device characteristics VPD page (SBC-3) */
1501 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1502 {
1503 	memset(arr, 0, 0x3c);
1504 	arr[0] = 0;
1505 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1506 	arr[2] = 0;
1507 	arr[3] = 5;	/* less than 1.8" */
1508 	if (devip->zmodel == BLK_ZONED_HA)
1509 		arr[4] = 1 << 4;	/* zoned field = 01b */
1510 
1511 	return 0x3c;
1512 }
1513 
1514 /* Logical block provisioning VPD page (SBC-4) */
1515 static int inquiry_vpd_b2(unsigned char *arr)
1516 {
1517 	memset(arr, 0, 0x4);
1518 	arr[0] = 0;			/* threshold exponent */
1519 	if (sdebug_lbpu)
1520 		arr[1] = 1 << 7;
1521 	if (sdebug_lbpws)
1522 		arr[1] |= 1 << 6;
1523 	if (sdebug_lbpws10)
1524 		arr[1] |= 1 << 5;
1525 	if (sdebug_lbprz && scsi_debug_lbp())
1526 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1527 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1528 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1529 	/* threshold_percentage=0 */
1530 	return 0x4;
1531 }
1532 
1533 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1534 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1535 {
1536 	memset(arr, 0, 0x3c);
1537 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1538 	/*
1539 	 * Set Optimal number of open sequential write preferred zones and
1540 	 * Optimal number of non-sequentially written sequential write
1541 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1542 	 * fields set to zero, apart from Max. number of open swrz_s field.
1543 	 */
1544 	put_unaligned_be32(0xffffffff, &arr[4]);
1545 	put_unaligned_be32(0xffffffff, &arr[8]);
1546 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1547 		put_unaligned_be32(devip->max_open, &arr[12]);
1548 	else
1549 		put_unaligned_be32(0xffffffff, &arr[12]);
1550 	return 0x3c;
1551 }
1552 
/* buffer sizing used when building INQUIRY responses (see resp_inquiry) */
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
1555 
1556 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1557 {
1558 	unsigned char pq_pdt;
1559 	unsigned char *arr;
1560 	unsigned char *cmd = scp->cmnd;
1561 	int alloc_len, n, ret;
1562 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1563 
1564 	alloc_len = get_unaligned_be16(cmd + 3);
1565 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1566 	if (! arr)
1567 		return DID_REQUEUE << 16;
1568 	is_disk = (sdebug_ptype == TYPE_DISK);
1569 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1570 	is_disk_zbc = (is_disk || is_zbc);
1571 	have_wlun = scsi_is_wlun(scp->device->lun);
1572 	if (have_wlun)
1573 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1574 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1575 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1576 	else
1577 		pq_pdt = (sdebug_ptype & 0x1f);
1578 	arr[0] = pq_pdt;
1579 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1580 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1581 		kfree(arr);
1582 		return check_condition_result;
1583 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1584 		int lu_id_num, port_group_id, target_dev_id, len;
1585 		char lu_id_str[6];
1586 		int host_no = devip->sdbg_host->shost->host_no;
1587 
1588 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1589 		    (devip->channel & 0x7f);
1590 		if (sdebug_vpd_use_hostno == 0)
1591 			host_no = 0;
1592 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1593 			    (devip->target * 1000) + devip->lun);
1594 		target_dev_id = ((host_no + 1) * 2000) +
1595 				 (devip->target * 1000) - 3;
1596 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1597 		if (0 == cmd[2]) { /* supported vital product data pages */
1598 			arr[1] = cmd[2];	/*sanity */
1599 			n = 4;
1600 			arr[n++] = 0x0;   /* this page */
1601 			arr[n++] = 0x80;  /* unit serial number */
1602 			arr[n++] = 0x83;  /* device identification */
1603 			arr[n++] = 0x84;  /* software interface ident. */
1604 			arr[n++] = 0x85;  /* management network addresses */
1605 			arr[n++] = 0x86;  /* extended inquiry */
1606 			arr[n++] = 0x87;  /* mode page policy */
1607 			arr[n++] = 0x88;  /* SCSI ports */
1608 			if (is_disk_zbc) {	  /* SBC or ZBC */
1609 				arr[n++] = 0x89;  /* ATA information */
1610 				arr[n++] = 0xb0;  /* Block limits */
1611 				arr[n++] = 0xb1;  /* Block characteristics */
1612 				if (is_disk)
1613 					arr[n++] = 0xb2;  /* LB Provisioning */
1614 				if (is_zbc)
1615 					arr[n++] = 0xb6;  /* ZB dev. char. */
1616 			}
1617 			arr[3] = n - 4;	  /* number of supported VPD pages */
1618 		} else if (0x80 == cmd[2]) { /* unit serial number */
1619 			arr[1] = cmd[2];	/*sanity */
1620 			arr[3] = len;
1621 			memcpy(&arr[4], lu_id_str, len);
1622 		} else if (0x83 == cmd[2]) { /* device identification */
1623 			arr[1] = cmd[2];	/*sanity */
1624 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1625 						target_dev_id, lu_id_num,
1626 						lu_id_str, len,
1627 						&devip->lu_name);
1628 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1629 			arr[1] = cmd[2];	/*sanity */
1630 			arr[3] = inquiry_vpd_84(&arr[4]);
1631 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1632 			arr[1] = cmd[2];	/*sanity */
1633 			arr[3] = inquiry_vpd_85(&arr[4]);
1634 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1635 			arr[1] = cmd[2];	/*sanity */
1636 			arr[3] = 0x3c;	/* number of following entries */
1637 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1638 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1639 			else if (have_dif_prot)
1640 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1641 			else
1642 				arr[4] = 0x0;   /* no protection stuff */
1643 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1644 		} else if (0x87 == cmd[2]) { /* mode page policy */
1645 			arr[1] = cmd[2];	/*sanity */
1646 			arr[3] = 0x8;	/* number of following entries */
1647 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1648 			arr[6] = 0x80;	/* mlus, shared */
1649 			arr[8] = 0x18;	 /* protocol specific lu */
1650 			arr[10] = 0x82;	 /* mlus, per initiator port */
1651 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1652 			arr[1] = cmd[2];	/*sanity */
1653 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1654 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1655 			arr[1] = cmd[2];        /*sanity */
1656 			n = inquiry_vpd_89(&arr[4]);
1657 			put_unaligned_be16(n, arr + 2);
1658 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1659 			arr[1] = cmd[2];        /*sanity */
1660 			arr[3] = inquiry_vpd_b0(&arr[4]);
1661 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1662 			arr[1] = cmd[2];        /*sanity */
1663 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1664 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1665 			arr[1] = cmd[2];        /*sanity */
1666 			arr[3] = inquiry_vpd_b2(&arr[4]);
1667 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1668 			arr[1] = cmd[2];        /*sanity */
1669 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1670 		} else {
1671 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1672 			kfree(arr);
1673 			return check_condition_result;
1674 		}
1675 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1676 		ret = fill_from_dev_buffer(scp, arr,
1677 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1678 		kfree(arr);
1679 		return ret;
1680 	}
1681 	/* drops through here for a standard inquiry */
1682 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1683 	arr[2] = sdebug_scsi_level;
1684 	arr[3] = 2;    /* response_data_format==2 */
1685 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1686 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1687 	if (sdebug_vpd_use_hostno == 0)
1688 		arr[5] |= 0x10; /* claim: implicit TPGS */
1689 	arr[6] = 0x10; /* claim: MultiP */
1690 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1691 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1692 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1693 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1694 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1695 	/* Use Vendor Specific area to place driver date in ASCII hex */
1696 	memcpy(&arr[36], sdebug_version_date, 8);
1697 	/* version descriptors (2 bytes each) follow */
1698 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1699 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1700 	n = 62;
1701 	if (is_disk) {		/* SBC-4 no version claimed */
1702 		put_unaligned_be16(0x600, arr + n);
1703 		n += 2;
1704 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1705 		put_unaligned_be16(0x525, arr + n);
1706 		n += 2;
1707 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1708 		put_unaligned_be16(0x624, arr + n);
1709 		n += 2;
1710 	}
1711 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1712 	ret = fill_from_dev_buffer(scp, arr,
1713 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1714 	kfree(arr);
1715 	return ret;
1716 }
1717 
/*
 * Informational Exceptions Control (IEC) mode page [0x1c], current values.
 * Mutable module state: resp_mode_select() may overwrite bytes 2..11, and
 * resp_requests()/resp_ie_l_pg() read the TEST bit (byte 2, bit 2) and the
 * MRIE field (byte 3, low nibble). See resp_iec_m_pg() for the layout.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1721 
1722 static int resp_requests(struct scsi_cmnd *scp,
1723 			 struct sdebug_dev_info *devip)
1724 {
1725 	unsigned char *cmd = scp->cmnd;
1726 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1727 	bool dsense = !!(cmd[1] & 1);
1728 	int alloc_len = cmd[4];
1729 	int len = 18;
1730 	int stopped_state = atomic_read(&devip->stopped);
1731 
1732 	memset(arr, 0, sizeof(arr));
1733 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1734 		if (dsense) {
1735 			arr[0] = 0x72;
1736 			arr[1] = NOT_READY;
1737 			arr[2] = LOGICAL_UNIT_NOT_READY;
1738 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1739 			len = 8;
1740 		} else {
1741 			arr[0] = 0x70;
1742 			arr[2] = NOT_READY;		/* NO_SENSE in sense_key */
1743 			arr[7] = 0xa;			/* 18 byte sense buffer */
1744 			arr[12] = LOGICAL_UNIT_NOT_READY;
1745 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1746 		}
1747 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1748 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1749 		if (dsense) {
1750 			arr[0] = 0x72;
1751 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1752 			arr[2] = THRESHOLD_EXCEEDED;
1753 			arr[3] = 0xff;		/* Failure prediction(false) */
1754 			len = 8;
1755 		} else {
1756 			arr[0] = 0x70;
1757 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1758 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1759 			arr[12] = THRESHOLD_EXCEEDED;
1760 			arr[13] = 0xff;		/* Failure prediction(false) */
1761 		}
1762 	} else {	/* nothing to report */
1763 		if (dsense) {
1764 			len = 8;
1765 			memset(arr, 0, len);
1766 			arr[0] = 0x72;
1767 		} else {
1768 			memset(arr, 0, len);
1769 			arr[0] = 0x70;
1770 			arr[7] = 0xa;
1771 		}
1772 	}
1773 	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1774 }
1775 
/*
 * START STOP UNIT command. devip->stopped holds 0 (started), 1 (stopped)
 * or 2 (stopped until the sdeb_tur_ms_to_ready delay after device creation
 * has elapsed). Returns 0 on a real state change, SDEG_RES_IMMED_MASK when
 * nothing changed or IMMED was set, or check_condition_result on error.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	/* POWER CONDITION field (cdb byte 4, bits 7:4); only 0 is supported */
	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear requests a stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* still inside the tur_ms_to_ready window? */
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1818 
1819 static sector_t get_sdebug_capacity(void)
1820 {
1821 	static const unsigned int gibibyte = 1073741824;
1822 
1823 	if (sdebug_virtual_gb > 0)
1824 		return (sector_t)sdebug_virtual_gb *
1825 			(gibibyte / sdebug_sector_size);
1826 	else
1827 		return sdebug_store_sectors;
1828 }
1829 
1830 #define SDEBUG_READCAP_ARR_SZ 8
1831 static int resp_readcap(struct scsi_cmnd *scp,
1832 			struct sdebug_dev_info *devip)
1833 {
1834 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1835 	unsigned int capac;
1836 
1837 	/* following just in case virtual_gb changed */
1838 	sdebug_capacity = get_sdebug_capacity();
1839 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1840 	if (sdebug_capacity < 0xffffffff) {
1841 		capac = (unsigned int)sdebug_capacity - 1;
1842 		put_unaligned_be32(capac, arr + 0);
1843 	} else
1844 		put_unaligned_be32(0xffffffff, arr + 0);
1845 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1846 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1847 }
1848 
1849 #define SDEBUG_READCAP16_ARR_SZ 32
1850 static int resp_readcap16(struct scsi_cmnd *scp,
1851 			  struct sdebug_dev_info *devip)
1852 {
1853 	unsigned char *cmd = scp->cmnd;
1854 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1855 	int alloc_len;
1856 
1857 	alloc_len = get_unaligned_be32(cmd + 10);
1858 	/* following just in case virtual_gb changed */
1859 	sdebug_capacity = get_sdebug_capacity();
1860 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1861 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1862 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1863 	arr[13] = sdebug_physblk_exp & 0xf;
1864 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1865 
1866 	if (scsi_debug_lbp()) {
1867 		arr[14] |= 0x80; /* LBPME */
1868 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1869 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1870 		 * in the wider field maps to 0 in this field.
1871 		 */
1872 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1873 			arr[14] |= 0x40;
1874 	}
1875 
1876 	arr[15] = sdebug_lowest_aligned & 0xff;
1877 
1878 	if (have_dif_prot) {
1879 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1880 		arr[12] |= 1; /* PROT_EN */
1881 	}
1882 
1883 	return fill_from_dev_buffer(scp, arr,
1884 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1885 }
1886 
1887 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1888 
1889 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1890 			      struct sdebug_dev_info *devip)
1891 {
1892 	unsigned char *cmd = scp->cmnd;
1893 	unsigned char *arr;
1894 	int host_no = devip->sdbg_host->shost->host_no;
1895 	int n, ret, alen, rlen;
1896 	int port_group_a, port_group_b, port_a, port_b;
1897 
1898 	alen = get_unaligned_be32(cmd + 6);
1899 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1900 	if (! arr)
1901 		return DID_REQUEUE << 16;
1902 	/*
1903 	 * EVPD page 0x88 states we have two ports, one
1904 	 * real and a fake port with no device connected.
1905 	 * So we create two port groups with one port each
1906 	 * and set the group with port B to unavailable.
1907 	 */
1908 	port_a = 0x1; /* relative port A */
1909 	port_b = 0x2; /* relative port B */
1910 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1911 			(devip->channel & 0x7f);
1912 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1913 			(devip->channel & 0x7f) + 0x80;
1914 
1915 	/*
1916 	 * The asymmetric access state is cycled according to the host_id.
1917 	 */
1918 	n = 4;
1919 	if (sdebug_vpd_use_hostno == 0) {
1920 		arr[n++] = host_no % 3; /* Asymm access state */
1921 		arr[n++] = 0x0F; /* claim: all states are supported */
1922 	} else {
1923 		arr[n++] = 0x0; /* Active/Optimized path */
1924 		arr[n++] = 0x01; /* only support active/optimized paths */
1925 	}
1926 	put_unaligned_be16(port_group_a, arr + n);
1927 	n += 2;
1928 	arr[n++] = 0;    /* Reserved */
1929 	arr[n++] = 0;    /* Status code */
1930 	arr[n++] = 0;    /* Vendor unique */
1931 	arr[n++] = 0x1;  /* One port per group */
1932 	arr[n++] = 0;    /* Reserved */
1933 	arr[n++] = 0;    /* Reserved */
1934 	put_unaligned_be16(port_a, arr + n);
1935 	n += 2;
1936 	arr[n++] = 3;    /* Port unavailable */
1937 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1938 	put_unaligned_be16(port_group_b, arr + n);
1939 	n += 2;
1940 	arr[n++] = 0;    /* Reserved */
1941 	arr[n++] = 0;    /* Status code */
1942 	arr[n++] = 0;    /* Vendor unique */
1943 	arr[n++] = 0x1;  /* One port per group */
1944 	arr[n++] = 0;    /* Reserved */
1945 	arr[n++] = 0;    /* Reserved */
1946 	put_unaligned_be16(port_b, arr + n);
1947 	n += 2;
1948 
1949 	rlen = n - 4;
1950 	put_unaligned_be32(rlen, arr + 0);
1951 
1952 	/*
1953 	 * Return the smallest value of either
1954 	 * - The allocated length
1955 	 * - The constructed command length
1956 	 * - The maximum array size
1957 	 */
1958 	rlen = min_t(int, alen, n);
1959 	ret = fill_from_dev_buffer(scp, arr,
1960 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1961 	kfree(arr);
1962 	return ret;
1963 }
1964 
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN service action).
 * reporting_opts 0 lists every supported command; 1..3 describe one
 * command selected by opcode (and, for 2 and 3, service action). The
 * RCTD bit additionally requests a command timeouts descriptor per
 * command. Responses are built from opcode_info_arr and the attached
 * sub-arrays reached through oip->arrp.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* over-allocate so descriptor writes cannot run past the buffer */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* descriptor size, +12 for timeouts */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* descriptors for the commands attached to this one */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa (cdb bytes 4-5) */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached commands by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached commands by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out the cdb usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {	/* append a command timeouts descriptor */
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2115 
2116 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2117 			  struct sdebug_dev_info *devip)
2118 {
2119 	bool repd;
2120 	u32 alloc_len, len;
2121 	u8 arr[16];
2122 	u8 *cmd = scp->cmnd;
2123 
2124 	memset(arr, 0, sizeof(arr));
2125 	repd = !!(cmd[2] & 0x80);
2126 	alloc_len = get_unaligned_be32(cmd + 6);
2127 	if (alloc_len < 4) {
2128 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2129 		return check_condition_result;
2130 	}
2131 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2132 	arr[1] = 0x1;		/* ITNRS */
2133 	if (repd) {
2134 		arr[3] = 0xc;
2135 		len = 16;
2136 	} else
2137 		len = 4;
2138 
2139 	len = (len < alloc_len) ? len : alloc_len;
2140 	return fill_from_dev_buffer(scp, arr, len);
2141 }
2142 
2143 /* <<Following mode page info copied from ST318451LW>> */
2144 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for MODE SENSE */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2155 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for MODE SENSE */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10,
						      0, 0, 0, 0, 0, 0, 0, 0,
						      0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2166 
2167 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2168 {       /* Format device page for mode_sense */
2169 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2170 				     0, 0, 0, 0, 0, 0, 0, 0,
2171 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2172 
2173 	memcpy(p, format_pg, sizeof(format_pg));
2174 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2175 	put_unaligned_be16(sdebug_sector_size, p + 12);
2176 	if (sdebug_removable)
2177 		p[20] |= 0x20; /* should agree with INQUIRY */
2178 	if (1 == pcontrol)
2179 		memset(p + 2, 0, sizeof(format_pg) - 2);
2180 	return sizeof(format_pg);
2181 }
2182 
/*
 * Caching mode page [0x8], current values. Mutable module state:
 * resp_mode_select() may overwrite bytes 2..19 and resp_caching_pg()
 * clears WCE (byte 2, bit 2) when SDEBUG_OPT_N_WCE is set.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2186 
2187 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2188 { 	/* Caching page for mode_sense */
2189 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2190 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2191 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2192 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2193 
2194 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2195 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2196 	memcpy(p, caching_pg, sizeof(caching_pg));
2197 	if (1 == pcontrol)
2198 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2199 	else if (2 == pcontrol)
2200 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2201 	return sizeof(caching_pg);
2202 }
2203 
/*
 * Control mode page [0xa], current values. Mutable module state:
 * resp_mode_select() may overwrite bytes 2..11; resp_ctrl_m_pg() keeps
 * D_SENSE (byte 2, bit 2) and ATO (byte 5, bit 7) in sync with the
 * sdebug_dsense and sdebug_ato parameters.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2206 
2207 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2208 { 	/* Control mode page for mode_sense */
2209 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2210 					0, 0, 0, 0};
2211 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 				     0, 0, 0x2, 0x4b};
2213 
2214 	if (sdebug_dsense)
2215 		ctrl_m_pg[2] |= 0x4;
2216 	else
2217 		ctrl_m_pg[2] &= ~0x4;
2218 
2219 	if (sdebug_ato)
2220 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2221 
2222 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2223 	if (1 == pcontrol)
2224 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2225 	else if (2 == pcontrol)
2226 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2227 	return sizeof(ctrl_m_pg);
2228 }
2229 
2230 
2231 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2232 {	/* Informational Exceptions control mode page for mode_sense */
2233 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2234 				       0, 0, 0x0, 0x0};
2235 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2236 				      0, 0, 0x0, 0x0};
2237 
2238 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2239 	if (1 == pcontrol)
2240 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2241 	else if (2 == pcontrol)
2242 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2243 	return sizeof(iec_m_pg);
2244 }
2245 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page [0x19] - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0,
						    0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2256 
2257 
/*
 * SAS Phy Control And Discover mode subpage [0x19, 0x1] for MODE SENSE.
 * Two phy descriptors are reported; SAS addresses are patched into the
 * template from naa3_comp_a/naa3_comp_c and the attached device ids are
 * derived from target_dev_id.
 */
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in the SAS address slots marked in the template above */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: none reported */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2290 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage [0x19, 0x2] */
	static const unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6,
						     0x10, 0, 0, 0, 0, 0, 0, 0,
						     0, 0};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2302 
2303 #define SDEBUG_MAX_MSENSE_SZ 256
2304 
/*
 * MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter header,
 * an optional block descriptor (disks and ZBC devices only) and the
 * requested mode page(s) into arr[], then returns at most ALLOCATION
 * LENGTH bytes of it.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA descriptor */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes (MS6) or 8 bytes (MS10) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {		/* short block descriptor */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {	/* long LBA block descriptor */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages are only implemented for the SAS pages (pcode 0x19) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself: 1 byte (MS6) or 2 bytes (MS10) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2468 
2469 #define SDEBUG_MAX_MSELECT_SZ 512
2470 
/*
 * MODE SELECT(6) and MODE SELECT(10). Fetches the parameter list from the
 * data-out buffer and, for the writable pages (caching 0x8, control 0xa,
 * IEC 0x1c), copies the new values over the corresponding module-global
 * page, then records a MODE PARAMETERS CHANGED unit attention in
 * devip->uas_bm. Any other page (or a length mismatch) is rejected.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* PF bit: page format parameter list required */
	sp = cmd[1] & 0x1;	/* SP bit: saved pages not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* MODE DATA LENGTH is reserved in MODE SELECT; expect ~0 here */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the 4 or 8 byte header plus any block descriptors */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero in MODE SELECT data */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF bit: sub-page (long) page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* propagate SWP and D_SENSE to the module parameters */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2551 }
2552 
/*
 * Fill in the Temperature log page (0x0d) payload: current temperature
 * (38 C) and reference temperature (65 C). Returns bytes written.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1: ref temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2562 
2563 static int resp_ie_l_pg(unsigned char *arr)
2564 {
2565 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2566 		};
2567 
2568 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2569 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2570 		arr[4] = THRESHOLD_EXCEEDED;
2571 		arr[5] = 0xff;
2572 	}
2573 	return sizeof(ie_l_pg);
2574 }
2575 
2576 #define SDEBUG_MAX_LSENSE_SZ 512
2577 
2578 static int resp_log_sense(struct scsi_cmnd *scp,
2579 			  struct sdebug_dev_info *devip)
2580 {
2581 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2582 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2583 	unsigned char *cmd = scp->cmnd;
2584 
2585 	memset(arr, 0, sizeof(arr));
2586 	ppc = cmd[1] & 0x2;
2587 	sp = cmd[1] & 0x1;
2588 	if (ppc || sp) {
2589 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2590 		return check_condition_result;
2591 	}
2592 	pcode = cmd[2] & 0x3f;
2593 	subpcode = cmd[3] & 0xff;
2594 	alloc_len = get_unaligned_be16(cmd + 7);
2595 	arr[0] = pcode;
2596 	if (0 == subpcode) {
2597 		switch (pcode) {
2598 		case 0x0:	/* Supported log pages log page */
2599 			n = 4;
2600 			arr[n++] = 0x0;		/* this page */
2601 			arr[n++] = 0xd;		/* Temperature */
2602 			arr[n++] = 0x2f;	/* Informational exceptions */
2603 			arr[3] = n - 4;
2604 			break;
2605 		case 0xd:	/* Temperature log page */
2606 			arr[3] = resp_temp_l_pg(arr + 4);
2607 			break;
2608 		case 0x2f:	/* Informational exceptions log page */
2609 			arr[3] = resp_ie_l_pg(arr + 4);
2610 			break;
2611 		default:
2612 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2613 			return check_condition_result;
2614 		}
2615 	} else if (0xff == subpcode) {
2616 		arr[0] |= 0x40;
2617 		arr[1] = subpcode;
2618 		switch (pcode) {
2619 		case 0x0:	/* Supported log pages and subpages log page */
2620 			n = 4;
2621 			arr[n++] = 0x0;
2622 			arr[n++] = 0x0;		/* 0,0 page */
2623 			arr[n++] = 0x0;
2624 			arr[n++] = 0xff;	/* this page */
2625 			arr[n++] = 0xd;
2626 			arr[n++] = 0x0;		/* Temperature */
2627 			arr[n++] = 0x2f;
2628 			arr[n++] = 0x0;	/* Informational exceptions */
2629 			arr[3] = n - 4;
2630 			break;
2631 		case 0xd:	/* Temperature subpages */
2632 			n = 4;
2633 			arr[n++] = 0xd;
2634 			arr[n++] = 0x0;		/* Temperature */
2635 			arr[3] = n - 4;
2636 			break;
2637 		case 0x2f:	/* Informational exceptions subpages */
2638 			n = 4;
2639 			arr[n++] = 0x2f;
2640 			arr[n++] = 0x0;		/* Informational exceptions */
2641 			arr[3] = n - 4;
2642 			break;
2643 		default:
2644 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2645 			return check_condition_result;
2646 		}
2647 	} else {
2648 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2649 		return check_condition_result;
2650 	}
2651 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2652 	return fill_from_dev_buffer(scp, arr,
2653 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2654 }
2655 
2656 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2657 {
2658 	return devip->nr_zones != 0;
2659 }
2660 
2661 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2662 					unsigned long long lba)
2663 {
2664 	return &devip->zstate[lba >> devip->zsize_shift];
2665 }
2666 
2667 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2668 {
2669 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2670 }
2671 
2672 static void zbc_close_zone(struct sdebug_dev_info *devip,
2673 			   struct sdeb_zone_state *zsp)
2674 {
2675 	enum sdebug_z_cond zc;
2676 
2677 	if (zbc_zone_is_conv(zsp))
2678 		return;
2679 
2680 	zc = zsp->z_cond;
2681 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2682 		return;
2683 
2684 	if (zc == ZC2_IMPLICIT_OPEN)
2685 		devip->nr_imp_open--;
2686 	else
2687 		devip->nr_exp_open--;
2688 
2689 	if (zsp->z_wp == zsp->z_start) {
2690 		zsp->z_cond = ZC1_EMPTY;
2691 	} else {
2692 		zsp->z_cond = ZC4_CLOSED;
2693 		devip->nr_closed++;
2694 	}
2695 }
2696 
2697 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2698 {
2699 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2700 	unsigned int i;
2701 
2702 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2703 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2704 			zbc_close_zone(devip, zsp);
2705 			return;
2706 		}
2707 	}
2708 }
2709 
2710 static void zbc_open_zone(struct sdebug_dev_info *devip,
2711 			  struct sdeb_zone_state *zsp, bool explicit)
2712 {
2713 	enum sdebug_z_cond zc;
2714 
2715 	if (zbc_zone_is_conv(zsp))
2716 		return;
2717 
2718 	zc = zsp->z_cond;
2719 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2720 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2721 		return;
2722 
2723 	/* Close an implicit open zone if necessary */
2724 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2725 		zbc_close_zone(devip, zsp);
2726 	else if (devip->max_open &&
2727 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2728 		zbc_close_imp_open_zone(devip);
2729 
2730 	if (zsp->z_cond == ZC4_CLOSED)
2731 		devip->nr_closed--;
2732 	if (explicit) {
2733 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2734 		devip->nr_exp_open++;
2735 	} else {
2736 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2737 		devip->nr_imp_open++;
2738 	}
2739 }
2740 
/*
 * Advance the write pointer(s) for a write of @num blocks at @lba.
 * For sequential-write-required zones the caller (via
 * check_zbc_access_params()) has already ensured the write starts at
 * the WP and stays inside one zone, so a simple increment suffices.
 * Sequential-write-preferred zones may be written anywhere and the
 * write may span several zones, so walk zone by zone.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
		return;
	}

	while (num) {
		/* Writing away from the WP makes this a non-seq resource */
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* Write reaches the zone end: consume up to zend */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			/* Write extends past the current WP */
			n = num;
			zsp->z_wp = end;
		} else {
			/* Write entirely below the WP: WP unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;

		num -= n;
		lba += n;
		if (num) {
			/* Continue into the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2782 
/*
 * Validate a read or write of @num blocks at @lba against zoned-device
 * constraints. Returns 0 when allowed, otherwise sets sense data and
 * returns check_condition_result. Writes to sequential zones may also
 * implicitly open the target zone.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* Host-aware devices place no restrictions on reads */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ... but they may not spill into a sequential zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		/* Explicitly open zones count against the open limit */
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2854 
2855 static inline int check_device_access_params
2856 			(struct scsi_cmnd *scp, unsigned long long lba,
2857 			 unsigned int num, bool write)
2858 {
2859 	struct scsi_device *sdp = scp->device;
2860 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2861 
2862 	if (lba + num > sdebug_capacity) {
2863 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2864 		return check_condition_result;
2865 	}
2866 	/* transfer length excessive (tie in to block limits VPD page) */
2867 	if (num > sdebug_store_sectors) {
2868 		/* needs work to find which cdb byte 'num' comes from */
2869 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2870 		return check_condition_result;
2871 	}
2872 	if (write && unlikely(sdebug_wp)) {
2873 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2874 		return check_condition_result;
2875 	}
2876 	if (sdebug_dev_is_zoned(devip))
2877 		return check_zbc_access_params(scp, lba, num, write);
2878 
2879 	return 0;
2880 }
2881 
2882 /*
2883  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2884  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2885  * that access any of the "stores" in struct sdeb_store_info should call this
2886  * function with bug_if_fake_rw set to true.
2887  */
2888 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2889 						bool bug_if_fake_rw)
2890 {
2891 	if (sdebug_fake_rw) {
2892 		BUG_ON(bug_if_fake_rw);	/* See note above */
2893 		return NULL;
2894 	}
2895 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2896 }
2897 
/*
 * Copy @num sectors at @lba between the backing store and the command's
 * scatter-gather list (direction per @do_write). The store acts as a
 * ring: accesses past sdebug_store_sectors wrap to the start. Returns
 * number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* Nothing to move with an empty buffer or no backing store */
	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* block = lba % store size; lba becomes the quotient (unused) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* Wrapped part: continue at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2940 
2941 /* Returns number of bytes copied or -1 if error. */
2942 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2943 {
2944 	struct scsi_data_buffer *sdb = &scp->sdb;
2945 
2946 	if (!sdb->length)
2947 		return 0;
2948 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2949 		return -1;
2950 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2951 			      num * sdebug_sector_size, 0, true);
2952 }
2953 
2954 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2955  * arr into sip->storep+lba and return true. If comparison fails then
2956  * return false. */
2957 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2958 			      const u8 *arr, bool compare_only)
2959 {
2960 	bool res;
2961 	u64 block, rest = 0;
2962 	u32 store_blks = sdebug_store_sectors;
2963 	u32 lb_size = sdebug_sector_size;
2964 	u8 *fsp = sip->storep;
2965 
2966 	block = do_div(lba, store_blks);
2967 	if (block + num > store_blks)
2968 		rest = block + num - store_blks;
2969 
2970 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2971 	if (!res)
2972 		return res;
2973 	if (rest)
2974 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2975 			     rest * lb_size);
2976 	if (!res)
2977 		return res;
2978 	if (compare_only)
2979 		return true;
2980 	arr += num * lb_size;
2981 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2982 	if (rest)
2983 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2984 	return res;
2985 }
2986 
2987 static __be16 dif_compute_csum(const void *buf, int len)
2988 {
2989 	__be16 csum;
2990 
2991 	if (sdebug_guard)
2992 		csum = (__force __be16)ip_compute_csum(buf, len);
2993 	else
2994 		csum = cpu_to_be16(crc_t10dif(buf, len));
2995 
2996 	return csum;
2997 }
2998 
/*
 * Verify one sector's protection information tuple against its data.
 * Returns 0 on success, 0x01 on a guard tag mismatch or 0x03 on a
 * reference tag mismatch (values are used as sense ASCQ components by
 * the callers).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3025 
/*
 * Copy protection information tuples between the command's protection
 * scatter-gather list and the store's dif_storep array, starting at
 * @sector for @sectors sectors. Direction is store->sgl when @read is
 * true, sgl->store otherwise. Like the data store, dif_storep wraps at
 * sdebug_store_sectors.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* rest = bytes that wrap past the end of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* Wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3071 
/*
 * Verify stored protection information for @sectors sectors starting at
 * @start_sec, then copy the PI tuples into the command's protection
 * scatter-gather list. Returns 0 on success or the non-zero dif_verify()
 * error code of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* An all-ones app tag disables checking for this sector */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
3103 
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes LBA/length from the cdb, applies the configured error
 * injections, validates the access, optionally verifies DIF/DIX
 * protection data, then copies data out of the backing store.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	/* With fake_rw there is no store; use the dummy fallback lock */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): a transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT (cmd[1] bits 5-7) must be 0 for type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Injected short transfer: pretend only half was requested */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Injected medium error if the read overlaps the error window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Injected recovered/DIF/DIX errors on otherwise good reads */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3230 
/*
 * Dump @len bytes of a sector to the kernel log, 16 bytes per row:
 * printable characters as " c ", everything else as a hex pair.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int row, col, n;

	pr_err(">>> Sector Dump <<<\n");
	for (row = 0; row < len; row += 16) {
		char line[128];

		for (col = 0, n = 0; col < 16; col++) {
			unsigned char c = buf[row + col];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(line + n, sizeof(line) - n,
					       " %c ", c);
			else
				n += scnprintf(line + n, sizeof(line) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", row, line);
	}
}
3252 
/*
 * Verify the protection information accompanying a protected write.
 * Walks the protection and data scatter-gather lists in lock-step,
 * verifying each sector's tuple against its data. On success the PI
 * tuples are saved to the store's dif_storep. Returns 0 on success or
 * a non-zero dif_verify()-style error code.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* Data list exhausted before protection list */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All sectors verified: persist the PI tuples to the store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3324 
/*
 * Map an LBA to its bit index in the provisioning map. Each map bit
 * covers sdebug_unmap_granularity LBAs, shifted by the configured
 * unmap alignment.
 */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);	/* divides in place */
	return lba;
}
3332 
/* Inverse of lba_to_map_index(): first LBA covered by map bit @index. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
3341 
3342 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3343 			      unsigned int *num)
3344 {
3345 	sector_t end;
3346 	unsigned int mapped;
3347 	unsigned long index;
3348 	unsigned long next;
3349 
3350 	index = lba_to_map_index(lba);
3351 	mapped = test_bit(index, sip->map_storep);
3352 
3353 	if (mapped)
3354 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3355 	else
3356 		next = find_next_bit(sip->map_storep, map_size, index);
3357 
3358 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3359 	*num = end - lba;
3360 	return mapped;
3361 }
3362 
3363 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3364 		       unsigned int len)
3365 {
3366 	sector_t end = lba + len;
3367 
3368 	while (lba < end) {
3369 		unsigned long index = lba_to_map_index(lba);
3370 
3371 		if (index < map_size)
3372 			set_bit(index, sip->map_storep);
3373 
3374 		lba = map_index_to_lba(index + 1);
3375 	}
3376 }
3377 
/*
 * Clear the provisioning map bits of map blocks fully covered by
 * [lba, lba + len) and scrub their data per the LBPRZ setting.
 * Partially covered map blocks stay mapped.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Only unmap when an entire granularity block is covered */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			/* Invalidate protection info for the unmapped range */
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3406 
3407 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3408 {
3409 	bool check_prot;
3410 	u32 num;
3411 	u32 ei_lba;
3412 	int ret;
3413 	u64 lba;
3414 	struct sdeb_store_info *sip = devip2sip(devip, true);
3415 	rwlock_t *macc_lckp = &sip->macc_lck;
3416 	u8 *cmd = scp->cmnd;
3417 
3418 	switch (cmd[0]) {
3419 	case WRITE_16:
3420 		ei_lba = 0;
3421 		lba = get_unaligned_be64(cmd + 2);
3422 		num = get_unaligned_be32(cmd + 10);
3423 		check_prot = true;
3424 		break;
3425 	case WRITE_10:
3426 		ei_lba = 0;
3427 		lba = get_unaligned_be32(cmd + 2);
3428 		num = get_unaligned_be16(cmd + 7);
3429 		check_prot = true;
3430 		break;
3431 	case WRITE_6:
3432 		ei_lba = 0;
3433 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3434 		      (u32)(cmd[1] & 0x1f) << 16;
3435 		num = (0 == cmd[4]) ? 256 : cmd[4];
3436 		check_prot = true;
3437 		break;
3438 	case WRITE_12:
3439 		ei_lba = 0;
3440 		lba = get_unaligned_be32(cmd + 2);
3441 		num = get_unaligned_be32(cmd + 6);
3442 		check_prot = true;
3443 		break;
3444 	case 0x53:	/* XDWRITEREAD(10) */
3445 		ei_lba = 0;
3446 		lba = get_unaligned_be32(cmd + 2);
3447 		num = get_unaligned_be16(cmd + 7);
3448 		check_prot = false;
3449 		break;
3450 	default:	/* assume WRITE(32) */
3451 		lba = get_unaligned_be64(cmd + 12);
3452 		ei_lba = get_unaligned_be32(cmd + 20);
3453 		num = get_unaligned_be32(cmd + 28);
3454 		check_prot = false;
3455 		break;
3456 	}
3457 	if (unlikely(have_dif_prot && check_prot)) {
3458 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3459 		    (cmd[1] & 0xe0)) {
3460 			mk_sense_invalid_opcode(scp);
3461 			return check_condition_result;
3462 		}
3463 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3464 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3465 		    (cmd[1] & 0xe0) == 0)
3466 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3467 				    "to DIF device\n");
3468 	}
3469 
3470 	write_lock(macc_lckp);
3471 	ret = check_device_access_params(scp, lba, num, true);
3472 	if (ret) {
3473 		write_unlock(macc_lckp);
3474 		return ret;
3475 	}
3476 
3477 	/* DIX + T10 DIF */
3478 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3479 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3480 
3481 		if (prot_ret) {
3482 			write_unlock(macc_lckp);
3483 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3484 			return illegal_condition_result;
3485 		}
3486 	}
3487 
3488 	ret = do_device_access(sip, scp, 0, lba, num, true);
3489 	if (unlikely(scsi_debug_lbp()))
3490 		map_region(sip, lba, num);
3491 	/* If ZBC zone then bump its write pointer */
3492 	if (sdebug_dev_is_zoned(devip))
3493 		zbc_inc_wp(devip, lba, num);
3494 	write_unlock(macc_lckp);
3495 	if (unlikely(-1 == ret))
3496 		return DID_ERROR << 16;
3497 	else if (unlikely(sdebug_verbose &&
3498 			  (ret < (num * sdebug_sector_size))))
3499 		sdev_printk(KERN_INFO, scp->device,
3500 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3501 			    my_name, num * sdebug_sector_size, ret);
3502 
3503 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3504 		     atomic_read(&sdeb_inject_pending))) {
3505 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3506 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3507 			atomic_set(&sdeb_inject_pending, 0);
3508 			return check_condition_result;
3509 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3510 			/* Logical block guard check failed */
3511 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3512 			atomic_set(&sdeb_inject_pending, 0);
3513 			return illegal_condition_result;
3514 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3515 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3516 			atomic_set(&sdeb_inject_pending, 0);
3517 			return illegal_condition_result;
3518 		}
3519 	}
3520 	return 0;
3521 }
3522 
3523 /*
3524  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3525  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3526  */
/* Implements WRITE SCATTERED(16) and WRITE SCATTERED(32). The data-out
 * buffer carries a parameter list header, then num_lrd LBA range
 * descriptors (LRDs), then - starting at an offset of lbdof logical
 * blocks - the data to be written for each descriptor, in order. */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of header + LBA range descriptors */
	u8 *up;			/* cursor walking the LRDs */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			/* type 2 protection does not allow WRPROTECT here */
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* bytes occupied by header + descriptors, before the write data */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	/* sg_off tracks where each descriptor's data starts in the dout buf */
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* first LRD follows the lrd_size byte parameter list header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;	/* empty descriptor is not an error */
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		/* WRITE SCATTERED(32) carries an expected initial LBA */
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional one-shot error injection after a write */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3689 
3690 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3691 			   u32 ei_lba, bool unmap, bool ndob)
3692 {
3693 	struct scsi_device *sdp = scp->device;
3694 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3695 	unsigned long long i;
3696 	u64 block, lbaa;
3697 	u32 lb_size = sdebug_sector_size;
3698 	int ret;
3699 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3700 						scp->device->hostdata, true);
3701 	rwlock_t *macc_lckp = &sip->macc_lck;
3702 	u8 *fs1p;
3703 	u8 *fsp;
3704 
3705 	write_lock(macc_lckp);
3706 
3707 	ret = check_device_access_params(scp, lba, num, true);
3708 	if (ret) {
3709 		write_unlock(macc_lckp);
3710 		return ret;
3711 	}
3712 
3713 	if (unmap && scsi_debug_lbp()) {
3714 		unmap_region(sip, lba, num);
3715 		goto out;
3716 	}
3717 	lbaa = lba;
3718 	block = do_div(lbaa, sdebug_store_sectors);
3719 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3720 	fsp = sip->storep;
3721 	fs1p = fsp + (block * lb_size);
3722 	if (ndob) {
3723 		memset(fs1p, 0, lb_size);
3724 		ret = 0;
3725 	} else
3726 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3727 
3728 	if (-1 == ret) {
3729 		write_unlock(&sip->macc_lck);
3730 		return DID_ERROR << 16;
3731 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3732 		sdev_printk(KERN_INFO, scp->device,
3733 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3734 			    my_name, "write same", lb_size, ret);
3735 
3736 	/* Copy first sector to remaining blocks */
3737 	for (i = 1 ; i < num ; i++) {
3738 		lbaa = lba + i;
3739 		block = do_div(lbaa, sdebug_store_sectors);
3740 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3741 	}
3742 	if (scsi_debug_lbp())
3743 		map_region(sip, lba, num);
3744 	/* If ZBC zone then bump its write pointer */
3745 	if (sdebug_dev_is_zoned(devip))
3746 		zbc_inc_wp(devip, lba, num);
3747 out:
3748 	write_unlock(macc_lckp);
3749 
3750 	return 0;
3751 }
3752 
3753 static int resp_write_same_10(struct scsi_cmnd *scp,
3754 			      struct sdebug_dev_info *devip)
3755 {
3756 	u8 *cmd = scp->cmnd;
3757 	u32 lba;
3758 	u16 num;
3759 	u32 ei_lba = 0;
3760 	bool unmap = false;
3761 
3762 	if (cmd[1] & 0x8) {
3763 		if (sdebug_lbpws10 == 0) {
3764 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3765 			return check_condition_result;
3766 		} else
3767 			unmap = true;
3768 	}
3769 	lba = get_unaligned_be32(cmd + 2);
3770 	num = get_unaligned_be16(cmd + 7);
3771 	if (num > sdebug_write_same_length) {
3772 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3773 		return check_condition_result;
3774 	}
3775 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3776 }
3777 
3778 static int resp_write_same_16(struct scsi_cmnd *scp,
3779 			      struct sdebug_dev_info *devip)
3780 {
3781 	u8 *cmd = scp->cmnd;
3782 	u64 lba;
3783 	u32 num;
3784 	u32 ei_lba = 0;
3785 	bool unmap = false;
3786 	bool ndob = false;
3787 
3788 	if (cmd[1] & 0x8) {	/* UNMAP */
3789 		if (sdebug_lbpws == 0) {
3790 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3791 			return check_condition_result;
3792 		} else
3793 			unmap = true;
3794 	}
3795 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3796 		ndob = true;
3797 	lba = get_unaligned_be64(cmd + 2);
3798 	num = get_unaligned_be32(cmd + 10);
3799 	if (num > sdebug_write_same_length) {
3800 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3801 		return check_condition_result;
3802 	}
3803 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3804 }
3805 
3806 /* Note the mode field is in the same position as the (lower) service action
3807  * field. For the Report supported operation codes command, SPC-4 suggests
3808  * each mode of this command should be reported separately; for future. */
3809 static int resp_write_buffer(struct scsi_cmnd *scp,
3810 			     struct sdebug_dev_info *devip)
3811 {
3812 	u8 *cmd = scp->cmnd;
3813 	struct scsi_device *sdp = scp->device;
3814 	struct sdebug_dev_info *dp;
3815 	u8 mode;
3816 
3817 	mode = cmd[1] & 0x1f;
3818 	switch (mode) {
3819 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3820 		/* set UAs on this device only */
3821 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3822 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3823 		break;
3824 	case 0x5:	/* download MC, save and ACT */
3825 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3826 		break;
3827 	case 0x6:	/* download MC with offsets and ACT */
3828 		/* set UAs on most devices (LUs) in this target */
3829 		list_for_each_entry(dp,
3830 				    &devip->sdbg_host->dev_info_list,
3831 				    dev_list)
3832 			if (dp->target == sdp->id) {
3833 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3834 				if (devip != dp)
3835 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3836 						dp->uas_bm);
3837 			}
3838 		break;
3839 	case 0x7:	/* download MC with offsets, save, and ACT */
3840 		/* set UA on all devices (LUs) in this target */
3841 		list_for_each_entry(dp,
3842 				    &devip->sdbg_host->dev_info_list,
3843 				    dev_list)
3844 			if (dp->target == sdp->id)
3845 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3846 					dp->uas_bm);
3847 		break;
3848 	default:
3849 		/* do nothing for this command for other mode values */
3850 		break;
3851 	}
3852 	return 0;
3853 }
3854 
3855 static int resp_comp_write(struct scsi_cmnd *scp,
3856 			   struct sdebug_dev_info *devip)
3857 {
3858 	u8 *cmd = scp->cmnd;
3859 	u8 *arr;
3860 	struct sdeb_store_info *sip = devip2sip(devip, true);
3861 	rwlock_t *macc_lckp = &sip->macc_lck;
3862 	u64 lba;
3863 	u32 dnum;
3864 	u32 lb_size = sdebug_sector_size;
3865 	u8 num;
3866 	int ret;
3867 	int retval = 0;
3868 
3869 	lba = get_unaligned_be64(cmd + 2);
3870 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3871 	if (0 == num)
3872 		return 0;	/* degenerate case, not an error */
3873 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3874 	    (cmd[1] & 0xe0)) {
3875 		mk_sense_invalid_opcode(scp);
3876 		return check_condition_result;
3877 	}
3878 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3879 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3880 	    (cmd[1] & 0xe0) == 0)
3881 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3882 			    "to DIF device\n");
3883 	ret = check_device_access_params(scp, lba, num, false);
3884 	if (ret)
3885 		return ret;
3886 	dnum = 2 * num;
3887 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3888 	if (NULL == arr) {
3889 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3890 				INSUFF_RES_ASCQ);
3891 		return check_condition_result;
3892 	}
3893 
3894 	write_lock(macc_lckp);
3895 
3896 	ret = do_dout_fetch(scp, dnum, arr);
3897 	if (ret == -1) {
3898 		retval = DID_ERROR << 16;
3899 		goto cleanup;
3900 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3901 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3902 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3903 			    dnum * lb_size, ret);
3904 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3905 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3906 		retval = check_condition_result;
3907 		goto cleanup;
3908 	}
3909 	if (scsi_debug_lbp())
3910 		map_region(sip, lba, num);
3911 cleanup:
3912 	write_unlock(macc_lckp);
3913 	kfree(arr);
3914 	return retval;
3915 }
3916 
/* One LBA range descriptor within an UNMAP parameter list; all fields
 * are big-endian on the wire (hence __be64/__be32). */
struct unmap_block_desc {
	__be64	lba;		/* first logical block of the range */
	__be32	blocks;		/* number of logical blocks to unmap */
	__be32	__reserved;
};
3922 
/* UNMAP: copy the parameter list out of the scatter list and unmap each
 * described LBA range under the store's write lock. */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	/* NOTE(review): BUG_ON on a mismatch driven by the submitted cdb
	 * crashes the kernel rather than failing the command - confirm
	 * whether the upper layers guarantee this equality. */
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* payload_len < 8 underflows here, but the resulting huge value
	 * is rejected by the sdebug_unmap_max_desc check below */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* cross-check the lengths embedded in the parameter list header */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];		/* descriptors follow 8 byte header */

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}
3978 
3979 #define SDEBUG_GET_LBA_STATUS_LEN 32
3980 
3981 static int resp_get_lba_status(struct scsi_cmnd *scp,
3982 			       struct sdebug_dev_info *devip)
3983 {
3984 	u8 *cmd = scp->cmnd;
3985 	u64 lba;
3986 	u32 alloc_len, mapped, num;
3987 	int ret;
3988 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3989 
3990 	lba = get_unaligned_be64(cmd + 2);
3991 	alloc_len = get_unaligned_be32(cmd + 10);
3992 
3993 	if (alloc_len < 24)
3994 		return 0;
3995 
3996 	ret = check_device_access_params(scp, lba, 1, false);
3997 	if (ret)
3998 		return ret;
3999 
4000 	if (scsi_debug_lbp()) {
4001 		struct sdeb_store_info *sip = devip2sip(devip, true);
4002 
4003 		mapped = map_state(sip, lba, &num);
4004 	} else {
4005 		mapped = 1;
4006 		/* following just in case virtual_gb changed */
4007 		sdebug_capacity = get_sdebug_capacity();
4008 		if (sdebug_capacity - lba <= 0xffffffff)
4009 			num = sdebug_capacity - lba;
4010 		else
4011 			num = 0xffffffff;
4012 	}
4013 
4014 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4015 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4016 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4017 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4018 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4019 
4020 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4021 }
4022 
4023 static int resp_sync_cache(struct scsi_cmnd *scp,
4024 			   struct sdebug_dev_info *devip)
4025 {
4026 	int res = 0;
4027 	u64 lba;
4028 	u32 num_blocks;
4029 	u8 *cmd = scp->cmnd;
4030 
4031 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4032 		lba = get_unaligned_be32(cmd + 2);
4033 		num_blocks = get_unaligned_be16(cmd + 7);
4034 	} else {				/* SYNCHRONIZE_CACHE(16) */
4035 		lba = get_unaligned_be64(cmd + 2);
4036 		num_blocks = get_unaligned_be32(cmd + 10);
4037 	}
4038 	if (lba + num_blocks > sdebug_capacity) {
4039 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4040 		return check_condition_result;
4041 	}
4042 	if (!write_since_sync || (cmd[1] & 0x2))
4043 		res = SDEG_RES_IMMED_MASK;
4044 	else		/* delay if write_since_sync and IMMED clear */
4045 		write_since_sync = false;
4046 	return res;
4047 }
4048 
4049 /*
4050  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4051  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4052  * a GOOD status otherwise. Model a disk with a big cache and yield
4053  * CONDITION MET. Actually tries to bring range in main memory into the
4054  * cache associated with the CPU(s).
4055  */
4056 static int resp_pre_fetch(struct scsi_cmnd *scp,
4057 			  struct sdebug_dev_info *devip)
4058 {
4059 	int res = 0;
4060 	u64 lba;
4061 	u64 block, rest = 0;
4062 	u32 nblks;
4063 	u8 *cmd = scp->cmnd;
4064 	struct sdeb_store_info *sip = devip2sip(devip, true);
4065 	rwlock_t *macc_lckp = &sip->macc_lck;
4066 	u8 *fsp = sip->storep;
4067 
4068 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4069 		lba = get_unaligned_be32(cmd + 2);
4070 		nblks = get_unaligned_be16(cmd + 7);
4071 	} else {			/* PRE-FETCH(16) */
4072 		lba = get_unaligned_be64(cmd + 2);
4073 		nblks = get_unaligned_be32(cmd + 10);
4074 	}
4075 	if (lba + nblks > sdebug_capacity) {
4076 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4077 		return check_condition_result;
4078 	}
4079 	if (!fsp)
4080 		goto fini;
4081 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4082 	block = do_div(lba, sdebug_store_sectors);
4083 	if (block + nblks > sdebug_store_sectors)
4084 		rest = block + nblks - sdebug_store_sectors;
4085 
4086 	/* Try to bring the PRE-FETCH range into CPU's cache */
4087 	read_lock(macc_lckp);
4088 	prefetch_range(fsp + (sdebug_sector_size * block),
4089 		       (nblks - rest) * sdebug_sector_size);
4090 	if (rest)
4091 		prefetch_range(fsp, rest * sdebug_sector_size);
4092 	read_unlock(macc_lckp);
4093 fini:
4094 	if (cmd[1] & 0x2)
4095 		res = SDEG_RES_IMMED_MASK;
4096 	return res | condition_met_result;
4097 }
4098 
4099 #define RL_BUCKET_ELEMS 8
4100 
4101 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4102  * (W-LUN), the normal Linux scanning logic does not associate it with a
4103  * device (e.g. /dev/sg7). The following magic will make that association:
4104  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4105  * where <n> is a host number. If there are multiple targets in a host then
4106  * the above will associate a W-LUN to each target. To only get a W-LUN
4107  * for target 2, then use "echo '- 2 49409' > scan" .
4108  */
/* REPORT LUNS: build the response in RL_BUCKET_ELEMS-entry chunks in a
 * stack buffer, flushing each full chunk to the data-in buffer. The first
 * chunk's slot 0 holds the 8 byte response header instead of a LUN. */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;	/* next free slot in arr */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into the response */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* when no_lun_0 is set, LUN 0 is skipped, so one fewer to report */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first chunk: slot 0 carries the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* partial chunk: stop, leaving lun_p/j for the W-LUN below */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN to the last chunk */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4199 
4200 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4201 {
4202 	bool is_bytchk3 = false;
4203 	u8 bytchk;
4204 	int ret, j;
4205 	u32 vnum, a_num, off;
4206 	const u32 lb_size = sdebug_sector_size;
4207 	u64 lba;
4208 	u8 *arr;
4209 	u8 *cmd = scp->cmnd;
4210 	struct sdeb_store_info *sip = devip2sip(devip, true);
4211 	rwlock_t *macc_lckp = &sip->macc_lck;
4212 
4213 	bytchk = (cmd[1] >> 1) & 0x3;
4214 	if (bytchk == 0) {
4215 		return 0;	/* always claim internal verify okay */
4216 	} else if (bytchk == 2) {
4217 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4218 		return check_condition_result;
4219 	} else if (bytchk == 3) {
4220 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4221 	}
4222 	switch (cmd[0]) {
4223 	case VERIFY_16:
4224 		lba = get_unaligned_be64(cmd + 2);
4225 		vnum = get_unaligned_be32(cmd + 10);
4226 		break;
4227 	case VERIFY:		/* is VERIFY(10) */
4228 		lba = get_unaligned_be32(cmd + 2);
4229 		vnum = get_unaligned_be16(cmd + 7);
4230 		break;
4231 	default:
4232 		mk_sense_invalid_opcode(scp);
4233 		return check_condition_result;
4234 	}
4235 	a_num = is_bytchk3 ? 1 : vnum;
4236 	/* Treat following check like one for read (i.e. no write) access */
4237 	ret = check_device_access_params(scp, lba, a_num, false);
4238 	if (ret)
4239 		return ret;
4240 
4241 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4242 	if (!arr) {
4243 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4244 				INSUFF_RES_ASCQ);
4245 		return check_condition_result;
4246 	}
4247 	/* Not changing store, so only need read access */
4248 	read_lock(macc_lckp);
4249 
4250 	ret = do_dout_fetch(scp, a_num, arr);
4251 	if (ret == -1) {
4252 		ret = DID_ERROR << 16;
4253 		goto cleanup;
4254 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4255 		sdev_printk(KERN_INFO, scp->device,
4256 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4257 			    my_name, __func__, a_num * lb_size, ret);
4258 	}
4259 	if (is_bytchk3) {
4260 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4261 			memcpy(arr + off, arr, lb_size);
4262 	}
4263 	ret = 0;
4264 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4265 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4266 		ret = check_condition_result;
4267 		goto cleanup;
4268 	}
4269 cleanup:
4270 	read_unlock(macc_lckp);
4271 	kfree(arr);
4272 	return ret;
4273 }
4274 
4275 #define RZONES_DESC_HD 64
4276 
/* Report zones depending on start LBA and reporting options */
4278 static int resp_report_zones(struct scsi_cmnd *scp,
4279 			     struct sdebug_dev_info *devip)
4280 {
4281 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4282 	int ret = 0;
4283 	u32 alloc_len, rep_opts, rep_len;
4284 	bool partial;
4285 	u64 lba, zs_lba;
4286 	u8 *arr = NULL, *desc;
4287 	u8 *cmd = scp->cmnd;
4288 	struct sdeb_zone_state *zsp;
4289 	struct sdeb_store_info *sip = devip2sip(devip, false);
4290 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4291 
4292 	if (!sdebug_dev_is_zoned(devip)) {
4293 		mk_sense_invalid_opcode(scp);
4294 		return check_condition_result;
4295 	}
4296 	zs_lba = get_unaligned_be64(cmd + 2);
4297 	alloc_len = get_unaligned_be32(cmd + 10);
4298 	rep_opts = cmd[14] & 0x3f;
4299 	partial = cmd[14] & 0x80;
4300 
4301 	if (zs_lba >= sdebug_capacity) {
4302 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4303 		return check_condition_result;
4304 	}
4305 
4306 	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4307 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4308 			    max_zones);
4309 
4310 	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4311 	if (!arr) {
4312 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4313 				INSUFF_RES_ASCQ);
4314 		return check_condition_result;
4315 	}
4316 
4317 	read_lock(macc_lckp);
4318 
4319 	desc = arr + 64;
4320 	for (i = 0; i < max_zones; i++) {
4321 		lba = zs_lba + devip->zsize * i;
4322 		if (lba > sdebug_capacity)
4323 			break;
4324 		zsp = zbc_zone(devip, lba);
4325 		switch (rep_opts) {
4326 		case 0x00:
4327 			/* All zones */
4328 			break;
4329 		case 0x01:
4330 			/* Empty zones */
4331 			if (zsp->z_cond != ZC1_EMPTY)
4332 				continue;
4333 			break;
4334 		case 0x02:
4335 			/* Implicit open zones */
4336 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4337 				continue;
4338 			break;
4339 		case 0x03:
4340 			/* Explicit open zones */
4341 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4342 				continue;
4343 			break;
4344 		case 0x04:
4345 			/* Closed zones */
4346 			if (zsp->z_cond != ZC4_CLOSED)
4347 				continue;
4348 			break;
4349 		case 0x05:
4350 			/* Full zones */
4351 			if (zsp->z_cond != ZC5_FULL)
4352 				continue;
4353 			break;
4354 		case 0x06:
4355 		case 0x07:
4356 		case 0x10:
4357 			/*
4358 			 * Read-only, offline, reset WP recommended are
4359 			 * not emulated: no zones to report;
4360 			 */
4361 			continue;
4362 		case 0x11:
4363 			/* non-seq-resource set */
4364 			if (!zsp->z_non_seq_resource)
4365 				continue;
4366 			break;
4367 		case 0x3f:
4368 			/* Not write pointer (conventional) zones */
4369 			if (!zbc_zone_is_conv(zsp))
4370 				continue;
4371 			break;
4372 		default:
4373 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4374 					INVALID_FIELD_IN_CDB, 0);
4375 			ret = check_condition_result;
4376 			goto fini;
4377 		}
4378 
4379 		if (nrz < rep_max_zones) {
4380 			/* Fill zone descriptor */
4381 			desc[0] = zsp->z_type;
4382 			desc[1] = zsp->z_cond << 4;
4383 			if (zsp->z_non_seq_resource)
4384 				desc[1] |= 1 << 1;
4385 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4386 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4387 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4388 			desc += 64;
4389 		}
4390 
4391 		if (partial && nrz >= rep_max_zones)
4392 			break;
4393 
4394 		nrz++;
4395 	}
4396 
4397 	/* Report header */
4398 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4399 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4400 
4401 	rep_len = (unsigned long)desc - (unsigned long)arr;
4402 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4403 
4404 fini:
4405 	read_unlock(macc_lckp);
4406 	kfree(arr);
4407 	return ret;
4408 }
4409 
4410 /* Logic transplanted from tcmu-runner, file_zbc.c */
4411 static void zbc_open_all(struct sdebug_dev_info *devip)
4412 {
4413 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4414 	unsigned int i;
4415 
4416 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4417 		if (zsp->z_cond == ZC4_CLOSED)
4418 			zbc_open_zone(devip, &devip->zstate[i], true);
4419 	}
4420 }
4421 
4422 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4423 {
4424 	int res = 0;
4425 	u64 z_id;
4426 	enum sdebug_z_cond zc;
4427 	u8 *cmd = scp->cmnd;
4428 	struct sdeb_zone_state *zsp;
4429 	bool all = cmd[14] & 0x01;
4430 	struct sdeb_store_info *sip = devip2sip(devip, false);
4431 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4432 
4433 	if (!sdebug_dev_is_zoned(devip)) {
4434 		mk_sense_invalid_opcode(scp);
4435 		return check_condition_result;
4436 	}
4437 
4438 	write_lock(macc_lckp);
4439 
4440 	if (all) {
4441 		/* Check if all closed zones can be open */
4442 		if (devip->max_open &&
4443 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4444 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4445 					INSUFF_ZONE_ASCQ);
4446 			res = check_condition_result;
4447 			goto fini;
4448 		}
4449 		/* Open all closed zones */
4450 		zbc_open_all(devip);
4451 		goto fini;
4452 	}
4453 
4454 	/* Open the specified zone */
4455 	z_id = get_unaligned_be64(cmd + 2);
4456 	if (z_id >= sdebug_capacity) {
4457 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4458 		res = check_condition_result;
4459 		goto fini;
4460 	}
4461 
4462 	zsp = zbc_zone(devip, z_id);
4463 	if (z_id != zsp->z_start) {
4464 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4465 		res = check_condition_result;
4466 		goto fini;
4467 	}
4468 	if (zbc_zone_is_conv(zsp)) {
4469 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4470 		res = check_condition_result;
4471 		goto fini;
4472 	}
4473 
4474 	zc = zsp->z_cond;
4475 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4476 		goto fini;
4477 
4478 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4479 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4480 				INSUFF_ZONE_ASCQ);
4481 		res = check_condition_result;
4482 		goto fini;
4483 	}
4484 
4485 	if (zc == ZC2_IMPLICIT_OPEN)
4486 		zbc_close_zone(devip, zsp);
4487 	zbc_open_zone(devip, zsp, true);
4488 fini:
4489 	write_unlock(macc_lckp);
4490 	return res;
4491 }
4492 
4493 static void zbc_close_all(struct sdebug_dev_info *devip)
4494 {
4495 	unsigned int i;
4496 
4497 	for (i = 0; i < devip->nr_zones; i++)
4498 		zbc_close_zone(devip, &devip->zstate[i]);
4499 }
4500 
4501 static int resp_close_zone(struct scsi_cmnd *scp,
4502 			   struct sdebug_dev_info *devip)
4503 {
4504 	int res = 0;
4505 	u64 z_id;
4506 	u8 *cmd = scp->cmnd;
4507 	struct sdeb_zone_state *zsp;
4508 	bool all = cmd[14] & 0x01;
4509 	struct sdeb_store_info *sip = devip2sip(devip, false);
4510 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4511 
4512 	if (!sdebug_dev_is_zoned(devip)) {
4513 		mk_sense_invalid_opcode(scp);
4514 		return check_condition_result;
4515 	}
4516 
4517 	write_lock(macc_lckp);
4518 
4519 	if (all) {
4520 		zbc_close_all(devip);
4521 		goto fini;
4522 	}
4523 
4524 	/* Close specified zone */
4525 	z_id = get_unaligned_be64(cmd + 2);
4526 	if (z_id >= sdebug_capacity) {
4527 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4528 		res = check_condition_result;
4529 		goto fini;
4530 	}
4531 
4532 	zsp = zbc_zone(devip, z_id);
4533 	if (z_id != zsp->z_start) {
4534 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4535 		res = check_condition_result;
4536 		goto fini;
4537 	}
4538 	if (zbc_zone_is_conv(zsp)) {
4539 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4540 		res = check_condition_result;
4541 		goto fini;
4542 	}
4543 
4544 	zbc_close_zone(devip, zsp);
4545 fini:
4546 	write_unlock(macc_lckp);
4547 	return res;
4548 }
4549 
4550 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4551 			    struct sdeb_zone_state *zsp, bool empty)
4552 {
4553 	enum sdebug_z_cond zc = zsp->z_cond;
4554 
4555 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4556 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4557 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4558 			zbc_close_zone(devip, zsp);
4559 		if (zsp->z_cond == ZC4_CLOSED)
4560 			devip->nr_closed--;
4561 		zsp->z_wp = zsp->z_start + zsp->z_size;
4562 		zsp->z_cond = ZC5_FULL;
4563 	}
4564 }
4565 
4566 static void zbc_finish_all(struct sdebug_dev_info *devip)
4567 {
4568 	unsigned int i;
4569 
4570 	for (i = 0; i < devip->nr_zones; i++)
4571 		zbc_finish_zone(devip, &devip->zstate[i], false);
4572 }
4573 
4574 static int resp_finish_zone(struct scsi_cmnd *scp,
4575 			    struct sdebug_dev_info *devip)
4576 {
4577 	struct sdeb_zone_state *zsp;
4578 	int res = 0;
4579 	u64 z_id;
4580 	u8 *cmd = scp->cmnd;
4581 	bool all = cmd[14] & 0x01;
4582 	struct sdeb_store_info *sip = devip2sip(devip, false);
4583 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4584 
4585 	if (!sdebug_dev_is_zoned(devip)) {
4586 		mk_sense_invalid_opcode(scp);
4587 		return check_condition_result;
4588 	}
4589 
4590 	write_lock(macc_lckp);
4591 
4592 	if (all) {
4593 		zbc_finish_all(devip);
4594 		goto fini;
4595 	}
4596 
4597 	/* Finish the specified zone */
4598 	z_id = get_unaligned_be64(cmd + 2);
4599 	if (z_id >= sdebug_capacity) {
4600 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4601 		res = check_condition_result;
4602 		goto fini;
4603 	}
4604 
4605 	zsp = zbc_zone(devip, z_id);
4606 	if (z_id != zsp->z_start) {
4607 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4608 		res = check_condition_result;
4609 		goto fini;
4610 	}
4611 	if (zbc_zone_is_conv(zsp)) {
4612 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4613 		res = check_condition_result;
4614 		goto fini;
4615 	}
4616 
4617 	zbc_finish_zone(devip, zsp, true);
4618 fini:
4619 	write_unlock(macc_lckp);
4620 	return res;
4621 }
4622 
4623 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4624 			 struct sdeb_zone_state *zsp)
4625 {
4626 	enum sdebug_z_cond zc;
4627 
4628 	if (zbc_zone_is_conv(zsp))
4629 		return;
4630 
4631 	zc = zsp->z_cond;
4632 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4633 		zbc_close_zone(devip, zsp);
4634 
4635 	if (zsp->z_cond == ZC4_CLOSED)
4636 		devip->nr_closed--;
4637 
4638 	zsp->z_non_seq_resource = false;
4639 	zsp->z_wp = zsp->z_start;
4640 	zsp->z_cond = ZC1_EMPTY;
4641 }
4642 
4643 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4644 {
4645 	unsigned int i;
4646 
4647 	for (i = 0; i < devip->nr_zones; i++)
4648 		zbc_rwp_zone(devip, &devip->zstate[i]);
4649 }
4650 
4651 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4652 {
4653 	struct sdeb_zone_state *zsp;
4654 	int res = 0;
4655 	u64 z_id;
4656 	u8 *cmd = scp->cmnd;
4657 	bool all = cmd[14] & 0x01;
4658 	struct sdeb_store_info *sip = devip2sip(devip, false);
4659 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4660 
4661 	if (!sdebug_dev_is_zoned(devip)) {
4662 		mk_sense_invalid_opcode(scp);
4663 		return check_condition_result;
4664 	}
4665 
4666 	write_lock(macc_lckp);
4667 
4668 	if (all) {
4669 		zbc_rwp_all(devip);
4670 		goto fini;
4671 	}
4672 
4673 	z_id = get_unaligned_be64(cmd + 2);
4674 	if (z_id >= sdebug_capacity) {
4675 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4676 		res = check_condition_result;
4677 		goto fini;
4678 	}
4679 
4680 	zsp = zbc_zone(devip, z_id);
4681 	if (z_id != zsp->z_start) {
4682 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4683 		res = check_condition_result;
4684 		goto fini;
4685 	}
4686 	if (zbc_zone_is_conv(zsp)) {
4687 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4688 		res = check_condition_result;
4689 		goto fini;
4690 	}
4691 
4692 	zbc_rwp_zone(devip, zsp);
4693 fini:
4694 	write_unlock(macc_lckp);
4695 	return res;
4696 }
4697 
4698 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4699 {
4700 	u16 hwq;
4701 
4702 	if (sdebug_host_max_queue) {
4703 		/* Provide a simple method to choose the hwq */
4704 		hwq = smp_processor_id() % submit_queues;
4705 	} else {
4706 		u32 tag = blk_mq_unique_tag(cmnd->request);
4707 
4708 		hwq = blk_mq_unique_tag_to_hwq(tag);
4709 
4710 		pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4711 		if (WARN_ON_ONCE(hwq >= submit_queues))
4712 			hwq = 0;
4713 	}
4714 	return sdebug_q_arr + hwq;
4715 }
4716 
4717 static u32 get_tag(struct scsi_cmnd *cmnd)
4718 {
4719 	return blk_mq_unique_tag(cmnd->request);
4720 }
4721 
/*
 * Queued (deferred) command completions converge here. Called from both the
 * hrtimer and the work queue completion paths with the sdebug_defer object
 * belonging to the queued command. Validates the queue slot, detaches the
 * command from it, updates per-device and retirement accounting, and
 * finally invokes the mid-level done callback (unless the command was
 * marked aborted).
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions that ran on a CPU other than the issuer's */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* a positive retired_max_queue means max_queue was reduced at runtime */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* end retirement once no in-use slot exceeds the new limit */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
4795 
4796 /* When high resolution timer goes off this function is called. */
4797 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4798 {
4799 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4800 						  hrt);
4801 	sdebug_q_cmd_complete(sd_dp);
4802 	return HRTIMER_NORESTART;
4803 }
4804 
4805 /* When work queue schedules work, it calls this function. */
4806 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4807 {
4808 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4809 						  ew.work);
4810 	sdebug_q_cmd_complete(sd_dp);
4811 }
4812 
/*
 * Shared logical unit name (UUID) used when sdebug_uuid_ctl == 2: every
 * device gets the same LU name. Generated lazily the first time a device
 * is created (see sdebug_device_create()).
 */
static bool got_shared_uuid;
static uuid_t shared_uuid;
4815 
/*
 * Set up the zone configuration of a ZBC device: compute the zone size and
 * zone count from the device capacity and module parameters, then allocate
 * and initialize the per-zone state array. Returns 0 on success or a
 * negative errno on invalid parameters / allocation failure.
 */
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		/* halve the zone size until at least 4 zones fit */
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			/* conventional zones come first; no write pointer */
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		/* the final zone may be smaller than the nominal zone size */
		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
4901 
4902 static struct sdebug_dev_info *sdebug_device_create(
4903 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4904 {
4905 	struct sdebug_dev_info *devip;
4906 
4907 	devip = kzalloc(sizeof(*devip), flags);
4908 	if (devip) {
4909 		if (sdebug_uuid_ctl == 1)
4910 			uuid_gen(&devip->lu_name);
4911 		else if (sdebug_uuid_ctl == 2) {
4912 			if (got_shared_uuid)
4913 				devip->lu_name = shared_uuid;
4914 			else {
4915 				uuid_gen(&shared_uuid);
4916 				got_shared_uuid = true;
4917 				devip->lu_name = shared_uuid;
4918 			}
4919 		}
4920 		devip->sdbg_host = sdbg_host;
4921 		if (sdeb_zbc_in_use) {
4922 			devip->zmodel = sdeb_zbc_model;
4923 			if (sdebug_device_create_zones(devip)) {
4924 				kfree(devip);
4925 				return NULL;
4926 			}
4927 		} else {
4928 			devip->zmodel = BLK_ZONED_NONE;
4929 		}
4930 		devip->sdbg_host = sdbg_host;
4931 		devip->create_ts = ktime_get_boottime();
4932 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4933 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4934 	}
4935 	return devip;
4936 }
4937 
4938 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4939 {
4940 	struct sdebug_host_info *sdbg_host;
4941 	struct sdebug_dev_info *open_devip = NULL;
4942 	struct sdebug_dev_info *devip;
4943 
4944 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4945 	if (!sdbg_host) {
4946 		pr_err("Host info NULL\n");
4947 		return NULL;
4948 	}
4949 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4950 		if ((devip->used) && (devip->channel == sdev->channel) &&
4951 		    (devip->target == sdev->id) &&
4952 		    (devip->lun == sdev->lun))
4953 			return devip;
4954 		else {
4955 			if ((!devip->used) && (!open_devip))
4956 				open_devip = devip;
4957 		}
4958 	}
4959 	if (!open_devip) { /* try and make a new one */
4960 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4961 		if (!open_devip) {
4962 			pr_err("out of memory at line %d\n", __LINE__);
4963 			return NULL;
4964 		}
4965 	}
4966 
4967 	open_devip->channel = sdev->channel;
4968 	open_devip->target = sdev->id;
4969 	open_devip->lun = sdev->lun;
4970 	open_devip->sdbg_host = sdbg_host;
4971 	atomic_set(&open_devip->num_in_q, 0);
4972 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4973 	open_devip->used = true;
4974 	return open_devip;
4975 }
4976 
/* slave_alloc host template hook: logs the device address when verbose. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
4984 
4985 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4986 {
4987 	struct sdebug_dev_info *devip =
4988 			(struct sdebug_dev_info *)sdp->hostdata;
4989 
4990 	if (sdebug_verbose)
4991 		pr_info("slave_configure <%u %u %u %llu>\n",
4992 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4993 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4994 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4995 	if (devip == NULL) {
4996 		devip = find_build_dev_info(sdp);
4997 		if (devip == NULL)
4998 			return 1;  /* no resources, will be marked offline */
4999 	}
5000 	sdp->hostdata = devip;
5001 	if (sdebug_no_uld)
5002 		sdp->no_uld_attach = 1;
5003 	config_cdb_len(sdp);
5004 	return 0;
5005 }
5006 
5007 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5008 {
5009 	struct sdebug_dev_info *devip =
5010 		(struct sdebug_dev_info *)sdp->hostdata;
5011 
5012 	if (sdebug_verbose)
5013 		pr_info("slave_destroy <%u %u %u %llu>\n",
5014 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5015 	if (devip) {
5016 		/* make this slot available for re-use */
5017 		devip->used = false;
5018 		sdp->hostdata = NULL;
5019 	}
5020 }
5021 
5022 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5023 			   enum sdeb_defer_type defer_t)
5024 {
5025 	if (!sd_dp)
5026 		return;
5027 	if (defer_t == SDEB_DEFER_HRT)
5028 		hrtimer_cancel(&sd_dp->hrt);
5029 	else if (defer_t == SDEB_DEFER_WQ)
5030 		cancel_work_sync(&sd_dp->ew.work);
5031 }
5032 
/*
 * If @cmnd is found in any submission queue, delete its timer or work queue
 * entry and return true; else return false. The queue lock is dropped
 * before stop_qc_helper() is called, then the in-use bit is cleared.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* also scan slots above max_queue while retirement is pending */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* capture the defer type before clearing it */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling timer/work */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5078 
/*
 * Deletes (stops) timers or work queues of all queued commands across all
 * submission queues. The queue lock is dropped around each stop_qc_helper()
 * call and re-taken to continue the scan.
 */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* capture the defer type before clearing it */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling timer/work */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5117 
5118 /* Free queued command memory on heap */
5119 static void free_all_queued(void)
5120 {
5121 	int j, k;
5122 	struct sdebug_queue *sqp;
5123 	struct sdebug_queued_cmd *sqcp;
5124 
5125 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5126 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5127 			sqcp = &sqp->qc_arr[k];
5128 			kfree(sqcp->sd_dp);
5129 			sqcp->sd_dp = NULL;
5130 		}
5131 	}
5132 }
5133 
5134 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5135 {
5136 	bool ok;
5137 
5138 	++num_aborts;
5139 	if (SCpnt) {
5140 		ok = stop_queued_cmnd(SCpnt);
5141 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5142 			sdev_printk(KERN_INFO, SCpnt->device,
5143 				    "%s: command%s found\n", __func__,
5144 				    ok ? "" : " not");
5145 	}
5146 	return SUCCESS;
5147 }
5148 
5149 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5150 {
5151 	++num_dev_resets;
5152 	if (SCpnt && SCpnt->device) {
5153 		struct scsi_device *sdp = SCpnt->device;
5154 		struct sdebug_dev_info *devip =
5155 				(struct sdebug_dev_info *)sdp->hostdata;
5156 
5157 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5158 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5159 		if (devip)
5160 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5161 	}
5162 	return SUCCESS;
5163 }
5164 
5165 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5166 {
5167 	struct sdebug_host_info *sdbg_host;
5168 	struct sdebug_dev_info *devip;
5169 	struct scsi_device *sdp;
5170 	struct Scsi_Host *hp;
5171 	int k = 0;
5172 
5173 	++num_target_resets;
5174 	if (!SCpnt)
5175 		goto lie;
5176 	sdp = SCpnt->device;
5177 	if (!sdp)
5178 		goto lie;
5179 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5180 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5181 	hp = sdp->host;
5182 	if (!hp)
5183 		goto lie;
5184 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5185 	if (sdbg_host) {
5186 		list_for_each_entry(devip,
5187 				    &sdbg_host->dev_info_list,
5188 				    dev_list)
5189 			if (devip->target == sdp->id) {
5190 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5191 				++k;
5192 			}
5193 	}
5194 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5195 		sdev_printk(KERN_INFO, sdp,
5196 			    "%s: %d device(s) found in target\n", __func__, k);
5197 lie:
5198 	return SUCCESS;
5199 }
5200 
5201 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5202 {
5203 	struct sdebug_host_info *sdbg_host;
5204 	struct sdebug_dev_info *devip;
5205 	struct scsi_device *sdp;
5206 	struct Scsi_Host *hp;
5207 	int k = 0;
5208 
5209 	++num_bus_resets;
5210 	if (!(SCpnt && SCpnt->device))
5211 		goto lie;
5212 	sdp = SCpnt->device;
5213 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5214 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5215 	hp = sdp->host;
5216 	if (hp) {
5217 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5218 		if (sdbg_host) {
5219 			list_for_each_entry(devip,
5220 					    &sdbg_host->dev_info_list,
5221 					    dev_list) {
5222 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5223 				++k;
5224 			}
5225 		}
5226 	}
5227 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5228 		sdev_printk(KERN_INFO, sdp,
5229 			    "%s: %d device(s) found in host\n", __func__, k);
5230 lie:
5231 	return SUCCESS;
5232 }
5233 
5234 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5235 {
5236 	struct sdebug_host_info *sdbg_host;
5237 	struct sdebug_dev_info *devip;
5238 	int k = 0;
5239 
5240 	++num_host_resets;
5241 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5242 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5243 	spin_lock(&sdebug_host_list_lock);
5244 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5245 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5246 				    dev_list) {
5247 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5248 			++k;
5249 		}
5250 	}
5251 	spin_unlock(&sdebug_host_list_lock);
5252 	stop_all_queued();
5253 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5254 		sdev_printk(KERN_INFO, SCpnt->device,
5255 			    "%s: %d device(s) found\n", __func__, k);
5256 	return SUCCESS;
5257 }
5258 
/*
 * Write an MS-DOS style partition table into the first sector of the ram
 * disk image @ramp: the 0x55/0xAA signature at bytes 510/511 plus up to
 * SDEBUG_MAX_PARTS primary partition entries at offset 0x1be, with CHS
 * values derived from sdebug_heads and sdebug_sectors_per. Does nothing
 * for stores smaller than 1 MiB or when no partitions are requested.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	/* partition starts are aligned to cylinder boundaries */
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends the loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5307 
5308 static void block_unblock_all_queues(bool block)
5309 {
5310 	int j;
5311 	struct sdebug_queue *sqp;
5312 
5313 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5314 		atomic_set(&sqp->blocked, (int)block);
5315 }
5316 
5317 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5318  * commands will be processed normally before triggers occur.
5319  */
5320 static void tweak_cmnd_count(void)
5321 {
5322 	int count, modulo;
5323 
5324 	modulo = abs(sdebug_every_nth);
5325 	if (modulo < 2)
5326 		return;
5327 	block_unblock_all_queues(true);
5328 	count = atomic_read(&sdebug_cmnd_count);
5329 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5330 	block_unblock_all_queues(false);
5331 }
5332 
/* Reset all driver-wide command statistics counters to zero. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
5340 
5341 static bool inject_on_this_cmd(void)
5342 {
5343 	if (sdebug_every_nth == 0)
5344 		return false;
5345 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5346 }
5347 
#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @scsi_result: forced result (0 means "let pfp decide")
 * @pfp: per-opcode response function invoked to build the response
 * @delta_jiff: delay in jiffies; 0 -> respond in this thread, <0 -> work queue
 * @ndelay: delay in nanoseconds (used when delta_jiff is 0 at the caller)
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* emulate TASK SET FULL when the device queue depth is exceeded */
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* occasionally inject TASK SET FULL near the queue limit */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this submission queue */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			/* roll back the slot claim on allocation failure */
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	/* remember start time so command processing counts toward the delay */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			/* optionally randomize the delay up to the given max */
			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* delay already consumed: complete now */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
			blk_abort_request(cmnd->request);
			atomic_set(&sdeb_inject_pending, 0);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
5557 
5558 /* Note: The following macros create attribute files in the
5559    /sys/module/scsi_debug/parameters directory. Unfortunately this
5560    driver is unaware of a change and cannot trigger auxiliary actions
5561    as it can when the corresponding attribute in the
5562    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5563  */
/* Module parameters, kept in alphabetical order. Most mirror a driver
 * attribute of the same name in /sys/bus/pseudo/drivers/scsi_debug . */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line help strings for the parameters above, same alphabetical order;
 * shown by 'modinfo scsi_debug'. */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5698 
5699 #define SDEBUG_INFO_LEN 256
5700 static char sdebug_info[SDEBUG_INFO_LEN];
5701 
5702 static const char *scsi_debug_info(struct Scsi_Host *shp)
5703 {
5704 	int k;
5705 
5706 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5707 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5708 	if (k >= (SDEBUG_INFO_LEN - 1))
5709 		return sdebug_info;
5710 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5711 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5712 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5713 		  "statistics", (int)sdebug_statistics);
5714 	return sdebug_info;
5715 }
5716 
5717 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5718 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5719 				 int length)
5720 {
5721 	char arr[16];
5722 	int opts;
5723 	int minLen = length > 15 ? 15 : length;
5724 
5725 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5726 		return -EACCES;
5727 	memcpy(arr, buffer, minLen);
5728 	arr[minLen] = '\0';
5729 	if (1 != sscanf(arr, "%d", &opts))
5730 		return -EINVAL;
5731 	sdebug_opts = opts;
5732 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5733 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5734 	if (sdebug_every_nth != 0)
5735 		tweak_cmnd_count();
5736 	return length;
5737 }
5738 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	/* overall driver configuration and counters */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per submit queue: first and last busy tags, if any are in use */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* when any backing stores exist, map hosts to their store indexes */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		/* list every store, flagging those marked not-in-use */
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
5812 
5813 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5814 {
5815 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5816 }
5817 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5818  * of delay is jiffies.
5819  */
5820 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5821 			   size_t count)
5822 {
5823 	int jdelay, res;
5824 
5825 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5826 		res = count;
5827 		if (sdebug_jdelay != jdelay) {
5828 			int j, k;
5829 			struct sdebug_queue *sqp;
5830 
5831 			block_unblock_all_queues(true);
5832 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5833 			     ++j, ++sqp) {
5834 				k = find_first_bit(sqp->in_use_bm,
5835 						   sdebug_max_queue);
5836 				if (k != sdebug_max_queue) {
5837 					res = -EBUSY;   /* queued commands */
5838 					break;
5839 				}
5840 			}
5841 			if (res > 0) {
5842 				sdebug_jdelay = jdelay;
5843 				sdebug_ndelay = 0;
5844 			}
5845 			block_unblock_all_queues(false);
5846 		}
5847 		return res;
5848 	}
5849 	return -EINVAL;
5850 }
5851 static DRIVER_ATTR_RW(delay);
5852 
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 10^9 nanoseconds (i.e. under one second) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* refuse the change while any command tag is in use */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				/* non-zero ndelay takes precedence over jdelay */
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
5893 
5894 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5895 {
5896 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5897 }
5898 
5899 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5900 			  size_t count)
5901 {
5902 	int opts;
5903 	char work[20];
5904 
5905 	if (sscanf(buf, "%10s", work) == 1) {
5906 		if (strncasecmp(work, "0x", 2) == 0) {
5907 			if (kstrtoint(work + 2, 16, &opts) == 0)
5908 				goto opts_done;
5909 		} else {
5910 			if (kstrtoint(work, 10, &opts) == 0)
5911 				goto opts_done;
5912 		}
5913 	}
5914 	return -EINVAL;
5915 opts_done:
5916 	sdebug_opts = opts;
5917 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5918 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5919 	tweak_cmnd_count();
5920 	return count;
5921 }
5922 static DRIVER_ATTR_RW(opts);
5923 
5924 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5925 {
5926 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5927 }
5928 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5929 			   size_t count)
5930 {
5931 	int n;
5932 
5933 	/* Cannot change from or to TYPE_ZBC with sysfs */
5934 	if (sdebug_ptype == TYPE_ZBC)
5935 		return -EINVAL;
5936 
5937 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5938 		if (n == TYPE_ZBC)
5939 			return -EINVAL;
5940 		sdebug_ptype = n;
5941 		return count;
5942 	}
5943 	return -EINVAL;
5944 }
5945 static DRIVER_ATTR_RW(ptype);
5946 
5947 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5948 {
5949 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5950 }
5951 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5952 			    size_t count)
5953 {
5954 	int n;
5955 
5956 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5957 		sdebug_dsense = n;
5958 		return count;
5959 	}
5960 	return -EINVAL;
5961 }
5962 static DRIVER_ATTR_RW(dsense);
5963 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggling fake_rw also manages the backing store(s): a 1->0 transition
 * attaches every host to one shared store (creating it if needed); a 0->1
 * transition erases all stores apart from the first. */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* re-activate the retained first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6011 
6012 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6013 {
6014 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6015 }
6016 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6017 			      size_t count)
6018 {
6019 	int n;
6020 
6021 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6022 		sdebug_no_lun_0 = n;
6023 		return count;
6024 	}
6025 	return -EINVAL;
6026 }
6027 static DRIVER_ATTR_RW(no_lun_0);
6028 
6029 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6030 {
6031 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6032 }
6033 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6034 			      size_t count)
6035 {
6036 	int n;
6037 
6038 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6039 		sdebug_num_tgts = n;
6040 		sdebug_max_tgts_luns();
6041 		return count;
6042 	}
6043 	return -EINVAL;
6044 }
6045 static DRIVER_ATTR_RW(num_tgts);
6046 
6047 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6048 {
6049 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6050 }
6051 static DRIVER_ATTR_RO(dev_size_mb);
6052 
6053 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6054 {
6055 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6056 }
6057 
6058 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6059 				    size_t count)
6060 {
6061 	bool v;
6062 
6063 	if (kstrtobool(buf, &v))
6064 		return -EINVAL;
6065 
6066 	sdebug_per_host_store = v;
6067 	return count;
6068 }
6069 static DRIVER_ATTR_RW(per_host_store);
6070 
6071 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6072 {
6073 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6074 }
6075 static DRIVER_ATTR_RO(num_parts);
6076 
6077 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6078 {
6079 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6080 }
6081 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6082 			       size_t count)
6083 {
6084 	int nth;
6085 	char work[20];
6086 
6087 	if (sscanf(buf, "%10s", work) == 1) {
6088 		if (strncasecmp(work, "0x", 2) == 0) {
6089 			if (kstrtoint(work + 2, 16, &nth) == 0)
6090 				goto every_nth_done;
6091 		} else {
6092 			if (kstrtoint(work, 10, &nth) == 0)
6093 				goto every_nth_done;
6094 		}
6095 	}
6096 	return -EINVAL;
6097 
6098 every_nth_done:
6099 	sdebug_every_nth = nth;
6100 	if (nth && !sdebug_statistics) {
6101 		pr_info("every_nth needs statistics=1, set it\n");
6102 		sdebug_statistics = true;
6103 	}
6104 	tweak_cmnd_count();
6105 	return count;
6106 }
6107 static DRIVER_ATTR_RW(every_nth);
6108 
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* raise a LUNS_CHANGED unit attention on every
			 * simulated device of every host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6147 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	/* only allowed while host_max_queue is not in force (i.e. 0) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k: highest in-use tag across all submit queues; becomes
		 * SDEBUG_CANQUEUE if any queue is empty, since
		 * find_last_bit() returns the bitmap size when no bit is set */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* in-flight tags above the new limit must drain */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6184 
6185 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6186 {
6187 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6188 }
6189 
6190 /*
6191  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6192  * in range [0, sdebug_host_max_queue), we can't change it.
6193  */
6194 static DRIVER_ATTR_RO(host_max_queue);
6195 
6196 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6197 {
6198 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6199 }
6200 static DRIVER_ATTR_RO(no_uld);
6201 
6202 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6203 {
6204 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6205 }
6206 static DRIVER_ATTR_RO(scsi_level);
6207 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* raise a CAPACITY_CHANGED unit attention on every
			 * simulated device of every host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
6246 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/* Writing a positive number adds that many hosts; a negative number
 * removes that many. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* prefer re-using a store currently marked
				 * not-in-use over allocating a new one */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6290 
6291 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6292 {
6293 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6294 }
6295 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6296 				    size_t count)
6297 {
6298 	int n;
6299 
6300 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6301 		sdebug_vpd_use_hostno = n;
6302 		return count;
6303 	}
6304 	return -EINVAL;
6305 }
6306 static DRIVER_ATTR_RW(vpd_use_hostno);
6307 
6308 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6309 {
6310 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6311 }
6312 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6313 				size_t count)
6314 {
6315 	int n;
6316 
6317 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6318 		if (n > 0)
6319 			sdebug_statistics = true;
6320 		else {
6321 			clear_queue_stats();
6322 			sdebug_statistics = false;
6323 		}
6324 		return count;
6325 	}
6326 	return -EINVAL;
6327 }
6328 static DRIVER_ATTR_RW(statistics);
6329 
6330 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6331 {
6332 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6333 }
6334 static DRIVER_ATTR_RO(sector_size);
6335 
6336 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6337 {
6338 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6339 }
6340 static DRIVER_ATTR_RO(submit_queues);
6341 
6342 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6343 {
6344 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6345 }
6346 static DRIVER_ATTR_RO(dix);
6347 
6348 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6349 {
6350 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6351 }
6352 static DRIVER_ATTR_RO(dif);
6353 
6354 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6355 {
6356 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6357 }
6358 static DRIVER_ATTR_RO(guard);
6359 
6360 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6361 {
6362 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6363 }
6364 static DRIVER_ATTR_RO(ato);
6365 
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning every sector is mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* only the first store's provisioning bitmap is shown */
	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above guarantees room for these two bytes */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6387 
6388 static ssize_t random_show(struct device_driver *ddp, char *buf)
6389 {
6390 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6391 }
6392 
6393 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6394 			    size_t count)
6395 {
6396 	bool v;
6397 
6398 	if (kstrtobool(buf, &v))
6399 		return -EINVAL;
6400 
6401 	sdebug_random = v;
6402 	return count;
6403 }
6404 static DRIVER_ATTR_RW(random);
6405 
/* Show the removable-media flag as 0 or 1. */
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
6410 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6411 			       size_t count)
6412 {
6413 	int n;
6414 
6415 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6416 		sdebug_removable = (n > 0);
6417 		return count;
6418 	}
6419 	return -EINVAL;
6420 }
6421 static DRIVER_ATTR_RW(removable);
6422 
/* Show host_lock as 0 or 1 (the flag itself has no effect; see below). */
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
6427 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6428 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6429 			       size_t count)
6430 {
6431 	int n;
6432 
6433 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6434 		sdebug_host_lock = (n > 0);
6435 		return count;
6436 	}
6437 	return -EINVAL;
6438 }
6439 static DRIVER_ATTR_RW(host_lock);
6440 
/* Show the strict CDB-checking flag as 0 or 1. */
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
6445 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6446 			    size_t count)
6447 {
6448 	int n;
6449 
6450 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6451 		sdebug_strict = (n > 0);
6452 		return count;
6453 	}
6454 	return -EINVAL;
6455 }
6456 static DRIVER_ATTR_RW(strict);
6457 
/* Read-only view of the uuid_ctl setting, normalized to 0 or 1. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
6463 
/* Show the currently configured CDB length. */
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
6468 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6469 			     size_t count)
6470 {
6471 	int ret, n;
6472 
6473 	ret = kstrtoint(buf, 0, &n);
6474 	if (ret)
6475 		return ret;
6476 	sdebug_cdb_len = n;
6477 	all_config_cdb_len();
6478 	return count;
6479 }
6480 static DRIVER_ATTR_RW(cdb_len);
6481 
/* Long-form spellings for the zbc= parameter; index equals BLK_ZONED_*. */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

/* Short-form aliases accepted for the same models. */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

/* Numeric aliases accepted for the same models. */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6499 
6500 static int sdeb_zbc_model_str(const char *cp)
6501 {
6502 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6503 
6504 	if (res < 0) {
6505 		res = sysfs_match_string(zbc_model_strs_b, cp);
6506 		if (res < 0) {
6507 			res = sysfs_match_string(zbc_model_strs_c, cp);
6508 			if (res < 0)
6509 				return -EINVAL;
6510 		}
6511 	}
6512 	return res;
6513 }
6514 
/* Show the ZBC model using its long-form name (none/host-aware/host-managed). */
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);
6521 
/* Milliseconds until a (re)started unit reports ready; read-only. */
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
6527 
6528 /* Note: The following array creates attribute files in the
6529    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6530    files (over those found in the /sys/module/scsi_debug/parameters
6531    directory) is that auxiliary actions can be triggered when an attribute
6532    is changed. For example see: add_host_store() above.
6533  */
6534 
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,	/* sentinel required by the attribute-group machinery */
};
/* Generates sdebug_drv_groups, referenced by pseudo_lld_bus.drv_groups. */
ATTRIBUTE_GROUPS(sdebug_drv);
6575 
6576 static struct device *pseudo_primary;
6577 
6578 static int __init scsi_debug_init(void)
6579 {
6580 	bool want_store = (sdebug_fake_rw == 0);
6581 	unsigned long sz;
6582 	int k, ret, hosts_to_add;
6583 	int idx = -1;
6584 
6585 	ramdisk_lck_a[0] = &atomic_rw;
6586 	ramdisk_lck_a[1] = &atomic_rw2;
6587 	atomic_set(&retired_max_queue, 0);
6588 
6589 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6590 		pr_warn("ndelay must be less than 1 second, ignored\n");
6591 		sdebug_ndelay = 0;
6592 	} else if (sdebug_ndelay > 0)
6593 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6594 
6595 	switch (sdebug_sector_size) {
6596 	case  512:
6597 	case 1024:
6598 	case 2048:
6599 	case 4096:
6600 		break;
6601 	default:
6602 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6603 		return -EINVAL;
6604 	}
6605 
6606 	switch (sdebug_dif) {
6607 	case T10_PI_TYPE0_PROTECTION:
6608 		break;
6609 	case T10_PI_TYPE1_PROTECTION:
6610 	case T10_PI_TYPE2_PROTECTION:
6611 	case T10_PI_TYPE3_PROTECTION:
6612 		have_dif_prot = true;
6613 		break;
6614 
6615 	default:
6616 		pr_err("dif must be 0, 1, 2 or 3\n");
6617 		return -EINVAL;
6618 	}
6619 
6620 	if (sdebug_num_tgts < 0) {
6621 		pr_err("num_tgts must be >= 0\n");
6622 		return -EINVAL;
6623 	}
6624 
6625 	if (sdebug_guard > 1) {
6626 		pr_err("guard must be 0 or 1\n");
6627 		return -EINVAL;
6628 	}
6629 
6630 	if (sdebug_ato > 1) {
6631 		pr_err("ato must be 0 or 1\n");
6632 		return -EINVAL;
6633 	}
6634 
6635 	if (sdebug_physblk_exp > 15) {
6636 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6637 		return -EINVAL;
6638 	}
6639 	if (sdebug_max_luns > 256) {
6640 		pr_warn("max_luns can be no more than 256, use default\n");
6641 		sdebug_max_luns = DEF_MAX_LUNS;
6642 	}
6643 
6644 	if (sdebug_lowest_aligned > 0x3fff) {
6645 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6646 		return -EINVAL;
6647 	}
6648 
6649 	if (submit_queues < 1) {
6650 		pr_err("submit_queues must be 1 or more\n");
6651 		return -EINVAL;
6652 	}
6653 
6654 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6655 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6656 		return -EINVAL;
6657 	}
6658 
6659 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6660 	    (sdebug_host_max_queue < 0)) {
6661 		pr_err("host_max_queue must be in range [0 %d]\n",
6662 		       SDEBUG_CANQUEUE);
6663 		return -EINVAL;
6664 	}
6665 
6666 	if (sdebug_host_max_queue &&
6667 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6668 		sdebug_max_queue = sdebug_host_max_queue;
6669 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6670 			sdebug_max_queue);
6671 	}
6672 
6673 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6674 			       GFP_KERNEL);
6675 	if (sdebug_q_arr == NULL)
6676 		return -ENOMEM;
6677 	for (k = 0; k < submit_queues; ++k)
6678 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6679 
6680 	/*
6681 	 * check for host managed zoned block device specified with
6682 	 * ptype=0x14 or zbc=XXX.
6683 	 */
6684 	if (sdebug_ptype == TYPE_ZBC) {
6685 		sdeb_zbc_model = BLK_ZONED_HM;
6686 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6687 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6688 		if (k < 0) {
6689 			ret = k;
6690 			goto free_vm;
6691 		}
6692 		sdeb_zbc_model = k;
6693 		switch (sdeb_zbc_model) {
6694 		case BLK_ZONED_NONE:
6695 		case BLK_ZONED_HA:
6696 			sdebug_ptype = TYPE_DISK;
6697 			break;
6698 		case BLK_ZONED_HM:
6699 			sdebug_ptype = TYPE_ZBC;
6700 			break;
6701 		default:
6702 			pr_err("Invalid ZBC model\n");
6703 			return -EINVAL;
6704 		}
6705 	}
6706 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6707 		sdeb_zbc_in_use = true;
6708 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6709 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6710 	}
6711 
6712 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6713 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6714 	if (sdebug_dev_size_mb < 1)
6715 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6716 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6717 	sdebug_store_sectors = sz / sdebug_sector_size;
6718 	sdebug_capacity = get_sdebug_capacity();
6719 
6720 	/* play around with geometry, don't waste too much on track 0 */
6721 	sdebug_heads = 8;
6722 	sdebug_sectors_per = 32;
6723 	if (sdebug_dev_size_mb >= 256)
6724 		sdebug_heads = 64;
6725 	else if (sdebug_dev_size_mb >= 16)
6726 		sdebug_heads = 32;
6727 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6728 			       (sdebug_sectors_per * sdebug_heads);
6729 	if (sdebug_cylinders_per >= 1024) {
6730 		/* other LLDs do this; implies >= 1GB ram disk ... */
6731 		sdebug_heads = 255;
6732 		sdebug_sectors_per = 63;
6733 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6734 			       (sdebug_sectors_per * sdebug_heads);
6735 	}
6736 	if (scsi_debug_lbp()) {
6737 		sdebug_unmap_max_blocks =
6738 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6739 
6740 		sdebug_unmap_max_desc =
6741 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6742 
6743 		sdebug_unmap_granularity =
6744 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6745 
6746 		if (sdebug_unmap_alignment &&
6747 		    sdebug_unmap_granularity <=
6748 		    sdebug_unmap_alignment) {
6749 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6750 			ret = -EINVAL;
6751 			goto free_q_arr;
6752 		}
6753 	}
6754 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6755 	if (want_store) {
6756 		idx = sdebug_add_store();
6757 		if (idx < 0) {
6758 			ret = idx;
6759 			goto free_q_arr;
6760 		}
6761 	}
6762 
6763 	pseudo_primary = root_device_register("pseudo_0");
6764 	if (IS_ERR(pseudo_primary)) {
6765 		pr_warn("root_device_register() error\n");
6766 		ret = PTR_ERR(pseudo_primary);
6767 		goto free_vm;
6768 	}
6769 	ret = bus_register(&pseudo_lld_bus);
6770 	if (ret < 0) {
6771 		pr_warn("bus_register error: %d\n", ret);
6772 		goto dev_unreg;
6773 	}
6774 	ret = driver_register(&sdebug_driverfs_driver);
6775 	if (ret < 0) {
6776 		pr_warn("driver_register error: %d\n", ret);
6777 		goto bus_unreg;
6778 	}
6779 
6780 	hosts_to_add = sdebug_add_host;
6781 	sdebug_add_host = 0;
6782 
6783 	for (k = 0; k < hosts_to_add; k++) {
6784 		if (want_store && k == 0) {
6785 			ret = sdebug_add_host_helper(idx);
6786 			if (ret < 0) {
6787 				pr_err("add_host_helper k=%d, error=%d\n",
6788 				       k, -ret);
6789 				break;
6790 			}
6791 		} else {
6792 			ret = sdebug_do_add_host(want_store &&
6793 						 sdebug_per_host_store);
6794 			if (ret < 0) {
6795 				pr_err("add_host k=%d error=%d\n", k, -ret);
6796 				break;
6797 			}
6798 		}
6799 	}
6800 	if (sdebug_verbose)
6801 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6802 
6803 	return 0;
6804 
6805 bus_unreg:
6806 	bus_unregister(&pseudo_lld_bus);
6807 dev_unreg:
6808 	root_device_unregister(pseudo_primary);
6809 free_vm:
6810 	sdebug_erase_store(idx, NULL);
6811 free_q_arr:
6812 	kfree(sdebug_q_arr);
6813 	return ret;
6814 }
6815 
6816 static void __exit scsi_debug_exit(void)
6817 {
6818 	int k = sdebug_num_hosts;
6819 
6820 	stop_all_queued();
6821 	for (; k; k--)
6822 		sdebug_do_remove_host(true);
6823 	free_all_queued();
6824 	driver_unregister(&sdebug_driverfs_driver);
6825 	bus_unregister(&pseudo_lld_bus);
6826 	root_device_unregister(pseudo_primary);
6827 
6828 	sdebug_erase_all_stores(false);
6829 	xa_destroy(per_store_ap);
6830 }
6831 
6832 device_initcall(scsi_debug_init);
6833 module_exit(scsi_debug_exit);
6834 
/* Device-core release callback: free the containing sdebug_host_info. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
6842 
6843 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6844 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6845 {
6846 	if (idx < 0)
6847 		return;
6848 	if (!sip) {
6849 		if (xa_empty(per_store_ap))
6850 			return;
6851 		sip = xa_load(per_store_ap, idx);
6852 		if (!sip)
6853 			return;
6854 	}
6855 	vfree(sip->map_storep);
6856 	vfree(sip->dif_storep);
6857 	vfree(sip->storep);
6858 	xa_erase(per_store_ap, idx);
6859 	kfree(sip);
6860 }
6861 
6862 /* Assume apart_from_first==false only in shutdown case. */
6863 static void sdebug_erase_all_stores(bool apart_from_first)
6864 {
6865 	unsigned long idx;
6866 	struct sdeb_store_info *sip = NULL;
6867 
6868 	xa_for_each(per_store_ap, idx, sip) {
6869 		if (apart_from_first)
6870 			apart_from_first = false;
6871 		else
6872 			sdebug_erase_store(idx, sip);
6873 	}
6874 	if (apart_from_first)
6875 		sdeb_most_recent_idx = sdeb_first_idx;
6876 }
6877 
6878 /*
6879  * Returns store xarray new element index (idx) if >=0 else negated errno.
6880  * Limit the number of stores to 65536.
6881  */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	/* size of the user-data ramdisk in bytes */
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/*
	 * Insert the (still empty) entry first so an index is reserved;
	 * the xarray lock is IRQ-safe, hence GFP_ATOMIC under it.
	 */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	/* any allocation failure below unwinds via sdebug_erase_store() */
	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		/* 0xff pattern marks PI as "not written yet" */
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
6960 
6961 static int sdebug_add_host_helper(int per_host_idx)
6962 {
6963 	int k, devs_per_host, idx;
6964 	int error = -ENOMEM;
6965 	struct sdebug_host_info *sdbg_host;
6966 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6967 
6968 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6969 	if (!sdbg_host)
6970 		return -ENOMEM;
6971 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6972 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6973 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6974 	sdbg_host->si_idx = idx;
6975 
6976 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6977 
6978 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6979 	for (k = 0; k < devs_per_host; k++) {
6980 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6981 		if (!sdbg_devinfo)
6982 			goto clean;
6983 	}
6984 
6985 	spin_lock(&sdebug_host_list_lock);
6986 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6987 	spin_unlock(&sdebug_host_list_lock);
6988 
6989 	sdbg_host->dev.bus = &pseudo_lld_bus;
6990 	sdbg_host->dev.parent = pseudo_primary;
6991 	sdbg_host->dev.release = &sdebug_release_adapter;
6992 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6993 
6994 	error = device_register(&sdbg_host->dev);
6995 	if (error)
6996 		goto clean;
6997 
6998 	++sdebug_num_hosts;
6999 	return 0;
7000 
7001 clean:
7002 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7003 				 dev_list) {
7004 		list_del(&sdbg_devinfo->dev_list);
7005 		kfree(sdbg_devinfo->zstate);
7006 		kfree(sdbg_devinfo);
7007 	}
7008 	kfree(sdbg_host);
7009 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7010 	return error;
7011 }
7012 
7013 static int sdebug_do_add_host(bool mk_new_store)
7014 {
7015 	int ph_idx = sdeb_most_recent_idx;
7016 
7017 	if (mk_new_store) {
7018 		ph_idx = sdebug_add_store();
7019 		if (ph_idx < 0)
7020 			return ph_idx;
7021 	}
7022 	return sdebug_add_host_helper(ph_idx);
7023 }
7024 
/*
 * Remove the most recently added host. Unless this is the final
 * teardown (the_end), a backing store no longer referenced by any
 * remaining host is marked not-in-use so it can be reclaimed.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the last (most recently added) host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* does any other host still reference this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* triggers sdebug_driver_remove() and, eventually, release */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7064 
/*
 * change_queue_depth host template callback. Clamps the requested depth
 * to [1, SDEBUG_CANQUEUE + 10] (deliberately allowed to exceed the
 * internal queue size for testing) and returns the resulting depth,
 * or -ENODEV if the device has no sdebug state.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	/* quiesce all submission queues while the depth changes */
	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
7092 
7093 static bool fake_timeout(struct scsi_cmnd *scp)
7094 {
7095 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7096 		if (sdebug_every_nth < -1)
7097 			sdebug_every_nth = -1;
7098 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7099 			return true; /* ignore command causing timeout */
7100 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7101 			 scsi_medium_access_command(scp))
7102 			return true; /* time out reads and writes */
7103 	}
7104 	return false;
7105 }
7106 
/*
 * Response to TUR or media access command when device stopped.
 * stopped==2 means "becoming ready": once tur_ms_to_ready ms have
 * elapsed since device creation the stop is cleared and 0 returned;
 * before that, TEST UNIT READY gets the remaining delay reported in
 * the sense INFORMATION field (per T10 proposal 20-061r2 for SPC-6).
 */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* report remaining (not elapsed) time until ready */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* plain stopped state: an initializing command (e.g. SSU) is needed */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7149 
/*
 * queuecommand entry point for every command sent to a scsi_debug host.
 * Looks the opcode (and, where applicable, service action) up in
 * opcode_info_arr, applies error-injection/strict/UA/not-ready policy,
 * then hands the chosen resp_* handler to schedule_resp() together with
 * the configured delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* optionally log the CDB bytes (hex) for debugging */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action (in cmd[1] or cmd[8..9]) */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* find the highest set bit of any CDB byte outside its mask */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention first */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7315 
/*
 * Host template shared by every emulated adapter. Note that can_queue
 * and dma_boundary are patched in sdebug_driver_probe() before each
 * scsi_host_alloc() call.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
7342 
/*
 * Bus probe callback: allocate and register a Scsi_Host for one
 * sdebug adapter, configure queue count, target/LUN limits and T10
 * protection capabilities, then kick off a scan.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	/* patch the shared template before allocating this host */
	if (sdebug_host_max_queue)
		sdebug_driver_template.can_queue = sdebug_host_max_queue;
	else
		sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host. If the host
	 * has a limit of hostwide max commands, then do not set.
	 */
	if (!sdebug_host_max_queue)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate the dif/dix settings into SHOST_* protection flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
7447 
7448 static int sdebug_driver_remove(struct device *dev)
7449 {
7450 	struct sdebug_host_info *sdbg_host;
7451 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7452 
7453 	sdbg_host = to_sdebug_host(dev);
7454 
7455 	if (!sdbg_host) {
7456 		pr_err("Unable to locate host info\n");
7457 		return -ENODEV;
7458 	}
7459 
7460 	scsi_remove_host(sdbg_host->shost);
7461 
7462 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7463 				 dev_list) {
7464 		list_del(&sdbg_devinfo->dev_list);
7465 		kfree(sdbg_devinfo->zstate);
7466 		kfree(sdbg_devinfo);
7467 	}
7468 
7469 	scsi_host_put(sdbg_host->shost);
7470 	return 0;
7471 }
7472 
/* Every device on the pseudo bus matches its single driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7478 
/* Pseudo bus tying adapter devices to the probe/remove callbacks above. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
7486