1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
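/*
 * Note (illustrative): make_ua() below scans uas_bm with
 * find_first_bit(), so the lowest set bit, i.e. the highest priority
 * UA such as SDEBUG_UA_POR, is reported before a higher numbered one
 * such as SDEBUG_UA_LUNS_CHANGED.
 */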
208 
209 /* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
210  * error is simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE.
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD holds BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
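/*
 * Illustrative compile-time check (an addition, assuming the C11
 * static_assert wrapper from <linux/build_bug.h> is in scope via
 * <linux/kernel.h>): on a 64-bit build SDEBUG_CANQUEUE works out to
 * 3 * 64 = 192 slots, and the default queue depth above can never
 * exceed that bitmap capacity.
 */
static_assert(DEF_CMD_PER_LUN <= SDEBUG_CANQUEUE);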
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
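/*
 * Sketch (hypothetical helper, not in the original driver): FF_SA
 * covers both service action encodings, so one test tells a dispatcher
 * whether the cdb carries a service action at all.
 */
static inline bool sdeb_cmd_has_sa(u32 flags)
{
	return (flags & FF_SA) != 0;
}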
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
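/*
 * Sketch (hypothetical helper): only sequential write required (SWR)
 * and sequential write preferred (SWP) zones maintain a meaningful
 * write pointer; conventional and gap zones do not.
 */
static inline bool sdeb_zone_is_sequential(const struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_SWR || zsp->z_type == ZBC_ZTYPE_SWP;
}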
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t stopped;	/* 1: by SSU, 2: device start */
292 	bool used;
293 
294 	/* For ZBC devices */
295 	enum blk_zoned_model zmodel;
296 	unsigned int zcap;
297 	unsigned int zsize;
298 	unsigned int zsize_shift;
299 	unsigned int nr_zones;
300 	unsigned int nr_conv_zones;
301 	unsigned int nr_seq_zones;
302 	unsigned int nr_imp_open;
303 	unsigned int nr_exp_open;
304 	unsigned int nr_closed;
305 	unsigned int max_open;
306 	ktime_t create_ts;	/* time since bootup that this device was created */
307 	struct sdeb_zone_state *zstate;
308 };
309 
310 struct sdebug_host_info {
311 	struct list_head host_list;
312 	int si_idx;	/* sdeb_store_info (per host) xarray index */
313 	struct Scsi_Host *shost;
314 	struct device dev;
315 	struct list_head dev_info_list;
316 };
317 
318 /* There is an xarray of pointers to this struct's objects, one per host */
319 struct sdeb_store_info {
320 	rwlock_t macc_lck;	/* for atomic media access on this store */
321 	u8 *storep;		/* user data storage (ram) */
322 	struct t10_pi_tuple *dif_storep; /* protection info */
323 	void *map_storep;	/* provisioning map */
324 };
325 
326 #define dev_to_sdebug_host(d)	\
327 	container_of(d, struct sdebug_host_info, dev)
328 
329 #define shost_to_sdebug_host(shost)	\
330 	dev_to_sdebug_host(shost->dma_dev)
331 
332 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
333 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
334 
335 struct sdebug_defer {
336 	struct hrtimer hrt;
337 	struct execute_work ew;
338 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
339 	int sqa_idx;	/* index of sdebug_queue array */
340 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
341 	int hc_idx;	/* hostwide tag index */
342 	int issuing_cpu;
343 	bool init_hrt;
344 	bool init_wq;
345 	bool init_poll;
346 	bool aborted;	/* true when blk_abort_request() already called */
347 	enum sdeb_defer_type defer_t;
348 };
349 
350 struct sdebug_queued_cmd {
351 	/* A set bit in in_use_bm[] of the owning struct sdebug_queue
352 	 * instance indicates that this slot is in use.
353 	 */
354 	struct sdebug_defer *sd_dp;
355 	struct scsi_cmnd *a_cmnd;
356 };
357 
358 struct sdebug_queue {
359 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
360 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
361 	spinlock_t qc_lock;
362 	atomic_t blocked;	/* to temporarily stop more being queued */
363 };
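/*
 * Usage sketch (illustrative, mirrors how the queueing code below uses
 * this struct): a slot in qc_arr[] is claimed under qc_lock roughly as:
 *	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
 *	if (k < sdebug_max_queue)
 *		set_bit(k, sqp->in_use_bm);
 */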
364 
365 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
366 static atomic_t sdebug_completions;  /* count of deferred completions */
367 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
368 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
369 static atomic_t sdeb_inject_pending;
370 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
371 
372 struct opcode_info_t {
373 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
374 				/* for terminating element */
375 	u8 opcode;		/* if num_attached > 0, preferred */
376 	u16 sa;			/* service action */
377 	u32 flags;		/* OR-ed set of SDEB_F_* */
378 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
379 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
380 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
381 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
382 };
383 
384 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
385 enum sdeb_opcode_index {
386 	SDEB_I_INVALID_OPCODE =	0,
387 	SDEB_I_INQUIRY = 1,
388 	SDEB_I_REPORT_LUNS = 2,
389 	SDEB_I_REQUEST_SENSE = 3,
390 	SDEB_I_TEST_UNIT_READY = 4,
391 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
392 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
393 	SDEB_I_LOG_SENSE = 7,
394 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
395 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
396 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
397 	SDEB_I_START_STOP = 11,
398 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
399 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
400 	SDEB_I_MAINT_IN = 14,
401 	SDEB_I_MAINT_OUT = 15,
402 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
403 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
404 	SDEB_I_RESERVE = 18,		/* 6, 10 */
405 	SDEB_I_RELEASE = 19,		/* 6, 10 */
406 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
407 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
408 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
409 	SDEB_I_SEND_DIAG = 23,
410 	SDEB_I_UNMAP = 24,
411 	SDEB_I_WRITE_BUFFER = 25,
412 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
413 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
414 	SDEB_I_COMP_WRITE = 28,
415 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
416 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
417 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
418 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
419 };
420 
421 
422 static const unsigned char opcode_ind_arr[256] = {
423 /* 0x0; 0x0->0x1f: 6 byte cdbs */
424 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
425 	    0, 0, 0, 0,
426 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
427 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
428 	    SDEB_I_RELEASE,
429 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
430 	    SDEB_I_ALLOW_REMOVAL, 0,
431 /* 0x20; 0x20->0x3f: 10 byte cdbs */
432 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
433 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
434 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
435 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
436 /* 0x40; 0x40->0x5f: 10 byte cdbs */
437 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
438 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
439 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
440 	    SDEB_I_RELEASE,
441 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
442 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
443 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
444 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
445 	0, SDEB_I_VARIABLE_LEN,
446 /* 0x80; 0x80->0x9f: 16 byte cdbs */
447 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
448 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
449 	0, 0, 0, SDEB_I_VERIFY,
450 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
451 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
452 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
453 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
454 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
455 	     SDEB_I_MAINT_OUT, 0, 0, 0,
456 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
457 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
458 	0, 0, 0, 0, 0, 0, 0, 0,
459 	0, 0, 0, 0, 0, 0, 0, 0,
460 /* 0xc0; 0xc0->0xff: vendor specific */
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
464 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
465 };
466 
467 /*
468  * The following "response" functions return the SCSI mid-level's 4 byte
469  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
470  * command completion, they can OR their return value with
471  * SDEG_RES_IMMED_MASK.
472  */
473 #define SDEG_RES_IMMED_MASK 0x40000000
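/*
 * Sketch (hypothetical helper): a resp_*() handler for an IMMED-capable
 * command can return sdeb_immed(res) so that its completion is
 * scheduled with a much shorter delay.
 */
static inline int sdeb_immed(int res)
{
	return res | SDEG_RES_IMMED_MASK;
}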
474 
475 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
504 
505 static int sdebug_do_add_host(bool mk_new_store);
506 static int sdebug_add_host_helper(int per_host_idx);
507 static void sdebug_do_remove_host(bool the_end);
508 static int sdebug_add_store(void);
509 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
510 static void sdebug_erase_all_stores(bool apart_from_first);
511 
512 /*
513  * The following are overflow arrays for cdbs that "hit" the same index in
514  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
515  * should be placed in opcode_info_arr[], the others should be placed here.
516  */
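/*
 * Lookup sketch (hypothetical helper): how a walk over one of these
 * overflow arrays can look; num_attached bounds the scan and arrp
 * points at the alternates sharing the same SDEB_I_* index.
 */
static inline const struct opcode_info_t *
sdeb_find_alternate(const struct opcode_info_t *oip, u8 opcode, u16 sa)
{
	int k;

	for (k = 0; k < oip->num_attached; ++k) {
		if (oip->arrp[k].opcode == opcode && oip->arrp[k].sa == sa)
			return &oip->arrp[k];
	}
	return NULL;	/* caller falls back to the primary entry */
}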
517 static const struct opcode_info_t msense_iarr[] = {
518 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
519 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
520 };
521 
522 static const struct opcode_info_t mselect_iarr[] = {
523 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
524 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 };
526 
527 static const struct opcode_info_t read_iarr[] = {
528 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
529 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
530 	     0, 0, 0, 0} },
531 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
532 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
533 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
534 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
535 	     0xc7, 0, 0, 0, 0} },
536 };
537 
538 static const struct opcode_info_t write_iarr[] = {
539 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
540 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
541 		   0, 0, 0, 0, 0, 0} },
542 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
543 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
544 		   0, 0, 0} },
545 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
546 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
547 		   0xbf, 0xc7, 0, 0, 0, 0} },
548 };
549 
550 static const struct opcode_info_t verify_iarr[] = {
551 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
552 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
553 		   0, 0, 0, 0, 0, 0} },
554 };
555 
556 static const struct opcode_info_t sa_in_16_iarr[] = {
557 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
558 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
559 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
560 };
561 
562 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
563 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
564 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
565 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
566 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
567 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
568 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
569 };
570 
571 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
572 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
573 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
574 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
575 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
576 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
577 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
578 };
579 
580 static const struct opcode_info_t write_same_iarr[] = {
581 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
582 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
583 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
584 };
585 
586 static const struct opcode_info_t reserve_iarr[] = {
587 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
588 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
589 };
590 
591 static const struct opcode_info_t release_iarr[] = {
592 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
593 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 };
595 
596 static const struct opcode_info_t sync_cache_iarr[] = {
597 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
598 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
599 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
600 };
601 
602 static const struct opcode_info_t pre_fetch_iarr[] = {
603 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
604 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
606 };
607 
608 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
609 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
610 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
612 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
613 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
614 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
615 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
616 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
617 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
618 };
619 
620 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
621 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
622 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
623 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
624 };
625 
626 
627 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
628  * plus the terminating elements for logic that scans this table such as
629  * REPORT SUPPORTED OPERATION CODES. */
630 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
631 /* 0 */
632 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
633 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
635 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
637 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
638 	     0, 0} },					/* REPORT LUNS */
639 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
640 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
642 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643 /* 5 */
644 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
645 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
646 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
647 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
648 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
649 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
650 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
651 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
652 	     0, 0, 0} },
653 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
654 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
655 	     0, 0} },
656 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
657 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
658 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
659 /* 10 */
660 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
661 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
662 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
663 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
664 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
665 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
666 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
667 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
668 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
669 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
670 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
671 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
672 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
673 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
674 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
675 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
676 				0xff, 0, 0xc7, 0, 0, 0, 0} },
677 /* 15 */
678 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
679 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
680 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
681 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
682 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
683 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
684 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
685 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
686 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
687 	     0xff, 0xff} },
688 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
689 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
690 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
691 	     0} },
692 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
693 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
694 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
695 	     0} },
696 /* 20 */
697 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
698 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
700 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
702 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
704 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
705 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
706 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
707 /* 25 */
708 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
709 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
710 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
711 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
712 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
713 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
714 		 0, 0, 0, 0, 0} },
715 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
716 	    resp_sync_cache, sync_cache_iarr,
717 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
718 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
719 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
720 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
721 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
722 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
723 	    resp_pre_fetch, pre_fetch_iarr,
724 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
725 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
726 
727 /* 30 */
728 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
729 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
730 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
731 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
732 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
733 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
734 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
735 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
736 /* sentinel */
737 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
738 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
739 };
740 
741 static int sdebug_num_hosts;
742 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
743 static int sdebug_ato = DEF_ATO;
744 static int sdebug_cdb_len = DEF_CDB_LEN;
745 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
746 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
747 static int sdebug_dif = DEF_DIF;
748 static int sdebug_dix = DEF_DIX;
749 static int sdebug_dsense = DEF_D_SENSE;
750 static int sdebug_every_nth = DEF_EVERY_NTH;
751 static int sdebug_fake_rw = DEF_FAKE_RW;
752 static unsigned int sdebug_guard = DEF_GUARD;
753 static int sdebug_host_max_queue;	/* per host */
754 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
755 static int sdebug_max_luns = DEF_MAX_LUNS;
756 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
757 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
758 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
759 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
760 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
761 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
762 static int sdebug_no_uld;
763 static int sdebug_num_parts = DEF_NUM_PARTS;
764 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
765 static int sdebug_opt_blks = DEF_OPT_BLKS;
766 static int sdebug_opts = DEF_OPTS;
767 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
768 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
769 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
770 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
771 static int sdebug_sector_size = DEF_SECTOR_SIZE;
772 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
773 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
774 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
775 static unsigned int sdebug_lbpu = DEF_LBPU;
776 static unsigned int sdebug_lbpws = DEF_LBPWS;
777 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
778 static unsigned int sdebug_lbprz = DEF_LBPRZ;
779 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
780 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
781 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
782 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
783 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
784 static int sdebug_uuid_ctl = DEF_UUID_CTL;
785 static bool sdebug_random = DEF_RANDOM;
786 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
787 static bool sdebug_removable = DEF_REMOVABLE;
788 static bool sdebug_clustering;
789 static bool sdebug_host_lock = DEF_HOST_LOCK;
790 static bool sdebug_strict = DEF_STRICT;
791 static bool sdebug_any_injecting_opt;
792 static bool sdebug_no_rwlock;
793 static bool sdebug_verbose;
794 static bool have_dif_prot;
795 static bool write_since_sync;
796 static bool sdebug_statistics = DEF_STATISTICS;
797 static bool sdebug_wp;
798 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
799 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
800 static char *sdeb_zbc_model_s;
801 
802 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
803 			  SAM_LUN_AM_FLAT = 0x1,
804 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
805 			  SAM_LUN_AM_EXTENDED = 0x3};
806 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
807 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
808 
809 static unsigned int sdebug_store_sectors;
810 static sector_t sdebug_capacity;	/* in sectors */
811 
812 /* old BIOS stuff; the kernel may get rid of them but some mode sense
813    pages may still need them */
814 static int sdebug_heads;		/* heads per disk */
815 static int sdebug_cylinders_per;	/* cylinders per surface */
816 static int sdebug_sectors_per;		/* sectors per cylinder */
817 
818 static LIST_HEAD(sdebug_host_list);
819 static DEFINE_SPINLOCK(sdebug_host_list_lock);
820 
821 static struct xarray per_store_arr;
822 static struct xarray *per_store_ap = &per_store_arr;
823 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
824 static int sdeb_most_recent_idx = -1;
825 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
826 
827 static unsigned long map_size;
828 static int num_aborts;
829 static int num_dev_resets;
830 static int num_target_resets;
831 static int num_bus_resets;
832 static int num_host_resets;
833 static int dix_writes;
834 static int dix_reads;
835 static int dif_errors;
836 
837 /* ZBC global data */
838 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
839 static int sdeb_zbc_zone_cap_mb;
840 static int sdeb_zbc_zone_size_mb;
841 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
842 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
843 
844 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
845 static int poll_queues; /* io_uring iopoll interface */
846 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
847 
848 static DEFINE_RWLOCK(atomic_rw);
849 static DEFINE_RWLOCK(atomic_rw2);
850 
851 static rwlock_t *ramdisk_lck_a[2];
852 
853 static char sdebug_proc_name[] = MY_NAME;
854 static const char *my_name = MY_NAME;
855 
856 static struct bus_type pseudo_lld_bus;
857 
858 static struct device_driver sdebug_driverfs_driver = {
859 	.name 		= sdebug_proc_name,
860 	.bus		= &pseudo_lld_bus,
861 };
862 
863 static const int check_condition_result =
864 	SAM_STAT_CHECK_CONDITION;
865 
866 static const int illegal_condition_result =
867 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
868 
869 static const int device_qfull_result =
870 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
871 
872 static const int condition_met_result = SAM_STAT_CONDITION_MET;
873 
874 
875 /* Only do the extra work involved in logical block provisioning if one or
876  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
877  * real reads and writes (i.e. not skipping them for speed).
878  */
879 static inline bool scsi_debug_lbp(void)
880 {
881 	return 0 == sdebug_fake_rw &&
882 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
883 }
884 
885 static void *lba2fake_store(struct sdeb_store_info *sip,
886 			    unsigned long long lba)
887 {
888 	struct sdeb_store_info *lsip = sip;
889 
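	/* do_div() divides lba in place and returns the remainder, so this
	 * wraps the virtual LBA into the (possibly smaller) backing store
	 */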
890 	lba = do_div(lba, sdebug_store_sectors);
891 	if (!sip || !sip->storep) {
892 		WARN_ON_ONCE(true);
893 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
894 	}
895 	return lsip->storep + lba * sdebug_sector_size;
896 }
897 
898 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
899 				      sector_t sector)
900 {
901 	sector = sector_div(sector, sdebug_store_sectors);
902 
903 	return sip->dif_storep + sector;
904 }
905 
906 static void sdebug_max_tgts_luns(void)
907 {
908 	struct sdebug_host_info *sdbg_host;
909 	struct Scsi_Host *hpnt;
910 
911 	spin_lock(&sdebug_host_list_lock);
912 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
913 		hpnt = sdbg_host->shost;
914 		if ((hpnt->this_id >= 0) &&
915 		    (sdebug_num_tgts > hpnt->this_id))
916 			hpnt->max_id = sdebug_num_tgts + 1;
917 		else
918 			hpnt->max_id = sdebug_num_tgts;
919 		/* sdebug_max_luns; */
920 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
921 	}
922 	spin_unlock(&sdebug_host_list_lock);
923 }
924 
925 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
926 
927 /* Set in_bit to -1 to indicate no bit position of invalid field */
928 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
929 				 enum sdeb_cmd_data c_d,
930 				 int in_byte, int in_bit)
931 {
932 	unsigned char *sbuff;
933 	u8 sks[4];
934 	int sl, asc;
935 
936 	sbuff = scp->sense_buffer;
937 	if (!sbuff) {
938 		sdev_printk(KERN_ERR, scp->device,
939 			    "%s: sense_buffer is NULL\n", __func__);
940 		return;
941 	}
942 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
943 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
944 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
945 	memset(sks, 0, sizeof(sks));
946 	sks[0] = 0x80;
947 	if (c_d)
948 		sks[0] |= 0x40;
949 	if (in_bit >= 0) {
950 		sks[0] |= 0x8;
951 		sks[0] |= 0x7 & in_bit;
952 	}
953 	put_unaligned_be16(in_byte, sks + 1);
954 	if (sdebug_dsense) {
955 		sl = sbuff[7] + 8;
956 		sbuff[7] = sl;
957 		sbuff[sl] = 0x2;
958 		sbuff[sl + 1] = 0x6;
959 		memcpy(sbuff + sl + 4, sks, 3);
960 	} else
961 		memcpy(sbuff + 15, sks, 3);
962 	if (sdebug_verbose)
963 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
964 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
965 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
966 }
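/*
 * Usage sketch (illustrative): to blame, say, bit 3 of cdb byte 1 a
 * caller would issue:
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
 * yielding ILLEGAL REQUEST, INVALID FIELD IN CDB, with the sense-key
 * specific bytes pointing at that byte and bit.
 */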
967 
968 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
969 {
970 	if (!scp->sense_buffer) {
971 		sdev_printk(KERN_ERR, scp->device,
972 			    "%s: sense_buffer is NULL\n", __func__);
973 		return;
974 	}
975 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
976 
977 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
978 
979 	if (sdebug_verbose)
980 		sdev_printk(KERN_INFO, scp->device,
981 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
982 			    my_name, key, asc, asq);
983 }
984 
985 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
986 {
987 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
988 }
989 
990 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
991 			    void __user *arg)
992 {
993 	if (sdebug_verbose) {
994 		if (0x1261 == cmd)
995 			sdev_printk(KERN_INFO, dev,
996 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
997 		else if (0x5331 == cmd)
998 			sdev_printk(KERN_INFO, dev,
999 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1000 				    __func__);
1001 		else
1002 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1003 				    __func__, cmd);
1004 	}
1005 	return -EINVAL;
1006 	/* return -ENOTTY; // correct return but upsets fdisk */
1007 }
1008 
1009 static void config_cdb_len(struct scsi_device *sdev)
1010 {
1011 	switch (sdebug_cdb_len) {
1012 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1013 		sdev->use_10_for_rw = false;
1014 		sdev->use_16_for_rw = false;
1015 		sdev->use_10_for_ms = false;
1016 		break;
1017 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1018 		sdev->use_10_for_rw = true;
1019 		sdev->use_16_for_rw = false;
1020 		sdev->use_10_for_ms = false;
1021 		break;
1022 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1023 		sdev->use_10_for_rw = true;
1024 		sdev->use_16_for_rw = false;
1025 		sdev->use_10_for_ms = true;
1026 		break;
1027 	case 16:
1028 		sdev->use_10_for_rw = false;
1029 		sdev->use_16_for_rw = true;
1030 		sdev->use_10_for_ms = true;
1031 		break;
1032 	case 32: /* No knobs to suggest this so same as 16 for now */
1033 		sdev->use_10_for_rw = false;
1034 		sdev->use_16_for_rw = true;
1035 		sdev->use_10_for_ms = true;
1036 		break;
1037 	default:
1038 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1039 			sdebug_cdb_len);
1040 		sdev->use_10_for_rw = true;
1041 		sdev->use_16_for_rw = false;
1042 		sdev->use_10_for_ms = false;
1043 		sdebug_cdb_len = 10;
1044 		break;
1045 	}
1046 }
1047 
1048 static void all_config_cdb_len(void)
1049 {
1050 	struct sdebug_host_info *sdbg_host;
1051 	struct Scsi_Host *shost;
1052 	struct scsi_device *sdev;
1053 
1054 	spin_lock(&sdebug_host_list_lock);
1055 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1056 		shost = sdbg_host->shost;
1057 		shost_for_each_device(sdev, shost) {
1058 			config_cdb_len(sdev);
1059 		}
1060 	}
1061 	spin_unlock(&sdebug_host_list_lock);
1062 }
1063 
1064 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1065 {
1066 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1067 	struct sdebug_dev_info *dp;
1068 
1069 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 		if ((devip->sdbg_host == dp->sdbg_host) &&
1071 		    (devip->target == dp->target)) {
1072 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 		}
1074 	}
1075 }
1076 
1077 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1078 {
1079 	int k;
1080 
1081 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1082 	if (k != SDEBUG_NUM_UAS) {
1083 		const char *cp = NULL;
1084 
1085 		switch (k) {
1086 		case SDEBUG_UA_POR:
1087 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1088 					POWER_ON_RESET_ASCQ);
1089 			if (sdebug_verbose)
1090 				cp = "power on reset";
1091 			break;
1092 		case SDEBUG_UA_POOCCUR:
1093 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1094 					POWER_ON_OCCURRED_ASCQ);
1095 			if (sdebug_verbose)
1096 				cp = "power on occurred";
1097 			break;
1098 		case SDEBUG_UA_BUS_RESET:
1099 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1100 					BUS_RESET_ASCQ);
1101 			if (sdebug_verbose)
1102 				cp = "bus reset";
1103 			break;
1104 		case SDEBUG_UA_MODE_CHANGED:
1105 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1106 					MODE_CHANGED_ASCQ);
1107 			if (sdebug_verbose)
1108 				cp = "mode parameters changed";
1109 			break;
1110 		case SDEBUG_UA_CAPACITY_CHANGED:
1111 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1112 					CAPACITY_CHANGED_ASCQ);
1113 			if (sdebug_verbose)
1114 				cp = "capacity data changed";
1115 			break;
1116 		case SDEBUG_UA_MICROCODE_CHANGED:
1117 			mk_sense_buffer(scp, UNIT_ATTENTION,
1118 					TARGET_CHANGED_ASC,
1119 					MICROCODE_CHANGED_ASCQ);
1120 			if (sdebug_verbose)
1121 				cp = "microcode has been changed";
1122 			break;
1123 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1124 			mk_sense_buffer(scp, UNIT_ATTENTION,
1125 					TARGET_CHANGED_ASC,
1126 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1127 			if (sdebug_verbose)
1128 				cp = "microcode has been changed without reset";
1129 			break;
1130 		case SDEBUG_UA_LUNS_CHANGED:
1131 			/*
1132 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1133 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1134 			 * on the target, until a REPORT LUNS command is
1135 			 * received.  SPC-4 behavior is to report it only once.
1136 			 * NOTE:  sdebug_scsi_level does not use the same
1137 			 * values as struct scsi_device->scsi_level.
1138 			 */
1139 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1140 				clear_luns_changed_on_target(devip);
1141 			mk_sense_buffer(scp, UNIT_ATTENTION,
1142 					TARGET_CHANGED_ASC,
1143 					LUNS_CHANGED_ASCQ);
1144 			if (sdebug_verbose)
1145 				cp = "reported luns data has changed";
1146 			break;
1147 		default:
1148 			pr_warn("unexpected unit attention code=%d\n", k);
1149 			if (sdebug_verbose)
1150 				cp = "unknown";
1151 			break;
1152 		}
1153 		clear_bit(k, devip->uas_bm);
1154 		if (sdebug_verbose)
1155 			sdev_printk(KERN_INFO, scp->device,
1156 				   "%s reports: Unit attention: %s\n",
1157 				   my_name, cp);
1158 		return check_condition_result;
1159 	}
1160 	return 0;
1161 }
1162 
1163 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1164 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1165 				int arr_len)
1166 {
1167 	int act_len;
1168 	struct scsi_data_buffer *sdb = &scp->sdb;
1169 
1170 	if (!sdb->length)
1171 		return 0;
1172 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1173 		return DID_ERROR << 16;
1174 
1175 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1176 				      arr, arr_len);
1177 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1178 
1179 	return 0;
1180 }
1181 
1182 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1183  * (DID_ERROR << 16). Can write to an offset within the data-in buffer.
1184  * Over multiple calls, writes need not be in ascending offset order.
1185  * Assumes resid was set to scsi_bufflen() prior to any calls.
1186  */
1187 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1188 				  int arr_len, unsigned int off_dst)
1189 {
1190 	unsigned int act_len, n;
1191 	struct scsi_data_buffer *sdb = &scp->sdb;
1192 	off_t skip = off_dst;
1193 
1194 	if (sdb->length <= off_dst)
1195 		return 0;
1196 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1197 		return DID_ERROR << 16;
1198 
1199 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1200 				       arr, arr_len, skip);
1201 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1202 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1203 		 scsi_get_resid(scp));
1204 	n = scsi_bufflen(scp) - (off_dst + act_len);
1205 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1206 	return 0;
1207 }
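/*
 * Usage sketch (illustrative, d_lo/d_hi are hypothetical buffers):
 * descriptors may land out of order; resid converges on the bytes
 * beyond the highest offset actually written:
 *	p_fill_from_dev_buffer(scp, d_hi, 64, 64);
 *	p_fill_from_dev_buffer(scp, d_lo, 64, 0);
 */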
1208 
1209 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1210  * 'arr' or -1 if error.
1211  */
1212 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1213 			       int arr_len)
1214 {
1215 	if (!scsi_bufflen(scp))
1216 		return 0;
1217 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1218 		return -1;
1219 
1220 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1221 }
1222 
1223 
1224 static char sdebug_inq_vendor_id[9] = "Linux   ";
1225 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1226 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1227 /* Use some locally assigned NAAs for SAS addresses. */
1228 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1229 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1230 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1231 
1232 /* Device identification VPD page. Returns number of bytes placed in arr */
1233 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1234 			  int target_dev_id, int dev_id_num,
1235 			  const char *dev_id_str, int dev_id_str_len,
1236 			  const uuid_t *lu_name)
1237 {
1238 	int num, port_a;
1239 	char b[32];
1240 
1241 	port_a = target_dev_id + 1;
1242 	/* T10 vendor identifier field format (faked) */
1243 	arr[0] = 0x2;	/* ASCII */
1244 	arr[1] = 0x1;
1245 	arr[2] = 0x0;
1246 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1247 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1248 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1249 	num = 8 + 16 + dev_id_str_len;
1250 	arr[3] = num;
1251 	num += 4;
1252 	if (dev_id_num >= 0) {
1253 		if (sdebug_uuid_ctl) {
1254 			/* Locally assigned UUID */
1255 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1256 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1257 			arr[num++] = 0x0;
1258 			arr[num++] = 0x12;
1259 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1260 			arr[num++] = 0x0;
1261 			memcpy(arr + num, lu_name, 16);
1262 			num += 16;
1263 		} else {
1264 			/* NAA-3, Logical unit identifier (binary) */
1265 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1266 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1267 			arr[num++] = 0x0;
1268 			arr[num++] = 0x8;
1269 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1270 			num += 8;
1271 		}
1272 		/* Target relative port number */
1273 		arr[num++] = 0x61;	/* proto=sas, binary */
1274 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1275 		arr[num++] = 0x0;	/* reserved */
1276 		arr[num++] = 0x4;	/* length */
1277 		arr[num++] = 0x0;	/* reserved */
1278 		arr[num++] = 0x0;	/* reserved */
1279 		arr[num++] = 0x0;
1280 		arr[num++] = 0x1;	/* relative port A */
1281 	}
1282 	/* NAA-3, Target port identifier */
1283 	arr[num++] = 0x61;	/* proto=sas, binary */
1284 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1285 	arr[num++] = 0x0;
1286 	arr[num++] = 0x8;
1287 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1288 	num += 8;
1289 	/* NAA-3, Target port group identifier */
1290 	arr[num++] = 0x61;	/* proto=sas, binary */
1291 	arr[num++] = 0x95;	/* piv=1, target port group id */
1292 	arr[num++] = 0x0;
1293 	arr[num++] = 0x4;
1294 	arr[num++] = 0;
1295 	arr[num++] = 0;
1296 	put_unaligned_be16(port_group_id, arr + num);
1297 	num += 2;
1298 	/* NAA-3, Target device identifier */
1299 	arr[num++] = 0x61;	/* proto=sas, binary */
1300 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1301 	arr[num++] = 0x0;
1302 	arr[num++] = 0x8;
1303 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1304 	num += 8;
1305 	/* SCSI name string: Target device identifier */
1306 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1307 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1308 	arr[num++] = 0x0;
1309 	arr[num++] = 24;
1310 	memcpy(arr + num, "naa.32222220", 12);
1311 	num += 12;
1312 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1313 	memcpy(arr + num, b, 8);
1314 	num += 8;
1315 	memset(arr + num, 0, 4);
1316 	num += 4;
1317 	return num;
1318 }
1319 
1320 static unsigned char vpd84_data[] = {
1321 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1322     0x22,0x22,0x22,0x0,0xbb,0x1,
1323     0x22,0x22,0x22,0x0,0xbb,0x2,
1324 };
1325 
1326 /*  Software interface identification VPD page */
1327 static int inquiry_vpd_84(unsigned char *arr)
1328 {
1329 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1330 	return sizeof(vpd84_data);
1331 }
1332 
1333 /* Management network addresses VPD page */
1334 static int inquiry_vpd_85(unsigned char *arr)
1335 {
1336 	int num = 0;
1337 	const char *na1 = "https://www.kernel.org/config";
1338 	const char *na2 = "http://www.kernel.org/log";
1339 	int plen, olen;
1340 
1341 	arr[num++] = 0x1;	/* lu, storage config */
1342 	arr[num++] = 0x0;	/* reserved */
1343 	arr[num++] = 0x0;
1344 	olen = strlen(na1);
1345 	plen = olen + 1;
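	/* round up to a 4-byte multiple, counting the trailing NUL byte
	 * (equivalent to ALIGN(plen, 4))
	 */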
1346 	if (plen % 4)
1347 		plen = ((plen / 4) + 1) * 4;
1348 	arr[num++] = plen;	/* length, null terminated, padded */
1349 	memcpy(arr + num, na1, olen);
1350 	memset(arr + num + olen, 0, plen - olen);
1351 	num += plen;
1352 
1353 	arr[num++] = 0x4;	/* lu, logging */
1354 	arr[num++] = 0x0;	/* reserved */
1355 	arr[num++] = 0x0;
1356 	olen = strlen(na2);
1357 	plen = olen + 1;
1358 	if (plen % 4)
1359 		plen = ((plen / 4) + 1) * 4;
1360 	arr[num++] = plen;	/* length, null terminated, padded */
1361 	memcpy(arr + num, na2, olen);
1362 	memset(arr + num + olen, 0, plen - olen);
1363 	num += plen;
1364 
1365 	return num;
1366 }
1367 
1368 /* SCSI ports VPD page */
1369 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1370 {
1371 	int num = 0;
1372 	int port_a, port_b;
1373 
1374 	port_a = target_dev_id + 1;
1375 	port_b = port_a + 1;
1376 	arr[num++] = 0x0;	/* reserved */
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;
1379 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1380 	memset(arr + num, 0, 6);
1381 	num += 6;
1382 	arr[num++] = 0x0;
1383 	arr[num++] = 12;	/* length tp descriptor */
1384 	/* naa-5 target port identifier (A) */
1385 	arr[num++] = 0x61;	/* proto=sas, binary */
1386 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1387 	arr[num++] = 0x0;	/* reserved */
1388 	arr[num++] = 0x8;	/* length */
1389 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1390 	num += 8;
1391 	arr[num++] = 0x0;	/* reserved */
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x0;
1394 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1395 	memset(arr + num, 0, 6);
1396 	num += 6;
1397 	arr[num++] = 0x0;
1398 	arr[num++] = 12;	/* length tp descriptor */
1399 	/* naa-5 target port identifier (B) */
1400 	arr[num++] = 0x61;	/* proto=sas, binary */
1401 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1402 	arr[num++] = 0x0;	/* reserved */
1403 	arr[num++] = 0x8;	/* length */
1404 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1405 	num += 8;
1406 
1407 	return num;
1408 }
1409 
1410 
1411 static unsigned char vpd89_data[] = {
1412 /* from 4th byte */ 0,0,0,0,
1413 'l','i','n','u','x',' ',' ',' ',
1414 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1415 '1','2','3','4',
1416 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1417 0xec,0,0,0,
1418 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1419 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1420 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1421 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1422 0x53,0x41,
1423 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1424 0x20,0x20,
1425 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1426 0x10,0x80,
1427 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1428 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1429 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1431 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1432 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1433 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1438 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1439 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1440 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1453 };
1454 
1455 /* ATA Information VPD page */
1456 static int inquiry_vpd_89(unsigned char *arr)
1457 {
1458 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1459 	return sizeof(vpd89_data);
1460 }
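/*
 * vpd89_data looks like a canned SAT ATA IDENTIFY DEVICE image (note
 * the "linux" / "SAT scsi_debug" strings and the 0xa5 signature byte
 * near the end), so this page always returns the same translation data.
 */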
1461 
1462 
1463 static unsigned char vpdb0_data[] = {
1464 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1465 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 };
1469 
1470 /* Block limits VPD page (SBC-3) */
1471 static int inquiry_vpd_b0(unsigned char *arr)
1472 {
1473 	unsigned int gran;
1474 
1475 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1476 
1477 	/* Optimal transfer length granularity */
1478 	if (sdebug_opt_xferlen_exp != 0 &&
1479 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1480 		gran = 1 << sdebug_opt_xferlen_exp;
1481 	else
1482 		gran = 1 << sdebug_physblk_exp;
1483 	put_unaligned_be16(gran, arr + 2);
1484 
1485 	/* Maximum Transfer Length */
1486 	if (sdebug_store_sectors > 0x400)
1487 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1488 
1489 	/* Optimal Transfer Length */
1490 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1491 
1492 	if (sdebug_lbpu) {
1493 		/* Maximum Unmap LBA Count */
1494 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1495 
1496 		/* Maximum Unmap Block Descriptor Count */
1497 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1498 	}
1499 
1500 	/* Unmap Granularity Alignment */
1501 	if (sdebug_unmap_alignment) {
1502 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1503 		arr[28] |= 0x80; /* UGAVALID */
1504 	}
1505 
1506 	/* Optimal Unmap Granularity */
1507 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1508 
1509 	/* Maximum WRITE SAME Length */
1510 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1511 
1512 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1513 }
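/*
 * Example: with the default physblk_exp=0 and opt_xferlen_exp=0 the
 * granularity above is 1 << 0 = 1 logical block. The 0x3c (60 byte)
 * return keeps the full SBC-3 page, including the provisioning fields.
 */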
1514 
1515 /* Block device characteristics VPD page (SBC-3) */
1516 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1517 {
1518 	memset(arr, 0, 0x3c);
1519 	arr[0] = 0;
1520 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1521 	arr[2] = 0;
1522 	arr[3] = 5;	/* less than 1.8" */
1523 	if (devip->zmodel == BLK_ZONED_HA)
1524 		arr[4] = 1 << 4;	/* zoned field = 01b */
1525 
1526 	return 0x3c;
1527 }
1528 
1529 /* Logical block provisioning VPD page (SBC-4) */
1530 static int inquiry_vpd_b2(unsigned char *arr)
1531 {
1532 	memset(arr, 0, 0x4);
1533 	arr[0] = 0;			/* threshold exponent */
1534 	if (sdebug_lbpu)
1535 		arr[1] = 1 << 7;
1536 	if (sdebug_lbpws)
1537 		arr[1] |= 1 << 6;
1538 	if (sdebug_lbpws10)
1539 		arr[1] |= 1 << 5;
1540 	if (sdebug_lbprz && scsi_debug_lbp())
1541 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1542 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1543 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1544 	/* threshold_percentage=0 */
1545 	return 0x4;
1546 }
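/*
 * Example: lbpu=1, lbpws=1 and lbprz=1 (with provisioning active) give
 * arr[1] = 0x80 | 0x40 | (1 << 2) = 0xc4, i.e. LBPU, LBPWS and
 * LBPRZ=001b all reported.
 */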
1547 
1548 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1549 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1550 {
1551 	memset(arr, 0, 0x3c);
1552 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1553 	/*
1554 	 * Set Optimal number of open sequential write preferred zones and
1555 	 * Optimal number of non-sequentially written sequential write
1556 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1557 	 * fields set to zero, apart from Max. number of open swrz_s field.
1558 	 */
1559 	put_unaligned_be32(0xffffffff, &arr[4]);
1560 	put_unaligned_be32(0xffffffff, &arr[8]);
1561 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1562 		put_unaligned_be32(devip->max_open, &arr[12]);
1563 	else
1564 		put_unaligned_be32(0xffffffff, &arr[12]);
1565 	if (devip->zcap < devip->zsize) {
1566 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1567 		put_unaligned_be64(devip->zsize, &arr[20]);
1568 	} else {
1569 		arr[19] = 0;
1570 	}
1571 	return 0x3c;
1572 }
1573 
1574 #define SDEBUG_LONG_INQ_SZ 96
1575 #define SDEBUG_MAX_INQ_ARR_SZ 584
1576 
1577 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1578 {
1579 	unsigned char pq_pdt;
1580 	unsigned char *arr;
1581 	unsigned char *cmd = scp->cmnd;
1582 	u32 alloc_len, n;
1583 	int ret;
1584 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1585 
1586 	alloc_len = get_unaligned_be16(cmd + 3);
1587 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1588 	if (!arr)
1589 		return DID_REQUEUE << 16;
1590 	is_disk = (sdebug_ptype == TYPE_DISK);
1591 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1592 	is_disk_zbc = (is_disk || is_zbc);
1593 	have_wlun = scsi_is_wlun(scp->device->lun);
1594 	if (have_wlun)
1595 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1596 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1597 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1598 	else
1599 		pq_pdt = (sdebug_ptype & 0x1f);
1600 	arr[0] = pq_pdt;
1601 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1602 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1603 		kfree(arr);
1604 		return check_condition_result;
1605 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1606 		int lu_id_num, port_group_id, target_dev_id;
1607 		u32 len;
1608 		char lu_id_str[6];
1609 		int host_no = devip->sdbg_host->shost->host_no;
1610 
1611 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1612 		    (devip->channel & 0x7f);
1613 		if (sdebug_vpd_use_hostno == 0)
1614 			host_no = 0;
1615 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1616 			    (devip->target * 1000) + devip->lun);
1617 		target_dev_id = ((host_no + 1) * 2000) +
1618 				 (devip->target * 1000) - 3;
1619 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1620 		if (0 == cmd[2]) { /* supported vital product data pages */
1621 			arr[1] = cmd[2];	/*sanity */
1622 			n = 4;
1623 			arr[n++] = 0x0;   /* this page */
1624 			arr[n++] = 0x80;  /* unit serial number */
1625 			arr[n++] = 0x83;  /* device identification */
1626 			arr[n++] = 0x84;  /* software interface ident. */
1627 			arr[n++] = 0x85;  /* management network addresses */
1628 			arr[n++] = 0x86;  /* extended inquiry */
1629 			arr[n++] = 0x87;  /* mode page policy */
1630 			arr[n++] = 0x88;  /* SCSI ports */
1631 			if (is_disk_zbc) {	  /* SBC or ZBC */
1632 				arr[n++] = 0x89;  /* ATA information */
1633 				arr[n++] = 0xb0;  /* Block limits */
1634 				arr[n++] = 0xb1;  /* Block characteristics */
1635 				if (is_disk)
1636 					arr[n++] = 0xb2;  /* LB Provisioning */
1637 				if (is_zbc)
1638 					arr[n++] = 0xb6;  /* ZB dev. char. */
1639 			}
1640 			arr[3] = n - 4;	  /* number of supported VPD pages */
1641 		} else if (0x80 == cmd[2]) { /* unit serial number */
1642 			arr[1] = cmd[2];	/*sanity */
1643 			arr[3] = len;
1644 			memcpy(&arr[4], lu_id_str, len);
1645 		} else if (0x83 == cmd[2]) { /* device identification */
1646 			arr[1] = cmd[2];	/*sanity */
1647 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1648 						target_dev_id, lu_id_num,
1649 						lu_id_str, len,
1650 						&devip->lu_name);
1651 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1652 			arr[1] = cmd[2];	/*sanity */
1653 			arr[3] = inquiry_vpd_84(&arr[4]);
1654 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1655 			arr[1] = cmd[2];	/*sanity */
1656 			arr[3] = inquiry_vpd_85(&arr[4]);
1657 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1658 			arr[1] = cmd[2];	/*sanity */
1659 			arr[3] = 0x3c;	/* number of following entries */
1660 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1661 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1662 			else if (have_dif_prot)
1663 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1664 			else
1665 				arr[4] = 0x0;   /* no protection stuff */
1666 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1667 		} else if (0x87 == cmd[2]) { /* mode page policy */
1668 			arr[1] = cmd[2];	/*sanity */
1669 			arr[3] = 0x8;	/* number of following entries */
1670 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1671 			arr[6] = 0x80;	/* mlus, shared */
1672 			arr[8] = 0x18;	 /* protocol specific lu */
1673 			arr[10] = 0x82;	 /* mlus, per initiator port */
1674 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1675 			arr[1] = cmd[2];	/*sanity */
1676 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1677 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1678 			arr[1] = cmd[2];        /*sanity */
1679 			n = inquiry_vpd_89(&arr[4]);
1680 			put_unaligned_be16(n, arr + 2);
1681 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1682 			arr[1] = cmd[2];        /*sanity */
1683 			arr[3] = inquiry_vpd_b0(&arr[4]);
1684 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1685 			arr[1] = cmd[2];        /*sanity */
1686 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1687 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1688 			arr[1] = cmd[2];        /*sanity */
1689 			arr[3] = inquiry_vpd_b2(&arr[4]);
1690 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1691 			arr[1] = cmd[2];        /*sanity */
1692 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1693 		} else {
1694 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1695 			kfree(arr);
1696 			return check_condition_result;
1697 		}
1698 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1699 		ret = fill_from_dev_buffer(scp, arr,
1700 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1701 		kfree(arr);
1702 		return ret;
1703 	}
1704 	/* drops through here for a standard inquiry */
1705 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1706 	arr[2] = sdebug_scsi_level;
1707 	arr[3] = 2;    /* response_data_format==2 */
1708 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1709 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1710 	if (sdebug_vpd_use_hostno == 0)
1711 		arr[5] |= 0x10; /* claim: implicit TPGS */
1712 	arr[6] = 0x10; /* claim: MultiP */
1713 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1714 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1715 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1716 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1717 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1718 	/* Use Vendor Specific area to place driver date in ASCII hex */
1719 	memcpy(&arr[36], sdebug_version_date, 8);
1720 	/* version descriptors (2 bytes each) follow */
1721 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1722 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1723 	n = 62;
1724 	if (is_disk) {		/* SBC-4 no version claimed */
1725 		put_unaligned_be16(0x600, arr + n);
1726 		n += 2;
1727 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1728 		put_unaligned_be16(0x525, arr + n);
1729 		n += 2;
1730 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1731 		put_unaligned_be16(0x624, arr + n);
1732 		n += 2;
1733 	}
1734 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1735 	ret = fill_from_dev_buffer(scp, arr,
1736 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1737 	kfree(arr);
1738 	return ret;
1739 }
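/*
 * The VPD branches above can be exercised from user space, e.g. with
 * sg3_utils (assumed installed):
 *	sg_inq /dev/sdX			(standard INQUIRY path)
 *	sg_inq --page=0x83 /dev/sdX	(device identification VPD)
 */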
1740 
1741 /* See resp_iec_m_pg() for how this data is manipulated */
1742 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1743 				   0, 0, 0x0, 0x0};
1744 
1745 static int resp_requests(struct scsi_cmnd *scp,
1746 			 struct sdebug_dev_info *devip)
1747 {
1748 	unsigned char *cmd = scp->cmnd;
1749 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1750 	bool dsense = !!(cmd[1] & 1);
1751 	u32 alloc_len = cmd[4];
1752 	u32 len = 18;
1753 	int stopped_state = atomic_read(&devip->stopped);
1754 
1755 	memset(arr, 0, sizeof(arr));
1756 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1757 		if (dsense) {
1758 			arr[0] = 0x72;
1759 			arr[1] = NOT_READY;
1760 			arr[2] = LOGICAL_UNIT_NOT_READY;
1761 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1762 			len = 8;
1763 		} else {
1764 			arr[0] = 0x70;
1765 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1766 			arr[7] = 0xa;			/* 18 byte sense buffer */
1767 			arr[12] = LOGICAL_UNIT_NOT_READY;
1768 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1769 		}
1770 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1771 		/* Informational exceptions control mode page: TEST=1, MRIE=6 */
1772 		if (dsense) {
1773 			arr[0] = 0x72;
1774 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1775 			arr[2] = THRESHOLD_EXCEEDED;
1776 			arr[3] = 0xff;		/* Failure prediction(false) */
1777 			len = 8;
1778 		} else {
1779 			arr[0] = 0x70;
1780 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1781 			arr[7] = 0xa;	/* 18 byte sense buffer */
1782 			arr[12] = THRESHOLD_EXCEEDED;
1783 			arr[13] = 0xff;		/* Failure prediction(false) */
1784 		}
1785 	} else {	/* nothing to report */
1786 		if (dsense) {
1787 			len = 8;
1788 			memset(arr, 0, len);
1789 			arr[0] = 0x72;
1790 		} else {
1791 			memset(arr, 0, len);
1792 			arr[0] = 0x70;
1793 			arr[7] = 0xa;
1794 		}
1795 	}
1796 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1797 }
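/*
 * Note the two sense formats above: 0x72 is descriptor format (8 bytes,
 * no extra descriptors) chosen when the DESC bit is set, while 0x70 is
 * fixed format with the additional sense length in byte 7 (0xa, giving
 * the classic 18-byte sense buffer).
 */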
1798 
1799 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1800 {
1801 	unsigned char *cmd = scp->cmnd;
1802 	int power_cond, want_stop, stopped_state;
1803 	bool changing;
1804 
1805 	power_cond = (cmd[4] & 0xf0) >> 4;
1806 	if (power_cond) {
1807 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1808 		return check_condition_result;
1809 	}
1810 	want_stop = !(cmd[4] & 1);
1811 	stopped_state = atomic_read(&devip->stopped);
1812 	if (stopped_state == 2) {
1813 		ktime_t now_ts = ktime_get_boottime();
1814 
1815 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1816 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1817 
1818 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1819 				/* tur_ms_to_ready timer has expired */
1820 				atomic_set(&devip->stopped, 0);
1821 				stopped_state = 0;
1822 			}
1823 		}
1824 		if (stopped_state == 2) {
1825 			if (want_stop) {
1826 				stopped_state = 1;	/* dummy up success */
1827 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1828 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1829 				return check_condition_result;
1830 			}
1831 		}
1832 	}
1833 	changing = (stopped_state != want_stop);
1834 	if (changing)
1835 		atomic_xchg(&devip->stopped, want_stop);
1836 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1837 		return SDEG_RES_IMMED_MASK;
1838 	else
1839 		return 0;
1840 }
1841 
1842 static sector_t get_sdebug_capacity(void)
1843 {
1844 	static const unsigned int gibibyte = 1073741824;
1845 
1846 	if (sdebug_virtual_gb > 0)
1847 		return (sector_t)sdebug_virtual_gb *
1848 			(gibibyte / sdebug_sector_size);
1849 	else
1850 		return sdebug_store_sectors;
1851 }
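/*
 * Example: virtual_gb=1 with the default 512-byte sectors reports
 * 1073741824 / 512 = 2097152 sectors even when the backing store
 * (dev_size_mb) is smaller; accesses then wrap within the store.
 */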
1852 
1853 #define SDEBUG_READCAP_ARR_SZ 8
1854 static int resp_readcap(struct scsi_cmnd *scp,
1855 			struct sdebug_dev_info *devip)
1856 {
1857 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1858 	unsigned int capac;
1859 
1860 	/* following just in case virtual_gb changed */
1861 	sdebug_capacity = get_sdebug_capacity();
1862 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1863 	if (sdebug_capacity < 0xffffffff) {
1864 		capac = (unsigned int)sdebug_capacity - 1;
1865 		put_unaligned_be32(capac, arr + 0);
1866 	} else
1867 		put_unaligned_be32(0xffffffff, arr + 0);
1868 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1869 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1870 }
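/*
 * The 0xffffffff returned above for large devices is the standard
 * sentinel telling the initiator to retry with READ CAPACITY(16).
 */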
1871 
1872 #define SDEBUG_READCAP16_ARR_SZ 32
1873 static int resp_readcap16(struct scsi_cmnd *scp,
1874 			  struct sdebug_dev_info *devip)
1875 {
1876 	unsigned char *cmd = scp->cmnd;
1877 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1878 	u32 alloc_len;
1879 
1880 	alloc_len = get_unaligned_be32(cmd + 10);
1881 	/* following just in case virtual_gb changed */
1882 	sdebug_capacity = get_sdebug_capacity();
1883 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1884 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1885 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1886 	arr[13] = sdebug_physblk_exp & 0xf;
1887 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1888 
1889 	if (scsi_debug_lbp()) {
1890 		arr[14] |= 0x80; /* LBPME */
1891 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1892 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1893 		 * in the wider field maps to 0 in this field.
1894 		 */
1895 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1896 			arr[14] |= 0x40;
1897 	}
1898 
1899 	/*
1900 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1901 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1902 	 */
1903 	if (devip->zmodel == BLK_ZONED_HM)
1904 		arr[12] |= 1 << 4;
1905 
1906 	arr[15] = sdebug_lowest_aligned & 0xff;
1907 
1908 	if (have_dif_prot) {
1909 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1910 		arr[12] |= 1; /* PROT_EN */
1911 	}
1912 
1913 	return fill_from_dev_buffer(scp, arr,
1914 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1915 }
1916 
1917 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1918 
1919 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1920 			      struct sdebug_dev_info *devip)
1921 {
1922 	unsigned char *cmd = scp->cmnd;
1923 	unsigned char *arr;
1924 	int host_no = devip->sdbg_host->shost->host_no;
1925 	int port_group_a, port_group_b, port_a, port_b;
1926 	u32 alen, n, rlen;
1927 	int ret;
1928 
1929 	alen = get_unaligned_be32(cmd + 6);
1930 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1931 	if (!arr)
1932 		return DID_REQUEUE << 16;
1933 	/*
1934 	 * EVPD page 0x88 states we have two ports, one real
1935 	 * port and a fake port with no device connected.
1936 	 * So we create two port groups with one port each
1937 	 * and set the group with port B to unavailable.
1938 	 */
1939 	port_a = 0x1; /* relative port A */
1940 	port_b = 0x2; /* relative port B */
1941 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1942 			(devip->channel & 0x7f);
1943 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1944 			(devip->channel & 0x7f) + 0x80;
1945 
1946 	/*
1947 	 * The asymmetric access state is cycled according to the host_no.
1948 	 */
1949 	n = 4;
1950 	if (sdebug_vpd_use_hostno == 0) {
1951 		arr[n++] = host_no % 3; /* Asymm access state */
1952 		arr[n++] = 0x0F; /* claim: all states are supported */
1953 	} else {
1954 		arr[n++] = 0x0; /* Active/Optimized path */
1955 		arr[n++] = 0x01; /* only support active/optimized paths */
1956 	}
1957 	put_unaligned_be16(port_group_a, arr + n);
1958 	n += 2;
1959 	arr[n++] = 0;    /* Reserved */
1960 	arr[n++] = 0;    /* Status code */
1961 	arr[n++] = 0;    /* Vendor unique */
1962 	arr[n++] = 0x1;  /* One port per group */
1963 	arr[n++] = 0;    /* Reserved */
1964 	arr[n++] = 0;    /* Reserved */
1965 	put_unaligned_be16(port_a, arr + n);
1966 	n += 2;
1967 	arr[n++] = 3;    /* Port unavailable */
1968 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1969 	put_unaligned_be16(port_group_b, arr + n);
1970 	n += 2;
1971 	arr[n++] = 0;    /* Reserved */
1972 	arr[n++] = 0;    /* Status code */
1973 	arr[n++] = 0;    /* Vendor unique */
1974 	arr[n++] = 0x1;  /* One port per group */
1975 	arr[n++] = 0;    /* Reserved */
1976 	arr[n++] = 0;    /* Reserved */
1977 	put_unaligned_be16(port_b, arr + n);
1978 	n += 2;
1979 
1980 	rlen = n - 4;
1981 	put_unaligned_be32(rlen, arr + 0);
1982 
1983 	/*
1984 	 * Return the smallest of:
1985 	 * - The allocation length from the CDB
1986 	 * - The constructed response length
1987 	 * - The maximum array size
1988 	 */
1989 	rlen = min(alen, n);
1990 	ret = fill_from_dev_buffer(scp, arr,
1991 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1992 	kfree(arr);
1993 	return ret;
1994 }
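/*
 * Example encoding: host_no=0, channel=0 yields port group A = 0x0100
 * and port group B = 0x0180, the same values resp_inquiry() computes
 * for port_group_id in the device identification VPD page.
 */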
1995 
1996 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1997 			     struct sdebug_dev_info *devip)
1998 {
1999 	bool rctd;
2000 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2001 	u16 req_sa, u;
2002 	u32 alloc_len, a_len;
2003 	int k, offset, len, errsts, count, bump, na;
2004 	const struct opcode_info_t *oip;
2005 	const struct opcode_info_t *r_oip;
2006 	u8 *arr;
2007 	u8 *cmd = scp->cmnd;
2008 
2009 	rctd = !!(cmd[2] & 0x80);
2010 	reporting_opts = cmd[2] & 0x7;
2011 	req_opcode = cmd[3];
2012 	req_sa = get_unaligned_be16(cmd + 4);
2013 	alloc_len = get_unaligned_be32(cmd + 6);
2014 	if (alloc_len < 4 || alloc_len > 0xffff) {
2015 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2016 		return check_condition_result;
2017 	}
2018 	if (alloc_len > 8192)
2019 		a_len = 8192;
2020 	else
2021 		a_len = alloc_len;
2022 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2023 	if (NULL == arr) {
2024 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2025 				INSUFF_RES_ASCQ);
2026 		return check_condition_result;
2027 	}
2028 	switch (reporting_opts) {
2029 	case 0:	/* all commands */
2030 		/* count number of commands */
2031 		for (count = 0, oip = opcode_info_arr;
2032 		     oip->num_attached != 0xff; ++oip) {
2033 			if (F_INV_OP & oip->flags)
2034 				continue;
2035 			count += (oip->num_attached + 1);
2036 		}
2037 		bump = rctd ? 20 : 8;
2038 		put_unaligned_be32(count * bump, arr);
2039 		for (offset = 4, oip = opcode_info_arr;
2040 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2041 			if (F_INV_OP & oip->flags)
2042 				continue;
2043 			na = oip->num_attached;
2044 			arr[offset] = oip->opcode;
2045 			put_unaligned_be16(oip->sa, arr + offset + 2);
2046 			if (rctd)
2047 				arr[offset + 5] |= 0x2;
2048 			if (FF_SA & oip->flags)
2049 				arr[offset + 5] |= 0x1;
2050 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2051 			if (rctd)
2052 				put_unaligned_be16(0xa, arr + offset + 8);
2053 			r_oip = oip;
2054 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2055 				if (F_INV_OP & oip->flags)
2056 					continue;
2057 				offset += bump;
2058 				arr[offset] = oip->opcode;
2059 				put_unaligned_be16(oip->sa, arr + offset + 2);
2060 				if (rctd)
2061 					arr[offset + 5] |= 0x2;
2062 				if (FF_SA & oip->flags)
2063 					arr[offset + 5] |= 0x1;
2064 				put_unaligned_be16(oip->len_mask[0],
2065 						   arr + offset + 6);
2066 				if (rctd)
2067 					put_unaligned_be16(0xa,
2068 							   arr + offset + 8);
2069 			}
2070 			oip = r_oip;
2071 			offset += bump;
2072 		}
2073 		break;
2074 	case 1:	/* one command: opcode only */
2075 	case 2:	/* one command: opcode plus service action */
2076 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2077 		sdeb_i = opcode_ind_arr[req_opcode];
2078 		oip = &opcode_info_arr[sdeb_i];
2079 		if (F_INV_OP & oip->flags) {
2080 			supp = 1;
2081 			offset = 4;
2082 		} else {
2083 			if (1 == reporting_opts) {
2084 				if (FF_SA & oip->flags) {
2085 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2086 							     2, 2);
2087 					kfree(arr);
2088 					return check_condition_result;
2089 				}
2090 				req_sa = 0;
2091 			} else if (2 == reporting_opts &&
2092 				   0 == (FF_SA & oip->flags)) {
2093 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
2094 				kfree(arr);
2095 				return check_condition_result;
2096 			}
2097 			if (0 == (FF_SA & oip->flags) &&
2098 			    req_opcode == oip->opcode)
2099 				supp = 3;
2100 			else if (0 == (FF_SA & oip->flags)) {
2101 				na = oip->num_attached;
2102 				for (k = 0, oip = oip->arrp; k < na;
2103 				     ++k, ++oip) {
2104 					if (req_opcode == oip->opcode)
2105 						break;
2106 				}
2107 				supp = (k >= na) ? 1 : 3;
2108 			} else if (req_sa != oip->sa) {
2109 				na = oip->num_attached;
2110 				for (k = 0, oip = oip->arrp; k < na;
2111 				     ++k, ++oip) {
2112 					if (req_sa == oip->sa)
2113 						break;
2114 				}
2115 				supp = (k >= na) ? 1 : 3;
2116 			} else
2117 				supp = 3;
2118 			if (3 == supp) {
2119 				u = oip->len_mask[0];
2120 				put_unaligned_be16(u, arr + 2);
2121 				arr[4] = oip->opcode;
2122 				for (k = 1; k < u; ++k)
2123 					arr[4 + k] = (k < 16) ?
2124 						 oip->len_mask[k] : 0xff;
2125 				offset = 4 + u;
2126 			} else
2127 				offset = 4;
2128 		}
2129 		arr[1] = (rctd ? 0x80 : 0) | supp;
2130 		if (rctd) {
2131 			put_unaligned_be16(0xa, arr + offset);
2132 			offset += 12;
2133 		}
2134 		break;
2135 	default:
2136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2137 		kfree(arr);
2138 		return check_condition_result;
2139 	}
2140 	offset = (offset < a_len) ? offset : a_len;
2141 	len = (offset < alloc_len) ? offset : alloc_len;
2142 	errsts = fill_from_dev_buffer(scp, arr, len);
2143 	kfree(arr);
2144 	return errsts;
2145 }
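/*
 * Each command descriptor emitted above is 8 bytes, or 20 with RCTD set:
 * the extra 12 bytes are the command timeouts descriptor, whose length
 * field is written as 0xa. Hence bump = rctd ? 20 : 8.
 */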
2146 
2147 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2148 			  struct sdebug_dev_info *devip)
2149 {
2150 	bool repd;
2151 	u32 alloc_len, len;
2152 	u8 arr[16];
2153 	u8 *cmd = scp->cmnd;
2154 
2155 	memset(arr, 0, sizeof(arr));
2156 	repd = !!(cmd[2] & 0x80);
2157 	alloc_len = get_unaligned_be32(cmd + 6);
2158 	if (alloc_len < 4) {
2159 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2160 		return check_condition_result;
2161 	}
2162 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2163 	arr[1] = 0x1;		/* ITNRS */
2164 	if (repd) {
2165 		arr[3] = 0xc;
2166 		len = 16;
2167 	} else
2168 		len = 4;
2169 
2170 	len = (len < alloc_len) ? len : alloc_len;
2171 	return fill_from_dev_buffer(scp, arr, len);
2172 }
2173 
2174 /* <<Following mode page info copied from ST318451LW>> */
2175 
2176 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2177 {	/* Read-Write Error Recovery page for mode_sense */
2178 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2179 					5, 0, 0xff, 0xff};
2180 
2181 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2182 	if (1 == pcontrol)
2183 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2184 	return sizeof(err_recov_pg);
2185 }
2186 
2187 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2188 { 	/* Disconnect-Reconnect page for mode_sense */
2189 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2190 					 0, 0, 0, 0, 0, 0, 0, 0};
2191 
2192 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2193 	if (1 == pcontrol)
2194 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2195 	return sizeof(disconnect_pg);
2196 }
2197 
2198 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2199 {       /* Format device page for mode_sense */
2200 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2201 				     0, 0, 0, 0, 0, 0, 0, 0,
2202 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2203 
2204 	memcpy(p, format_pg, sizeof(format_pg));
2205 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2206 	put_unaligned_be16(sdebug_sector_size, p + 12);
2207 	if (sdebug_removable)
2208 		p[20] |= 0x20; /* should agree with INQUIRY */
2209 	if (1 == pcontrol)
2210 		memset(p + 2, 0, sizeof(format_pg) - 2);
2211 	return sizeof(format_pg);
2212 }
2213 
2214 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2215 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2216 				     0, 0, 0, 0};
2217 
2218 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2219 { 	/* Caching page for mode_sense */
2220 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2221 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2222 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2223 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2224 
2225 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2226 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2227 	memcpy(p, caching_pg, sizeof(caching_pg));
2228 	if (1 == pcontrol)
2229 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2230 	else if (2 == pcontrol)
2231 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2232 	return sizeof(caching_pg);
2233 }
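/*
 * For the mode page helpers here, pcontrol selects the flavour: 0 =
 * current values, 1 = changeable mask (ch_caching_pg), 2 = defaults
 * (d_caching_pg). pcontrol=3 (saved) is rejected by resp_mode_sense().
 */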
2234 
2235 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2236 				    0, 0, 0x2, 0x4b};
2237 
2238 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2239 { 	/* Control mode page for mode_sense */
2240 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2241 					0, 0, 0, 0};
2242 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2243 				     0, 0, 0x2, 0x4b};
2244 
2245 	if (sdebug_dsense)
2246 		ctrl_m_pg[2] |= 0x4;
2247 	else
2248 		ctrl_m_pg[2] &= ~0x4;
2249 
2250 	if (sdebug_ato)
2251 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2252 
2253 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2254 	if (1 == pcontrol)
2255 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2256 	else if (2 == pcontrol)
2257 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2258 	return sizeof(ctrl_m_pg);
2259 }
2260 
2261 
2262 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2263 {	/* Informational Exceptions control mode page for mode_sense */
2264 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2265 				       0, 0, 0x0, 0x0};
2266 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2267 				      0, 0, 0x0, 0x0};
2268 
2269 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2270 	if (1 == pcontrol)
2271 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2272 	else if (2 == pcontrol)
2273 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2274 	return sizeof(iec_m_pg);
2275 }
2276 
2277 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2278 {	/* SAS SSP mode page - short format for mode_sense */
2279 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2280 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2281 
2282 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2283 	if (1 == pcontrol)
2284 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2285 	return sizeof(sas_sf_m_pg);
2286 }
2287 
2288 
2289 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2290 			      int target_dev_id)
2291 {	/* SAS phy control and discover mode page for mode_sense */
2292 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2293 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2294 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2295 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2296 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2297 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2298 		    0, 0, 0, 0, 0, 0, 0, 0,
2299 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2300 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2301 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2302 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2303 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2304 		    0, 0, 0, 0, 0, 0, 0, 0,
2305 		};
2306 	int port_a, port_b;
2307 
2308 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2309 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2310 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2311 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2312 	port_a = target_dev_id + 1;
2313 	port_b = port_a + 1;
2314 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2315 	put_unaligned_be32(port_a, p + 20);
2316 	put_unaligned_be32(port_b, p + 48 + 20);
2317 	if (1 == pcontrol)
2318 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2319 	return sizeof(sas_pcd_m_pg);
2320 }
2321 
2322 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2323 {	/* SAS SSP shared protocol specific port mode subpage */
2324 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2325 		    0, 0, 0, 0, 0, 0, 0, 0,
2326 		};
2327 
2328 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2329 	if (1 == pcontrol)
2330 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2331 	return sizeof(sas_sha_m_pg);
2332 }
2333 
2334 #define SDEBUG_MAX_MSENSE_SZ 256
2335 
2336 static int resp_mode_sense(struct scsi_cmnd *scp,
2337 			   struct sdebug_dev_info *devip)
2338 {
2339 	int pcontrol, pcode, subpcode, bd_len;
2340 	unsigned char dev_spec;
2341 	u32 alloc_len, offset, len;
2342 	int target_dev_id;
2343 	int target = scp->device->id;
2344 	unsigned char *ap;
2345 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2346 	unsigned char *cmd = scp->cmnd;
2347 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2348 
2349 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2350 	pcontrol = (cmd[2] & 0xc0) >> 6;
2351 	pcode = cmd[2] & 0x3f;
2352 	subpcode = cmd[3];
2353 	msense_6 = (MODE_SENSE == cmd[0]);
2354 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2355 	is_disk = (sdebug_ptype == TYPE_DISK);
2356 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2357 	if ((is_disk || is_zbc) && !dbd)
2358 		bd_len = llbaa ? 16 : 8;
2359 	else
2360 		bd_len = 0;
2361 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2362 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2363 	if (0x3 == pcontrol) {  /* Saving values not supported */
2364 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2365 		return check_condition_result;
2366 	}
2367 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2368 			(devip->target * 1000) - 3;
2369 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2370 	if (is_disk || is_zbc) {
2371 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2372 		if (sdebug_wp)
2373 			dev_spec |= 0x80;
2374 	} else
2375 		dev_spec = 0x0;
2376 	if (msense_6) {
2377 		arr[2] = dev_spec;
2378 		arr[3] = bd_len;
2379 		offset = 4;
2380 	} else {
2381 		arr[3] = dev_spec;
2382 		if (16 == bd_len)
2383 			arr[4] = 0x1;	/* set LONGLBA bit */
2384 		arr[7] = bd_len;	/* assume 255 or less */
2385 		offset = 8;
2386 	}
2387 	ap = arr + offset;
2388 	if ((bd_len > 0) && (!sdebug_capacity))
2389 		sdebug_capacity = get_sdebug_capacity();
2390 
2391 	if (8 == bd_len) {
2392 		if (sdebug_capacity > 0xfffffffe)
2393 			put_unaligned_be32(0xffffffff, ap + 0);
2394 		else
2395 			put_unaligned_be32(sdebug_capacity, ap + 0);
2396 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2397 		offset += bd_len;
2398 		ap = arr + offset;
2399 	} else if (16 == bd_len) {
2400 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2401 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2402 		offset += bd_len;
2403 		ap = arr + offset;
2404 	}
2405 
2406 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2407 		/* TODO: Control Extension page */
2408 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2409 		return check_condition_result;
2410 	}
2411 	bad_pcode = false;
2412 
2413 	switch (pcode) {
2414 	case 0x1:	/* Read-Write error recovery page, direct access */
2415 		len = resp_err_recov_pg(ap, pcontrol, target);
2416 		offset += len;
2417 		break;
2418 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2419 		len = resp_disconnect_pg(ap, pcontrol, target);
2420 		offset += len;
2421 		break;
2422 	case 0x3:       /* Format device page, direct access */
2423 		if (is_disk) {
2424 			len = resp_format_pg(ap, pcontrol, target);
2425 			offset += len;
2426 		} else
2427 			bad_pcode = true;
2428 		break;
2429 	case 0x8:	/* Caching page, direct access */
2430 		if (is_disk || is_zbc) {
2431 			len = resp_caching_pg(ap, pcontrol, target);
2432 			offset += len;
2433 		} else
2434 			bad_pcode = true;
2435 		break;
2436 	case 0xa:	/* Control Mode page, all devices */
2437 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2438 		offset += len;
2439 		break;
2440 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2441 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2442 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2443 			return check_condition_result;
2444 		}
2445 		len = 0;
2446 		if ((0x0 == subpcode) || (0xff == subpcode))
2447 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2448 		if ((0x1 == subpcode) || (0xff == subpcode))
2449 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2450 						  target_dev_id);
2451 		if ((0x2 == subpcode) || (0xff == subpcode))
2452 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2453 		offset += len;
2454 		break;
2455 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2456 		len = resp_iec_m_pg(ap, pcontrol, target);
2457 		offset += len;
2458 		break;
2459 	case 0x3f:	/* Read all Mode pages */
2460 		if ((0 == subpcode) || (0xff == subpcode)) {
2461 			len = resp_err_recov_pg(ap, pcontrol, target);
2462 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2463 			if (is_disk) {
2464 				len += resp_format_pg(ap + len, pcontrol,
2465 						      target);
2466 				len += resp_caching_pg(ap + len, pcontrol,
2467 						       target);
2468 			} else if (is_zbc) {
2469 				len += resp_caching_pg(ap + len, pcontrol,
2470 						       target);
2471 			}
2472 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2473 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2474 			if (0xff == subpcode) {
2475 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2476 						  target, target_dev_id);
2477 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2478 			}
2479 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2480 			offset += len;
2481 		} else {
2482 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2483 			return check_condition_result;
2484 		}
2485 		break;
2486 	default:
2487 		bad_pcode = true;
2488 		break;
2489 	}
2490 	if (bad_pcode) {
2491 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2492 		return check_condition_result;
2493 	}
2494 	if (msense_6)
2495 		arr[0] = offset - 1;
2496 	else
2497 		put_unaligned_be16((offset - 2), arr + 0);
2498 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2499 }
2500 
2501 #define SDEBUG_MAX_MSELECT_SZ 512
2502 
2503 static int resp_mode_select(struct scsi_cmnd *scp,
2504 			    struct sdebug_dev_info *devip)
2505 {
2506 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2507 	int param_len, res, mpage;
2508 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2509 	unsigned char *cmd = scp->cmnd;
2510 	int mselect6 = (MODE_SELECT == cmd[0]);
2511 
2512 	memset(arr, 0, sizeof(arr));
2513 	pf = cmd[1] & 0x10;
2514 	sp = cmd[1] & 0x1;
2515 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2516 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2517 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2518 		return check_condition_result;
2519 	}
2520 	res = fetch_to_dev_buffer(scp, arr, param_len);
2521 	if (-1 == res)
2522 		return DID_ERROR << 16;
2523 	else if (sdebug_verbose && (res < param_len))
2524 		sdev_printk(KERN_INFO, scp->device,
2525 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2526 			    __func__, param_len, res);
2527 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2528 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2529 	off = bd_len + (mselect6 ? 4 : 8);
2530 	if (md_len > 2 || off >= res) {
2531 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2532 		return check_condition_result;
2533 	}
2534 	mpage = arr[off] & 0x3f;
2535 	ps = !!(arr[off] & 0x80);
2536 	if (ps) {
2537 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2538 		return check_condition_result;
2539 	}
2540 	spf = !!(arr[off] & 0x40);
2541 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2542 		       (arr[off + 1] + 2);
2543 	if ((pg_len + off) > param_len) {
2544 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2545 				PARAMETER_LIST_LENGTH_ERR, 0);
2546 		return check_condition_result;
2547 	}
2548 	switch (mpage) {
2549 	case 0x8:      /* Caching Mode page */
2550 		if (caching_pg[1] == arr[off + 1]) {
2551 			memcpy(caching_pg + 2, arr + off + 2,
2552 			       sizeof(caching_pg) - 2);
2553 			goto set_mode_changed_ua;
2554 		}
2555 		break;
2556 	case 0xa:      /* Control Mode page */
2557 		if (ctrl_m_pg[1] == arr[off + 1]) {
2558 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2559 			       sizeof(ctrl_m_pg) - 2);
2560 			if (ctrl_m_pg[4] & 0x8)
2561 				sdebug_wp = true;
2562 			else
2563 				sdebug_wp = false;
2564 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2565 			goto set_mode_changed_ua;
2566 		}
2567 		break;
2568 	case 0x1c:      /* Informational Exceptions Mode page */
2569 		if (iec_m_pg[1] == arr[off + 1]) {
2570 			memcpy(iec_m_pg + 2, arr + off + 2,
2571 			       sizeof(iec_m_pg) - 2);
2572 			goto set_mode_changed_ua;
2573 		}
2574 		break;
2575 	default:
2576 		break;
2577 	}
2578 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2579 	return check_condition_result;
2580 set_mode_changed_ua:
2581 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2582 	return 0;
2583 }
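/*
 * Only the Caching, Control and Informational Exceptions pages are
 * writable above; a successful MODE SELECT also raises the MODE
 * PARAMETERS CHANGED unit attention so other initiators see the change.
 */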
2584 
2585 static int resp_temp_l_pg(unsigned char *arr)
2586 {
2587 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2588 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2589 		};
2590 
2591 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2592 	return sizeof(temp_l_pg);
2593 }
2594 
2595 static int resp_ie_l_pg(unsigned char *arr)
2596 {
2597 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2598 		};
2599 
2600 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2601 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2602 		arr[4] = THRESHOLD_EXCEEDED;
2603 		arr[5] = 0xff;
2604 	}
2605 	return sizeof(ie_l_pg);
2606 }
2607 
2608 static int resp_env_rep_l_spg(unsigned char *arr)
2609 {
2610 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2611 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2612 					 0x1, 0x0, 0x23, 0x8,
2613 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2614 		};
2615 
2616 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2617 	return sizeof(env_rep_l_spg);
2618 }
2619 
2620 #define SDEBUG_MAX_LSENSE_SZ 512
2621 
2622 static int resp_log_sense(struct scsi_cmnd *scp,
2623 			  struct sdebug_dev_info *devip)
2624 {
2625 	int ppc, sp, pcode, subpcode;
2626 	u32 alloc_len, len, n;
2627 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2628 	unsigned char *cmd = scp->cmnd;
2629 
2630 	memset(arr, 0, sizeof(arr));
2631 	ppc = cmd[1] & 0x2;
2632 	sp = cmd[1] & 0x1;
2633 	if (ppc || sp) {
2634 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2635 		return check_condition_result;
2636 	}
2637 	pcode = cmd[2] & 0x3f;
2638 	subpcode = cmd[3] & 0xff;
2639 	alloc_len = get_unaligned_be16(cmd + 7);
2640 	arr[0] = pcode;
2641 	if (0 == subpcode) {
2642 		switch (pcode) {
2643 		case 0x0:	/* Supported log pages log page */
2644 			n = 4;
2645 			arr[n++] = 0x0;		/* this page */
2646 			arr[n++] = 0xd;		/* Temperature */
2647 			arr[n++] = 0x2f;	/* Informational exceptions */
2648 			arr[3] = n - 4;
2649 			break;
2650 		case 0xd:	/* Temperature log page */
2651 			arr[3] = resp_temp_l_pg(arr + 4);
2652 			break;
2653 		case 0x2f:	/* Informational exceptions log page */
2654 			arr[3] = resp_ie_l_pg(arr + 4);
2655 			break;
2656 		default:
2657 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2658 			return check_condition_result;
2659 		}
2660 	} else if (0xff == subpcode) {
2661 		arr[0] |= 0x40;
2662 		arr[1] = subpcode;
2663 		switch (pcode) {
2664 		case 0x0:	/* Supported log pages and subpages log page */
2665 			n = 4;
2666 			arr[n++] = 0x0;
2667 			arr[n++] = 0x0;		/* 0,0 page */
2668 			arr[n++] = 0x0;
2669 			arr[n++] = 0xff;	/* this page */
2670 			arr[n++] = 0xd;
2671 			arr[n++] = 0x0;		/* Temperature */
2672 			arr[n++] = 0xd;
2673 			arr[n++] = 0x1;		/* Environment reporting */
2674 			arr[n++] = 0xd;
2675 			arr[n++] = 0xff;	/* all 0xd subpages */
2676 			arr[n++] = 0x2f;
2677 			arr[n++] = 0x0;	/* Informational exceptions */
2678 			arr[n++] = 0x2f;
2679 			arr[n++] = 0xff;	/* all 0x2f subpages */
2680 			arr[3] = n - 4;
2681 			break;
2682 		case 0xd:	/* Temperature subpages */
2683 			n = 4;
2684 			arr[n++] = 0xd;
2685 			arr[n++] = 0x0;		/* Temperature */
2686 			arr[n++] = 0xd;
2687 			arr[n++] = 0x1;		/* Environment reporting */
2688 			arr[n++] = 0xd;
2689 			arr[n++] = 0xff;	/* these subpages */
2690 			arr[3] = n - 4;
2691 			break;
2692 		case 0x2f:	/* Informational exceptions subpages */
2693 			n = 4;
2694 			arr[n++] = 0x2f;
2695 			arr[n++] = 0x0;		/* Informational exceptions */
2696 			arr[n++] = 0x2f;
2697 			arr[n++] = 0xff;	/* these subpages */
2698 			arr[3] = n - 4;
2699 			break;
2700 		default:
2701 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2702 			return check_condition_result;
2703 		}
2704 	} else if (subpcode > 0) {
2705 		arr[0] |= 0x40;
2706 		arr[1] = subpcode;
2707 		if (pcode == 0xd && subpcode == 1)
2708 			arr[3] = resp_env_rep_l_spg(arr + 4);
2709 		else {
2710 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2711 			return check_condition_result;
2712 		}
2713 	} else {
2714 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2715 		return check_condition_result;
2716 	}
2717 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2718 	return fill_from_dev_buffer(scp, arr,
2719 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2720 }
2721 
2722 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2723 {
2724 	return devip->nr_zones != 0;
2725 }
2726 
2727 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2728 					unsigned long long lba)
2729 {
2730 	u32 zno = lba >> devip->zsize_shift;
2731 	struct sdeb_zone_state *zsp;
2732 
2733 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2734 		return &devip->zstate[zno];
2735 
2736 	/*
2737 	 * If the zone capacity is less than the zone size, adjust for gap
2738 	 * zones.
2739 	 */
2740 	zno = 2 * zno - devip->nr_conv_zones;
2741 	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
2742 	zsp = &devip->zstate[zno];
2743 	if (lba >= zsp->z_start + zsp->z_size)
2744 		zsp++;
2745 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2746 	return zsp;
2747 }
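/*
 * Worked example of the gap-zone mapping above: with nr_conv_zones=1
 * and zcap < zsize, an LBA in logical zone 3 maps to array index
 * 2 * 3 - 1 = 5, and the zsp++ step moves to the trailing gap zone when
 * the LBA lies beyond the capacity-limited sequential zone.
 */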
2748 
2749 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2750 {
2751 	return zsp->z_type == ZBC_ZTYPE_CNV;
2752 }
2753 
2754 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2755 {
2756 	return zsp->z_type == ZBC_ZTYPE_GAP;
2757 }
2758 
2759 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2760 {
2761 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2762 }
2763 
2764 static void zbc_close_zone(struct sdebug_dev_info *devip,
2765 			   struct sdeb_zone_state *zsp)
2766 {
2767 	enum sdebug_z_cond zc;
2768 
2769 	if (!zbc_zone_is_seq(zsp))
2770 		return;
2771 
2772 	zc = zsp->z_cond;
2773 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2774 		return;
2775 
2776 	if (zc == ZC2_IMPLICIT_OPEN)
2777 		devip->nr_imp_open--;
2778 	else
2779 		devip->nr_exp_open--;
2780 
2781 	if (zsp->z_wp == zsp->z_start) {
2782 		zsp->z_cond = ZC1_EMPTY;
2783 	} else {
2784 		zsp->z_cond = ZC4_CLOSED;
2785 		devip->nr_closed++;
2786 	}
2787 }
2788 
2789 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2790 {
2791 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2792 	unsigned int i;
2793 
2794 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2795 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2796 			zbc_close_zone(devip, zsp);
2797 			return;
2798 		}
2799 	}
2800 }
2801 
2802 static void zbc_open_zone(struct sdebug_dev_info *devip,
2803 			  struct sdeb_zone_state *zsp, bool explicit)
2804 {
2805 	enum sdebug_z_cond zc;
2806 
2807 	if (!zbc_zone_is_seq(zsp))
2808 		return;
2809 
2810 	zc = zsp->z_cond;
2811 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2812 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2813 		return;
2814 
2815 	/* Close an implicit open zone if necessary */
2816 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2817 		zbc_close_zone(devip, zsp);
2818 	else if (devip->max_open &&
2819 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2820 		zbc_close_imp_open_zone(devip);
2821 
2822 	if (zsp->z_cond == ZC4_CLOSED)
2823 		devip->nr_closed--;
2824 	if (explicit) {
2825 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2826 		devip->nr_exp_open++;
2827 	} else {
2828 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2829 		devip->nr_imp_open++;
2830 	}
2831 }
2832 
2833 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2834 				     struct sdeb_zone_state *zsp)
2835 {
2836 	switch (zsp->z_cond) {
2837 	case ZC2_IMPLICIT_OPEN:
2838 		devip->nr_imp_open--;
2839 		break;
2840 	case ZC3_EXPLICIT_OPEN:
2841 		devip->nr_exp_open--;
2842 		break;
2843 	default:
2844 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2845 			  zsp->z_start, zsp->z_cond);
2846 		break;
2847 	}
2848 	zsp->z_cond = ZC5_FULL;
2849 }
2850 
2851 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2852 		       unsigned long long lba, unsigned int num)
2853 {
2854 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2855 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2856 
2857 	if (!zbc_zone_is_seq(zsp))
2858 		return;
2859 
2860 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2861 		zsp->z_wp += num;
2862 		if (zsp->z_wp >= zend)
2863 			zbc_set_zone_full(devip, zsp);
2864 		return;
2865 	}
2866 
2867 	while (num) {
2868 		if (lba != zsp->z_wp)
2869 			zsp->z_non_seq_resource = true;
2870 
2871 		end = lba + num;
2872 		if (end >= zend) {
2873 			n = zend - lba;
2874 			zsp->z_wp = zend;
2875 		} else if (end > zsp->z_wp) {
2876 			n = num;
2877 			zsp->z_wp = end;
2878 		} else {
2879 			n = num;
2880 		}
2881 		if (zsp->z_wp >= zend)
2882 			zbc_set_zone_full(devip, zsp);
2883 
2884 		num -= n;
2885 		lba += n;
2886 		if (num) {
2887 			zsp++;
2888 			zend = zsp->z_start + zsp->z_size;
2889 		}
2890 	}
2891 }
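/*
 * The loop above handles sequential write preferred (host-aware) zones:
 * a write that does not start at the write pointer marks the zone as a
 * non-sequential-write resource, and the write pointer only advances
 * when the write extends past it.
 */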
2892 
2893 static int check_zbc_access_params(struct scsi_cmnd *scp,
2894 			unsigned long long lba, unsigned int num, bool write)
2895 {
2896 	struct scsi_device *sdp = scp->device;
2897 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2898 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2899 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2900 
2901 	if (!write) {
2902 		if (devip->zmodel == BLK_ZONED_HA)
2903 			return 0;
2904 		/* For host-managed, reads cannot cross zone type boundaries */
2905 		if (zsp->z_type != zsp_end->z_type) {
2906 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2907 					LBA_OUT_OF_RANGE,
2908 					READ_INVDATA_ASCQ);
2909 			return check_condition_result;
2910 		}
2911 		return 0;
2912 	}
2913 
2914 	/* Writing into a gap zone is not allowed */
2915 	if (zbc_zone_is_gap(zsp)) {
2916 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2917 				ATTEMPT_ACCESS_GAP);
2918 		return check_condition_result;
2919 	}
2920 
2921 	/* No restrictions for writes within conventional zones */
2922 	if (zbc_zone_is_conv(zsp)) {
2923 		if (!zbc_zone_is_conv(zsp_end)) {
2924 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2925 					LBA_OUT_OF_RANGE,
2926 					WRITE_BOUNDARY_ASCQ);
2927 			return check_condition_result;
2928 		}
2929 		return 0;
2930 	}
2931 
2932 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2933 		/* Writes cannot cross sequential zone boundaries */
2934 		if (zsp_end != zsp) {
2935 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2936 					LBA_OUT_OF_RANGE,
2937 					WRITE_BOUNDARY_ASCQ);
2938 			return check_condition_result;
2939 		}
2940 		/* Cannot write full zones */
2941 		if (zsp->z_cond == ZC5_FULL) {
2942 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2943 					INVALID_FIELD_IN_CDB, 0);
2944 			return check_condition_result;
2945 		}
2946 		/* Writes must be aligned to the zone WP */
2947 		if (lba != zsp->z_wp) {
2948 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2949 					LBA_OUT_OF_RANGE,
2950 					UNALIGNED_WRITE_ASCQ);
2951 			return check_condition_result;
2952 		}
2953 	}
2954 
2955 	/* Handle implicit open of closed and empty zones */
2956 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2957 		if (devip->max_open &&
2958 		    devip->nr_exp_open >= devip->max_open) {
2959 			mk_sense_buffer(scp, DATA_PROTECT,
2960 					INSUFF_RES_ASC,
2961 					INSUFF_ZONE_ASCQ);
2962 			return check_condition_result;
2963 		}
2964 		zbc_open_zone(devip, zsp, false);
2965 	}
2966 
2967 	return 0;
2968 }
2969 
2970 static inline int check_device_access_params
2971 			(struct scsi_cmnd *scp, unsigned long long lba,
2972 			 unsigned int num, bool write)
2973 {
2974 	struct scsi_device *sdp = scp->device;
2975 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2976 
2977 	if (lba + num > sdebug_capacity) {
2978 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2979 		return check_condition_result;
2980 	}
2981 	/* transfer length excessive (tie in to block limits VPD page) */
2982 	if (num > sdebug_store_sectors) {
2983 		/* needs work to find which cdb byte 'num' comes from */
2984 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2985 		return check_condition_result;
2986 	}
2987 	if (write && unlikely(sdebug_wp)) {
2988 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2989 		return check_condition_result;
2990 	}
2991 	if (sdebug_dev_is_zoned(devip))
2992 		return check_zbc_access_params(scp, lba, num, write);
2993 
2994 	return 0;
2995 }
2996 
2997 /*
2998  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2999  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3000  * that access any of the "stores" in struct sdeb_store_info should call this
3001  * function with bug_if_fake_rw set to true.
3002  */
3003 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3004 						bool bug_if_fake_rw)
3005 {
3006 	if (sdebug_fake_rw) {
3007 		BUG_ON(bug_if_fake_rw);	/* See note above */
3008 		return NULL;
3009 	}
3010 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3011 }
3012 
3013 /* Returns number of bytes copied or -1 if error. */
3014 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3015 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3016 {
3017 	int ret;
3018 	u64 block, rest = 0;
3019 	enum dma_data_direction dir;
3020 	struct scsi_data_buffer *sdb = &scp->sdb;
3021 	u8 *fsp;
3022 
3023 	if (do_write) {
3024 		dir = DMA_TO_DEVICE;
3025 		write_since_sync = true;
3026 	} else {
3027 		dir = DMA_FROM_DEVICE;
3028 	}
3029 
3030 	if (!sdb->length || !sip)
3031 		return 0;
3032 	if (scp->sc_data_direction != dir)
3033 		return -1;
3034 	fsp = sip->storep;
3035 
3036 	block = do_div(lba, sdebug_store_sectors);
3037 	if (block + num > sdebug_store_sectors)
3038 		rest = block + num - sdebug_store_sectors;
3039 
3040 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3041 		   fsp + (block * sdebug_sector_size),
3042 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3043 	if (ret != (num - rest) * sdebug_sector_size)
3044 		return ret;
3045 
3046 	if (rest) {
3047 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3048 			    fsp, rest * sdebug_sector_size,
3049 			    sg_skip + ((num - rest) * sdebug_sector_size),
3050 			    do_write);
3051 	}
3052 
3053 	return ret;
3054 }
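/*
 * Note the wrap handling above: do_div() reduces lba modulo
 * sdebug_store_sectors, so when virtual_gb inflates the capacity a
 * transfer crossing the store end copies (num - rest) sectors at the
 * tail and the remaining rest sectors from the start of the store.
 */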
3055 
3056 /* Returns number of bytes copied or -1 if error. */
3057 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3058 {
3059 	struct scsi_data_buffer *sdb = &scp->sdb;
3060 
3061 	if (!sdb->length)
3062 		return 0;
3063 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3064 		return -1;
3065 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3066 			      num * sdebug_sector_size, 0, true);
3067 }
3068 
3069 /* If the first num sectors of arr compare equal to sip->storep+lba, copy
3070  * the top (write) half of arr into sip->storep+lba and return true. If
3071  * the comparison fails then return false. */
3072 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3073 			      const u8 *arr, bool compare_only)
3074 {
3075 	bool res;
3076 	u64 block, rest = 0;
3077 	u32 store_blks = sdebug_store_sectors;
3078 	u32 lb_size = sdebug_sector_size;
3079 	u8 *fsp = sip->storep;
3080 
3081 	block = do_div(lba, store_blks);
3082 	if (block + num > store_blks)
3083 		rest = block + num - store_blks;
3084 
3085 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3086 	if (!res)
3087 		return res;
3088 	if (rest)	/* the range wrapped; compare the tail too */
3089 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3090 			      rest * lb_size);
3091 	if (!res)
3092 		return res;
3093 	if (compare_only)
3094 		return true;
3095 	arr += num * lb_size;
3096 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3097 	if (rest)
3098 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3099 	return res;
3100 }
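
/*
 * For COMPARE AND WRITE the arr buffer holds 2 * num blocks: the first
 * num blocks are the verify data compared above and the second num blocks
 * are the write data copied in on a match (resp_comp_write() below
 * fetches dnum = 2 * num blocks into arr before calling here).
 */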
3101 
3102 static __be16 dif_compute_csum(const void *buf, int len)
3103 {
3104 	__be16 csum;
3105 
3106 	if (sdebug_guard)
3107 		csum = (__force __be16)ip_compute_csum(buf, len);
3108 	else
3109 		csum = cpu_to_be16(crc_t10dif(buf, len));
3110 
3111 	return csum;
3112 }
3113 
3114 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3115 		      sector_t sector, u32 ei_lba)
3116 {
3117 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3118 
3119 	if (sdt->guard_tag != csum) {
3120 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3121 			(unsigned long)sector,
3122 			be16_to_cpu(sdt->guard_tag),
3123 			be16_to_cpu(csum));
3124 		return 0x01;
3125 	}
3126 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3127 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3128 		pr_err("REF check failed on sector %lu\n",
3129 			(unsigned long)sector);
3130 		return 0x03;
3131 	}
3132 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3133 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3134 		pr_err("REF check failed on sector %lu\n",
3135 			(unsigned long)sector);
3136 		return 0x03;
3137 	}
3138 	return 0;
3139 }
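
/*
 * The non-zero returns above are used by callers as the ASCQ paired with
 * ASC 0x10: 0x01 -> LOGICAL BLOCK GUARD CHECK FAILED and 0x03 -> LOGICAL
 * BLOCK REFERENCE TAG CHECK FAILED; see the mk_sense_buffer(..., 0x10, ...)
 * calls in the read and write response functions.
 */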
3140 
3141 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3142 			  unsigned int sectors, bool read)
3143 {
3144 	size_t resid;
3145 	void *paddr;
3146 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3147 						scp->device->hostdata, true);
3148 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3149 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3150 	struct sg_mapping_iter miter;
3151 
3152 	/* Bytes of protection data to copy into sgl */
3153 	resid = sectors * sizeof(*dif_storep);
3154 
3155 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3156 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3157 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3158 
3159 	while (sg_miter_next(&miter) && resid > 0) {
3160 		size_t len = min_t(size_t, miter.length, resid);
3161 		void *start = dif_store(sip, sector);
3162 		size_t rest = 0;
3163 
3164 		if (dif_store_end < start + len)
3165 			rest = start + len - dif_store_end;
3166 
3167 		paddr = miter.addr;
3168 
3169 		if (read)
3170 			memcpy(paddr, start, len - rest);
3171 		else
3172 			memcpy(start, paddr, len - rest);
3173 
3174 		if (rest) {
3175 			if (read)
3176 				memcpy(paddr + len - rest, dif_storep, rest);
3177 			else
3178 				memcpy(dif_storep, paddr + len - rest, rest);
3179 		}
3180 
3181 		sector += len / sizeof(*dif_storep);
3182 		resid -= len;
3183 	}
3184 	sg_miter_stop(&miter);
3185 }
3186 
3187 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3188 			    unsigned int sectors, u32 ei_lba)
3189 {
3190 	int ret = 0;
3191 	unsigned int i;
3192 	sector_t sector;
3193 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3194 						scp->device->hostdata, true);
3195 	struct t10_pi_tuple *sdt;
3196 
3197 	for (i = 0; i < sectors; i++, ei_lba++) {
3198 		sector = start_sec + i;
3199 		sdt = dif_store(sip, sector);
3200 
3201 		if (sdt->app_tag == cpu_to_be16(0xffff))
3202 			continue;
3203 
3204 		/*
3205 		 * Because scsi_debug acts as both initiator and
3206 		 * target we proceed to verify the PI even if
3207 		 * RDPROTECT=3. This is done so the "initiator" knows
3208 		 * which type of error to return. Otherwise we would
3209 		 * have to iterate over the PI twice.
3210 		 */
3211 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3212 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3213 					 sector, ei_lba);
3214 			if (ret) {
3215 				dif_errors++;
3216 				break;
3217 			}
3218 		}
3219 	}
3220 
3221 	dif_copy_prot(scp, start_sec, sectors, true);
3222 	dix_reads++;
3223 
3224 	return ret;
3225 }
3226 
3227 static inline void
3228 sdeb_read_lock(struct sdeb_store_info *sip)
3229 {
3230 	if (sdebug_no_rwlock) {
3231 		if (sip)
3232 			__acquire(&sip->macc_lck);
3233 		else
3234 			__acquire(&sdeb_fake_rw_lck);
3235 	} else {
3236 		if (sip)
3237 			read_lock(&sip->macc_lck);
3238 		else
3239 			read_lock(&sdeb_fake_rw_lck);
3240 	}
3241 }
3242 
3243 static inline void
3244 sdeb_read_unlock(struct sdeb_store_info *sip)
3245 {
3246 	if (sdebug_no_rwlock) {
3247 		if (sip)
3248 			__release(&sip->macc_lck);
3249 		else
3250 			__release(&sdeb_fake_rw_lck);
3251 	} else {
3252 		if (sip)
3253 			read_unlock(&sip->macc_lck);
3254 		else
3255 			read_unlock(&sdeb_fake_rw_lck);
3256 	}
3257 }
3258 
3259 static inline void
3260 sdeb_write_lock(struct sdeb_store_info *sip)
3261 {
3262 	if (sdebug_no_rwlock) {
3263 		if (sip)
3264 			__acquire(&sip->macc_lck);
3265 		else
3266 			__acquire(&sdeb_fake_rw_lck);
3267 	} else {
3268 		if (sip)
3269 			write_lock(&sip->macc_lck);
3270 		else
3271 			write_lock(&sdeb_fake_rw_lck);
3272 	}
3273 }
3274 
3275 static inline void
3276 sdeb_write_unlock(struct sdeb_store_info *sip)
3277 {
3278 	if (sdebug_no_rwlock) {
3279 		if (sip)
3280 			__release(&sip->macc_lck);
3281 		else
3282 			__release(&sdeb_fake_rw_lck);
3283 	} else {
3284 		if (sip)
3285 			write_unlock(&sip->macc_lck);
3286 		else
3287 			write_unlock(&sdeb_fake_rw_lck);
3288 	}
3289 }
3290 
3291 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3292 {
3293 	bool check_prot;
3294 	u32 num;
3295 	u32 ei_lba;
3296 	int ret;
3297 	u64 lba;
3298 	struct sdeb_store_info *sip = devip2sip(devip, true);
3299 	u8 *cmd = scp->cmnd;
3300 
3301 	switch (cmd[0]) {
3302 	case READ_16:
3303 		ei_lba = 0;
3304 		lba = get_unaligned_be64(cmd + 2);
3305 		num = get_unaligned_be32(cmd + 10);
3306 		check_prot = true;
3307 		break;
3308 	case READ_10:
3309 		ei_lba = 0;
3310 		lba = get_unaligned_be32(cmd + 2);
3311 		num = get_unaligned_be16(cmd + 7);
3312 		check_prot = true;
3313 		break;
3314 	case READ_6:
3315 		ei_lba = 0;
3316 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3317 		      (u32)(cmd[1] & 0x1f) << 16;
3318 		num = (0 == cmd[4]) ? 256 : cmd[4];
3319 		check_prot = true;
3320 		break;
3321 	case READ_12:
3322 		ei_lba = 0;
3323 		lba = get_unaligned_be32(cmd + 2);
3324 		num = get_unaligned_be32(cmd + 6);
3325 		check_prot = true;
3326 		break;
3327 	case XDWRITEREAD_10:
3328 		ei_lba = 0;
3329 		lba = get_unaligned_be32(cmd + 2);
3330 		num = get_unaligned_be16(cmd + 7);
3331 		check_prot = false;
3332 		break;
3333 	default:	/* assume READ(32) */
3334 		lba = get_unaligned_be64(cmd + 12);
3335 		ei_lba = get_unaligned_be32(cmd + 20);
3336 		num = get_unaligned_be32(cmd + 28);
3337 		check_prot = false;
3338 		break;
3339 	}
3340 	if (unlikely(have_dif_prot && check_prot)) {
3341 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3342 		    (cmd[1] & 0xe0)) {
3343 			mk_sense_invalid_opcode(scp);
3344 			return check_condition_result;
3345 		}
3346 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3347 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3348 		    (cmd[1] & 0xe0) == 0)
3349 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3350 				    "to DIF device\n");
3351 	}
3352 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3353 		     atomic_read(&sdeb_inject_pending))) {
3354 		num /= 2;
3355 		atomic_set(&sdeb_inject_pending, 0);
3356 	}
3357 
3358 	ret = check_device_access_params(scp, lba, num, false);
3359 	if (ret)
3360 		return ret;
3361 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3362 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3363 		     ((lba + num) > sdebug_medium_error_start))) {
3364 		/* claim unrecoverable read error */
3365 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3366 		/* set info field and valid bit for fixed descriptor */
3367 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3368 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3369 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3370 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3371 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3372 		}
3373 		scsi_set_resid(scp, scsi_bufflen(scp));
3374 		return check_condition_result;
3375 	}
3376 
3377 	sdeb_read_lock(sip);
3378 
3379 	/* DIX + T10 DIF */
3380 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3381 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3382 		case 1: /* Guard tag error */
3383 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3384 				sdeb_read_unlock(sip);
3385 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3386 				return check_condition_result;
3387 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3388 				sdeb_read_unlock(sip);
3389 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3390 				return illegal_condition_result;
3391 			}
3392 			break;
3393 		case 3: /* Reference tag error */
3394 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3395 				sdeb_read_unlock(sip);
3396 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3397 				return check_condition_result;
3398 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3399 				sdeb_read_unlock(sip);
3400 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3401 				return illegal_condition_result;
3402 			}
3403 			break;
3404 		}
3405 	}
3406 
3407 	ret = do_device_access(sip, scp, 0, lba, num, false);
3408 	sdeb_read_unlock(sip);
3409 	if (unlikely(ret == -1))
3410 		return DID_ERROR << 16;
3411 
3412 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3413 
3414 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3415 		     atomic_read(&sdeb_inject_pending))) {
3416 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3417 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3418 			atomic_set(&sdeb_inject_pending, 0);
3419 			return check_condition_result;
3420 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3421 			/* Logical block guard check failed */
3422 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3423 			atomic_set(&sdeb_inject_pending, 0);
3424 			return illegal_condition_result;
3425 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3426 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3427 			atomic_set(&sdeb_inject_pending, 0);
3428 			return illegal_condition_result;
3429 		}
3430 	}
3431 	return 0;
3432 }
3433 
3434 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3435 			     unsigned int sectors, u32 ei_lba)
3436 {
3437 	int ret;
3438 	struct t10_pi_tuple *sdt;
3439 	void *daddr;
3440 	sector_t sector = start_sec;
3441 	int ppage_offset;
3442 	int dpage_offset;
3443 	struct sg_mapping_iter diter;
3444 	struct sg_mapping_iter piter;
3445 
3446 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3447 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3448 
3449 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3450 			scsi_prot_sg_count(SCpnt),
3451 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3452 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3453 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3454 
3455 	/* For each protection page */
3456 	while (sg_miter_next(&piter)) {
3457 		dpage_offset = 0;
3458 		if (WARN_ON(!sg_miter_next(&diter))) {
3459 			ret = 0x01;
3460 			goto out;
3461 		}
3462 
3463 		for (ppage_offset = 0; ppage_offset < piter.length;
3464 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3465 			/* If we're at the end of the current
3466 			 * data page, advance to the next one
3467 			 */
3468 			if (dpage_offset >= diter.length) {
3469 				if (WARN_ON(!sg_miter_next(&diter))) {
3470 					ret = 0x01;
3471 					goto out;
3472 				}
3473 				dpage_offset = 0;
3474 			}
3475 
3476 			sdt = piter.addr + ppage_offset;
3477 			daddr = diter.addr + dpage_offset;
3478 
3479 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3480 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3481 				if (ret)
3482 					goto out;
3483 			}
3484 
3485 			sector++;
3486 			ei_lba++;
3487 			dpage_offset += sdebug_sector_size;
3488 		}
3489 		diter.consumed = dpage_offset;
3490 		sg_miter_stop(&diter);
3491 	}
3492 	sg_miter_stop(&piter);
3493 
3494 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3495 	dix_writes++;
3496 
3497 	return 0;
3498 
3499 out:
3500 	dif_errors++;
3501 	sg_miter_stop(&diter);
3502 	sg_miter_stop(&piter);
3503 	return ret;
3504 }
3505 
3506 static unsigned long lba_to_map_index(sector_t lba)
3507 {
3508 	if (sdebug_unmap_alignment)
3509 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3510 	sector_div(lba, sdebug_unmap_granularity);
3511 	return lba;
3512 }
3513 
3514 static sector_t map_index_to_lba(unsigned long index)
3515 {
3516 	sector_t lba = index * sdebug_unmap_granularity;
3517 
3518 	if (sdebug_unmap_alignment)
3519 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3520 	return lba;
3521 }
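
/*
 * Worked example with hypothetical values sdebug_unmap_granularity=4 and
 * sdebug_unmap_alignment=1: provisioning blocks then start at LBAs 1, 5,
 * 9, ... so lba_to_map_index() maps LBA 0 to index 0, LBAs 1-4 to index 1
 * and LBAs 5-8 to index 2, while map_index_to_lba(1) == 1 and
 * map_index_to_lba(2) == 5.
 */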
3522 
3523 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3524 			      unsigned int *num)
3525 {
3526 	sector_t end;
3527 	unsigned int mapped;
3528 	unsigned long index;
3529 	unsigned long next;
3530 
3531 	index = lba_to_map_index(lba);
3532 	mapped = test_bit(index, sip->map_storep);
3533 
3534 	if (mapped)
3535 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3536 	else
3537 		next = find_next_bit(sip->map_storep, map_size, index);
3538 
3539 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3540 	*num = end - lba;
3541 	return mapped;
3542 }
3543 
3544 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3545 		       unsigned int len)
3546 {
3547 	sector_t end = lba + len;
3548 
3549 	while (lba < end) {
3550 		unsigned long index = lba_to_map_index(lba);
3551 
3552 		if (index < map_size)
3553 			set_bit(index, sip->map_storep);
3554 
3555 		lba = map_index_to_lba(index + 1);
3556 	}
3557 }
3558 
3559 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3560 			 unsigned int len)
3561 {
3562 	sector_t end = lba + len;
3563 	u8 *fsp = sip->storep;
3564 
3565 	while (lba < end) {
3566 		unsigned long index = lba_to_map_index(lba);
3567 
3568 		if (lba == map_index_to_lba(index) &&
3569 		    lba + sdebug_unmap_granularity <= end &&
3570 		    index < map_size) {
3571 			clear_bit(index, sip->map_storep);
3572 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros, LBPRZ=2: 0xff */
3573 				memset(fsp + lba * sdebug_sector_size,
3574 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3575 				       sdebug_sector_size *
3576 				       sdebug_unmap_granularity);
3577 			}
3578 			if (sip->dif_storep) {
3579 				memset(sip->dif_storep + lba, 0xff,
3580 				       sizeof(*sip->dif_storep) *
3581 				       sdebug_unmap_granularity);
3582 			}
3583 		}
3584 		lba = map_index_to_lba(index + 1);
3585 	}
3586 }
3587 
3588 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3589 {
3590 	bool check_prot;
3591 	u32 num;
3592 	u32 ei_lba;
3593 	int ret;
3594 	u64 lba;
3595 	struct sdeb_store_info *sip = devip2sip(devip, true);
3596 	u8 *cmd = scp->cmnd;
3597 
3598 	switch (cmd[0]) {
3599 	case WRITE_16:
3600 		ei_lba = 0;
3601 		lba = get_unaligned_be64(cmd + 2);
3602 		num = get_unaligned_be32(cmd + 10);
3603 		check_prot = true;
3604 		break;
3605 	case WRITE_10:
3606 		ei_lba = 0;
3607 		lba = get_unaligned_be32(cmd + 2);
3608 		num = get_unaligned_be16(cmd + 7);
3609 		check_prot = true;
3610 		break;
3611 	case WRITE_6:
3612 		ei_lba = 0;
3613 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3614 		      (u32)(cmd[1] & 0x1f) << 16;
3615 		num = (0 == cmd[4]) ? 256 : cmd[4];
3616 		check_prot = true;
3617 		break;
3618 	case WRITE_12:
3619 		ei_lba = 0;
3620 		lba = get_unaligned_be32(cmd + 2);
3621 		num = get_unaligned_be32(cmd + 6);
3622 		check_prot = true;
3623 		break;
3624 	case 0x53:	/* XDWRITEREAD(10) */
3625 		ei_lba = 0;
3626 		lba = get_unaligned_be32(cmd + 2);
3627 		num = get_unaligned_be16(cmd + 7);
3628 		check_prot = false;
3629 		break;
3630 	default:	/* assume WRITE(32) */
3631 		lba = get_unaligned_be64(cmd + 12);
3632 		ei_lba = get_unaligned_be32(cmd + 20);
3633 		num = get_unaligned_be32(cmd + 28);
3634 		check_prot = false;
3635 		break;
3636 	}
3637 	if (unlikely(have_dif_prot && check_prot)) {
3638 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3639 		    (cmd[1] & 0xe0)) {
3640 			mk_sense_invalid_opcode(scp);
3641 			return check_condition_result;
3642 		}
3643 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3644 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3645 		    (cmd[1] & 0xe0) == 0)
3646 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3647 				    "to DIF device\n");
3648 	}
3649 
3650 	sdeb_write_lock(sip);
3651 	ret = check_device_access_params(scp, lba, num, true);
3652 	if (ret) {
3653 		sdeb_write_unlock(sip);
3654 		return ret;
3655 	}
3656 
3657 	/* DIX + T10 DIF */
3658 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3659 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3660 		case 1: /* Guard tag error */
3661 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3662 				sdeb_write_unlock(sip);
3663 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3664 				return illegal_condition_result;
3665 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3666 				sdeb_write_unlock(sip);
3667 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3668 				return check_condition_result;
3669 			}
3670 			break;
3671 		case 3: /* Reference tag error */
3672 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3673 				sdeb_write_unlock(sip);
3674 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3675 				return illegal_condition_result;
3676 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3677 				sdeb_write_unlock(sip);
3678 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3679 				return check_condition_result;
3680 			}
3681 			break;
3682 		}
3683 	}
3684 
3685 	ret = do_device_access(sip, scp, 0, lba, num, true);
3686 	if (unlikely(scsi_debug_lbp()))
3687 		map_region(sip, lba, num);
3688 	/* If ZBC zone then bump its write pointer */
3689 	if (sdebug_dev_is_zoned(devip))
3690 		zbc_inc_wp(devip, lba, num);
3691 	sdeb_write_unlock(sip);
3692 	if (unlikely(-1 == ret))
3693 		return DID_ERROR << 16;
3694 	else if (unlikely(sdebug_verbose &&
3695 			  (ret < (num * sdebug_sector_size))))
3696 		sdev_printk(KERN_INFO, scp->device,
3697 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3698 			    my_name, num * sdebug_sector_size, ret);
3699 
3700 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3701 		     atomic_read(&sdeb_inject_pending))) {
3702 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3703 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3704 			atomic_set(&sdeb_inject_pending, 0);
3705 			return check_condition_result;
3706 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3707 			/* Logical block guard check failed */
3708 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3709 			atomic_set(&sdeb_inject_pending, 0);
3710 			return illegal_condition_result;
3711 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3712 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3713 			atomic_set(&sdeb_inject_pending, 0);
3714 			return illegal_condition_result;
3715 		}
3716 	}
3717 	return 0;
3718 }
3719 
3720 /*
3721  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3722  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3723  */
3724 static int resp_write_scat(struct scsi_cmnd *scp,
3725 			   struct sdebug_dev_info *devip)
3726 {
3727 	u8 *cmd = scp->cmnd;
3728 	u8 *lrdp = NULL;
3729 	u8 *up;
3730 	struct sdeb_store_info *sip = devip2sip(devip, true);
3731 	u8 wrprotect;
3732 	u16 lbdof, num_lrd, k;
3733 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3734 	u32 lb_size = sdebug_sector_size;
3735 	u32 ei_lba;
3736 	u64 lba;
3737 	int ret, res;
3738 	bool is_16;
3739 	static const u32 lrd_size = 32; /* also the parameter list header size */
3740 
3741 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3742 		is_16 = false;
3743 		wrprotect = (cmd[10] >> 5) & 0x7;
3744 		lbdof = get_unaligned_be16(cmd + 12);
3745 		num_lrd = get_unaligned_be16(cmd + 16);
3746 		bt_len = get_unaligned_be32(cmd + 28);
3747 	} else {        /* that leaves WRITE SCATTERED(16) */
3748 		is_16 = true;
3749 		wrprotect = (cmd[2] >> 5) & 0x7;
3750 		lbdof = get_unaligned_be16(cmd + 4);
3751 		num_lrd = get_unaligned_be16(cmd + 8);
3752 		bt_len = get_unaligned_be32(cmd + 10);
3753 		if (unlikely(have_dif_prot)) {
3754 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3755 			    wrprotect) {
3756 				mk_sense_invalid_opcode(scp);
3757 				return illegal_condition_result;
3758 			}
3759 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3760 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3761 			     wrprotect == 0)
3762 				sdev_printk(KERN_ERR, scp->device,
3763 					    "Unprotected WR to DIF device\n");
3764 		}
3765 	}
3766 	if ((num_lrd == 0) || (bt_len == 0))
3767 		return 0;       /* T10 says these do-nothings are not errors */
3768 	if (lbdof == 0) {
3769 		if (sdebug_verbose)
3770 			sdev_printk(KERN_INFO, scp->device,
3771 				"%s: %s: LB Data Offset field bad\n",
3772 				my_name, __func__);
3773 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3774 		return illegal_condition_result;
3775 	}
3776 	lbdof_blen = lbdof * lb_size;
3777 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3778 		if (sdebug_verbose)
3779 			sdev_printk(KERN_INFO, scp->device,
3780 				"%s: %s: LBA range descriptors don't fit\n",
3781 				my_name, __func__);
3782 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3783 		return illegal_condition_result;
3784 	}
3785 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3786 	if (lrdp == NULL)
3787 		return SCSI_MLQUEUE_HOST_BUSY;
3788 	if (sdebug_verbose)
3789 		sdev_printk(KERN_INFO, scp->device,
3790 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3791 			my_name, __func__, lbdof_blen);
3792 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3793 	if (res == -1) {
3794 		ret = DID_ERROR << 16;
3795 		goto err_out;
3796 	}
3797 
3798 	sdeb_write_lock(sip);
3799 	sg_off = lbdof_blen;
3800 	/* Spec says the Buffer Transfer Length field counts LBs in the dout */
3801 	cum_lb = 0;
3802 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3803 		lba = get_unaligned_be64(up + 0);
3804 		num = get_unaligned_be32(up + 8);
3805 		if (sdebug_verbose)
3806 			sdev_printk(KERN_INFO, scp->device,
3807 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3808 				my_name, __func__, k, lba, num, sg_off);
3809 		if (num == 0)
3810 			continue;
3811 		ret = check_device_access_params(scp, lba, num, true);
3812 		if (ret)
3813 			goto err_out_unlock;
3814 		num_by = num * lb_size;
3815 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3816 
3817 		if ((cum_lb + num) > bt_len) {
3818 			if (sdebug_verbose)
3819 				sdev_printk(KERN_INFO, scp->device,
3820 				    "%s: %s: sum of blocks > data provided\n",
3821 				    my_name, __func__);
3822 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3823 					0);
3824 			ret = illegal_condition_result;
3825 			goto err_out_unlock;
3826 		}
3827 
3828 		/* DIX + T10 DIF */
3829 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3830 			int prot_ret = prot_verify_write(scp, lba, num,
3831 							 ei_lba);
3832 
3833 			if (prot_ret) {
3834 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3835 						prot_ret);
3836 				ret = illegal_condition_result;
3837 				goto err_out_unlock;
3838 			}
3839 		}
3840 
3841 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3842 		/* If ZBC zone then bump its write pointer */
3843 		if (sdebug_dev_is_zoned(devip))
3844 			zbc_inc_wp(devip, lba, num);
3845 		if (unlikely(scsi_debug_lbp()))
3846 			map_region(sip, lba, num);
3847 		if (unlikely(-1 == ret)) {
3848 			ret = DID_ERROR << 16;
3849 			goto err_out_unlock;
3850 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3851 			sdev_printk(KERN_INFO, scp->device,
3852 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3853 			    my_name, num_by, ret);
3854 
3855 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3856 			     atomic_read(&sdeb_inject_pending))) {
3857 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3858 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3859 				atomic_set(&sdeb_inject_pending, 0);
3860 				ret = check_condition_result;
3861 				goto err_out_unlock;
3862 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3863 				/* Logical block guard check failed */
3864 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3865 				atomic_set(&sdeb_inject_pending, 0);
3866 				ret = illegal_condition_result;
3867 				goto err_out_unlock;
3868 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3869 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3870 				atomic_set(&sdeb_inject_pending, 0);
3871 				ret = illegal_condition_result;
3872 				goto err_out_unlock;
3873 			}
3874 		}
3875 		sg_off += num_by;
3876 		cum_lb += num;
3877 	}
3878 	ret = 0;
3879 err_out_unlock:
3880 	sdeb_write_unlock(sip);
3881 err_out:
3882 	kfree(lrdp);
3883 	return ret;
3884 }
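
/*
 * Data-out layout consumed above (as parsed by this function): a 32-byte
 * parameter list header, then num_lrd 32-byte LBA range descriptors (LBA
 * at byte 0, number of logical blocks at byte 8 and, for the 32-byte CDB,
 * the expected initial reference at byte 12), with the logical block data
 * itself starting at byte offset lbdof * sdebug_sector_size.
 */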
3885 
3886 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3887 			   u32 ei_lba, bool unmap, bool ndob)
3888 {
3889 	struct scsi_device *sdp = scp->device;
3890 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3891 	unsigned long long i;
3892 	u64 block, lbaa;
3893 	u32 lb_size = sdebug_sector_size;
3894 	int ret;
3895 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3896 						scp->device->hostdata, true);
3897 	u8 *fs1p;
3898 	u8 *fsp;
3899 
3900 	sdeb_write_lock(sip);
3901 
3902 	ret = check_device_access_params(scp, lba, num, true);
3903 	if (ret) {
3904 		sdeb_write_unlock(sip);
3905 		return ret;
3906 	}
3907 
3908 	if (unmap && scsi_debug_lbp()) {
3909 		unmap_region(sip, lba, num);
3910 		goto out;
3911 	}
3912 	lbaa = lba;
3913 	block = do_div(lbaa, sdebug_store_sectors);
3914 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3915 	fsp = sip->storep;
3916 	fs1p = fsp + (block * lb_size);
3917 	if (ndob) {
3918 		memset(fs1p, 0, lb_size);
3919 		ret = 0;
3920 	} else
3921 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3922 
3923 	if (-1 == ret) {
3924 		sdeb_write_unlock(sip);
3925 		return DID_ERROR << 16;
3926 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3927 		sdev_printk(KERN_INFO, scp->device,
3928 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3929 			    my_name, "write same", lb_size, ret);
3930 
3931 	/* Copy first sector to remaining blocks */
3932 	for (i = 1 ; i < num ; i++) {
3933 		lbaa = lba + i;
3934 		block = do_div(lbaa, sdebug_store_sectors);
3935 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3936 	}
3937 	if (scsi_debug_lbp())
3938 		map_region(sip, lba, num);
3939 	/* If ZBC zone then bump its write pointer */
3940 	if (sdebug_dev_is_zoned(devip))
3941 		zbc_inc_wp(devip, lba, num);
3942 out:
3943 	sdeb_write_unlock(sip);
3944 
3945 	return 0;
3946 }
3947 
3948 static int resp_write_same_10(struct scsi_cmnd *scp,
3949 			      struct sdebug_dev_info *devip)
3950 {
3951 	u8 *cmd = scp->cmnd;
3952 	u32 lba;
3953 	u16 num;
3954 	u32 ei_lba = 0;
3955 	bool unmap = false;
3956 
3957 	if (cmd[1] & 0x8) {
3958 		if (sdebug_lbpws10 == 0) {
3959 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3960 			return check_condition_result;
3961 		} else
3962 			unmap = true;
3963 	}
3964 	lba = get_unaligned_be32(cmd + 2);
3965 	num = get_unaligned_be16(cmd + 7);
3966 	if (num > sdebug_write_same_length) {
3967 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3968 		return check_condition_result;
3969 	}
3970 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3971 }
3972 
3973 static int resp_write_same_16(struct scsi_cmnd *scp,
3974 			      struct sdebug_dev_info *devip)
3975 {
3976 	u8 *cmd = scp->cmnd;
3977 	u64 lba;
3978 	u32 num;
3979 	u32 ei_lba = 0;
3980 	bool unmap = false;
3981 	bool ndob = false;
3982 
3983 	if (cmd[1] & 0x8) {	/* UNMAP */
3984 		if (sdebug_lbpws == 0) {
3985 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3986 			return check_condition_result;
3987 		} else
3988 			unmap = true;
3989 	}
3990 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3991 		ndob = true;
3992 	lba = get_unaligned_be64(cmd + 2);
3993 	num = get_unaligned_be32(cmd + 10);
3994 	if (num > sdebug_write_same_length) {
3995 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3996 		return check_condition_result;
3997 	}
3998 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3999 }
4000 
4001 /* Note that the mode field is in the same position as the (lower) service
4002  * action field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4
4003  * suggests each mode of this command be reported separately; left for later. */
4004 static int resp_write_buffer(struct scsi_cmnd *scp,
4005 			     struct sdebug_dev_info *devip)
4006 {
4007 	u8 *cmd = scp->cmnd;
4008 	struct scsi_device *sdp = scp->device;
4009 	struct sdebug_dev_info *dp;
4010 	u8 mode;
4011 
4012 	mode = cmd[1] & 0x1f;
4013 	switch (mode) {
4014 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4015 		/* set UAs on this device only */
4016 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4017 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4018 		break;
4019 	case 0x5:	/* download MC, save and ACT */
4020 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4021 		break;
4022 	case 0x6:	/* download MC with offsets and ACT */
4023 		/* set UAs on most devices (LUs) in this target */
4024 		list_for_each_entry(dp,
4025 				    &devip->sdbg_host->dev_info_list,
4026 				    dev_list)
4027 			if (dp->target == sdp->id) {
4028 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4029 				if (devip != dp)
4030 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4031 						dp->uas_bm);
4032 			}
4033 		break;
4034 	case 0x7:	/* download MC with offsets, save, and ACT */
4035 		/* set UA on all devices (LUs) in this target */
4036 		list_for_each_entry(dp,
4037 				    &devip->sdbg_host->dev_info_list,
4038 				    dev_list)
4039 			if (dp->target == sdp->id)
4040 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4041 					dp->uas_bm);
4042 		break;
4043 	default:
4044 		/* do nothing for this command for other mode values */
4045 		break;
4046 	}
4047 	return 0;
4048 }
4049 
4050 static int resp_comp_write(struct scsi_cmnd *scp,
4051 			   struct sdebug_dev_info *devip)
4052 {
4053 	u8 *cmd = scp->cmnd;
4054 	u8 *arr;
4055 	struct sdeb_store_info *sip = devip2sip(devip, true);
4056 	u64 lba;
4057 	u32 dnum;
4058 	u32 lb_size = sdebug_sector_size;
4059 	u8 num;
4060 	int ret;
4061 	int retval = 0;
4062 
4063 	lba = get_unaligned_be64(cmd + 2);
4064 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4065 	if (0 == num)
4066 		return 0;	/* degenerate case, not an error */
4067 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4068 	    (cmd[1] & 0xe0)) {
4069 		mk_sense_invalid_opcode(scp);
4070 		return check_condition_result;
4071 	}
4072 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4073 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4074 	    (cmd[1] & 0xe0) == 0)
4075 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4076 			    "to DIF device\n");
4077 	ret = check_device_access_params(scp, lba, num, false);
4078 	if (ret)
4079 		return ret;
4080 	dnum = 2 * num;
4081 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4082 	if (NULL == arr) {
4083 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4084 				INSUFF_RES_ASCQ);
4085 		return check_condition_result;
4086 	}
4087 
4088 	sdeb_write_lock(sip);
4089 
4090 	ret = do_dout_fetch(scp, dnum, arr);
4091 	if (ret == -1) {
4092 		retval = DID_ERROR << 16;
4093 		goto cleanup;
4094 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4095 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4096 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4097 			    dnum * lb_size, ret);
4098 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4099 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4100 		retval = check_condition_result;
4101 		goto cleanup;
4102 	}
4103 	if (scsi_debug_lbp())
4104 		map_region(sip, lba, num);
4105 cleanup:
4106 	sdeb_write_unlock(sip);
4107 	kfree(arr);
4108 	return retval;
4109 }
4110 
4111 struct unmap_block_desc {
4112 	__be64	lba;
4113 	__be32	blocks;
4114 	__be32	__reserved;
4115 };
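
/*
 * The UNMAP parameter list checked below is an 8-byte header (bytes 0-1:
 * data length == payload_len - 2; bytes 2-3: block descriptor data length
 * == descriptors * 16) followed, at byte offset 8, by the 16-byte
 * descriptors declared above.
 */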
4116 
4117 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4118 {
4119 	unsigned char *buf;
4120 	struct unmap_block_desc *desc;
4121 	struct sdeb_store_info *sip = devip2sip(devip, true);
4122 	unsigned int i, payload_len, descriptors;
4123 	int ret;
4124 
4125 	if (!scsi_debug_lbp())
4126 		return 0;	/* fib and say it's done */
4127 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4128 	BUG_ON(scsi_bufflen(scp) != payload_len);
4129 
4130 	descriptors = (payload_len - 8) / 16;
4131 	if (descriptors > sdebug_unmap_max_desc) {
4132 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4133 		return check_condition_result;
4134 	}
4135 
4136 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4137 	if (!buf) {
4138 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4139 				INSUFF_RES_ASCQ);
4140 		return check_condition_result;
4141 	}
4142 
4143 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4144 
4145 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4146 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4147 
4148 	desc = (void *)&buf[8];
4149 
4150 	sdeb_write_lock(sip);
4151 
4152 	for (i = 0 ; i < descriptors ; i++) {
4153 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4154 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4155 
4156 		ret = check_device_access_params(scp, lba, num, true);
4157 		if (ret)
4158 			goto out;
4159 
4160 		unmap_region(sip, lba, num);
4161 	}
4162 
4163 	ret = 0;
4164 
4165 out:
4166 	sdeb_write_unlock(sip);
4167 	kfree(buf);
4168 
4169 	return ret;
4170 }
4171 
4172 #define SDEBUG_GET_LBA_STATUS_LEN 32
4173 
4174 static int resp_get_lba_status(struct scsi_cmnd *scp,
4175 			       struct sdebug_dev_info *devip)
4176 {
4177 	u8 *cmd = scp->cmnd;
4178 	u64 lba;
4179 	u32 alloc_len, mapped, num;
4180 	int ret;
4181 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4182 
4183 	lba = get_unaligned_be64(cmd + 2);
4184 	alloc_len = get_unaligned_be32(cmd + 10);
4185 
4186 	if (alloc_len < 24)
4187 		return 0;
4188 
4189 	ret = check_device_access_params(scp, lba, 1, false);
4190 	if (ret)
4191 		return ret;
4192 
4193 	if (scsi_debug_lbp()) {
4194 		struct sdeb_store_info *sip = devip2sip(devip, true);
4195 
4196 		mapped = map_state(sip, lba, &num);
4197 	} else {
4198 		mapped = 1;
4199 		/* following just in case virtual_gb changed */
4200 		sdebug_capacity = get_sdebug_capacity();
4201 		if (sdebug_capacity - lba <= 0xffffffff)
4202 			num = sdebug_capacity - lba;
4203 		else
4204 			num = 0xffffffff;
4205 	}
4206 
4207 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4208 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4209 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4210 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4211 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4212 
4213 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4214 }
4215 
4216 static int resp_sync_cache(struct scsi_cmnd *scp,
4217 			   struct sdebug_dev_info *devip)
4218 {
4219 	int res = 0;
4220 	u64 lba;
4221 	u32 num_blocks;
4222 	u8 *cmd = scp->cmnd;
4223 
4224 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4225 		lba = get_unaligned_be32(cmd + 2);
4226 		num_blocks = get_unaligned_be16(cmd + 7);
4227 	} else {				/* SYNCHRONIZE_CACHE(16) */
4228 		lba = get_unaligned_be64(cmd + 2);
4229 		num_blocks = get_unaligned_be32(cmd + 10);
4230 	}
4231 	if (lba + num_blocks > sdebug_capacity) {
4232 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4233 		return check_condition_result;
4234 	}
4235 	if (!write_since_sync || (cmd[1] & 0x2))
4236 		res = SDEG_RES_IMMED_MASK;
4237 	else		/* delay if write_since_sync and IMMED clear */
4238 		write_since_sync = false;
4239 	return res;
4240 }
4241 
4242 /*
4243  * Assuming LBA+num_blocks is not out-of-range, this function returns
4244  * CONDITION MET if the specified blocks will fit (or have fitted) in the
4245  * cache, and GOOD status otherwise. Model a disk with a big cache and
4246  * always yield CONDITION MET. It actually tries to bring the range of
4247  * main memory into the cache associated with the CPU(s).
4248  */
4249 static int resp_pre_fetch(struct scsi_cmnd *scp,
4250 			  struct sdebug_dev_info *devip)
4251 {
4252 	int res = 0;
4253 	u64 lba;
4254 	u64 block, rest = 0;
4255 	u32 nblks;
4256 	u8 *cmd = scp->cmnd;
4257 	struct sdeb_store_info *sip = devip2sip(devip, true);
4258 	u8 *fsp = sip->storep;
4259 
4260 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4261 		lba = get_unaligned_be32(cmd + 2);
4262 		nblks = get_unaligned_be16(cmd + 7);
4263 	} else {			/* PRE-FETCH(16) */
4264 		lba = get_unaligned_be64(cmd + 2);
4265 		nblks = get_unaligned_be32(cmd + 10);
4266 	}
4267 	if (lba + nblks > sdebug_capacity) {
4268 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4269 		return check_condition_result;
4270 	}
4271 	if (!fsp)
4272 		goto fini;
4273 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4274 	block = do_div(lba, sdebug_store_sectors);
4275 	if (block + nblks > sdebug_store_sectors)
4276 		rest = block + nblks - sdebug_store_sectors;
4277 
4278 	/* Try to bring the PRE-FETCH range into CPU's cache */
4279 	sdeb_read_lock(sip);
4280 	prefetch_range(fsp + (sdebug_sector_size * block),
4281 		       (nblks - rest) * sdebug_sector_size);
4282 	if (rest)
4283 		prefetch_range(fsp, rest * sdebug_sector_size);
4284 	sdeb_read_unlock(sip);
4285 fini:
4286 	if (cmd[1] & 0x2)
4287 		res = SDEG_RES_IMMED_MASK;
4288 	return res | condition_met_result;
4289 }
4290 
4291 #define RL_BUCKET_ELEMS 8
4292 
4293 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4294  * (W-LUN), the normal Linux scanning logic does not associate it with a
4295  * device (e.g. /dev/sg7). The following magic will make that association:
4296  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4297  * where <n> is a host number. If there are multiple targets in a host then
4298  * the above will associate a W-LUN to each target. To only get a W-LUN
4299  * for target 2, then use "echo '- 2 49409' > scan" .
4300  */
4301 static int resp_report_luns(struct scsi_cmnd *scp,
4302 			    struct sdebug_dev_info *devip)
4303 {
4304 	unsigned char *cmd = scp->cmnd;
4305 	unsigned int alloc_len;
4306 	unsigned char select_report;
4307 	u64 lun;
4308 	struct scsi_lun *lun_p;
4309 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4310 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4311 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4312 	unsigned int tlun_cnt;	/* total LUN count */
4313 	unsigned int rlen;	/* response length (in bytes) */
4314 	int k, j, n, res;
4315 	unsigned int off_rsp = 0;
4316 	const int sz_lun = sizeof(struct scsi_lun);
4317 
4318 	clear_luns_changed_on_target(devip);
4319 
4320 	select_report = cmd[2];
4321 	alloc_len = get_unaligned_be32(cmd + 6);
4322 
4323 	if (alloc_len < 4) {
4324 		pr_err("alloc len too small %d\n", alloc_len);
4325 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4326 		return check_condition_result;
4327 	}
4328 
4329 	switch (select_report) {
4330 	case 0:		/* all LUNs apart from W-LUNs */
4331 		lun_cnt = sdebug_max_luns;
4332 		wlun_cnt = 0;
4333 		break;
4334 	case 1:		/* only W-LUNs */
4335 		lun_cnt = 0;
4336 		wlun_cnt = 1;
4337 		break;
4338 	case 2:		/* all LUNs */
4339 		lun_cnt = sdebug_max_luns;
4340 		wlun_cnt = 1;
4341 		break;
4342 	case 0x10:	/* only administrative LUs */
4343 	case 0x11:	/* see SPC-5 */
4344 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4345 	default:
4346 		pr_debug("select report invalid %d\n", select_report);
4347 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4348 		return check_condition_result;
4349 	}
4350 
4351 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4352 		--lun_cnt;
4353 
4354 	tlun_cnt = lun_cnt + wlun_cnt;
4355 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4356 	scsi_set_resid(scp, scsi_bufflen(scp));
4357 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4358 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4359 
4360 	/* loops rely on the response header being the same size as a LUN (8) */
4361 	lun = sdebug_no_lun_0 ? 1 : 0;
4362 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4363 		memset(arr, 0, sizeof(arr));
4364 		lun_p = (struct scsi_lun *)&arr[0];
4365 		if (k == 0) {
4366 			put_unaligned_be32(rlen, &arr[0]);
4367 			++lun_p;
4368 			j = 1;
4369 		}
4370 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4371 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4372 				break;
4373 			int_to_scsilun(lun++, lun_p);
4374 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4375 				lun_p->scsi_lun[0] |= 0x40;
4376 		}
4377 		if (j < RL_BUCKET_ELEMS)
4378 			break;
4379 		n = j * sz_lun;
4380 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4381 		if (res)
4382 			return res;
4383 		off_rsp += n;
4384 	}
4385 	if (wlun_cnt) {
4386 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4387 		++j;
4388 	}
4389 	if (j > 0)
4390 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4391 	return res;
4392 }
4393 
4394 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4395 {
4396 	bool is_bytchk3 = false;
4397 	u8 bytchk;
4398 	int ret, j;
4399 	u32 vnum, a_num, off;
4400 	const u32 lb_size = sdebug_sector_size;
4401 	u64 lba;
4402 	u8 *arr;
4403 	u8 *cmd = scp->cmnd;
4404 	struct sdeb_store_info *sip = devip2sip(devip, true);
4405 
4406 	bytchk = (cmd[1] >> 1) & 0x3;
4407 	if (bytchk == 0) {
4408 		return 0;	/* always claim internal verify okay */
4409 	} else if (bytchk == 2) {
4410 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4411 		return check_condition_result;
4412 	} else if (bytchk == 3) {
4413 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4414 	}
4415 	switch (cmd[0]) {
4416 	case VERIFY_16:
4417 		lba = get_unaligned_be64(cmd + 2);
4418 		vnum = get_unaligned_be32(cmd + 10);
4419 		break;
4420 	case VERIFY:		/* is VERIFY(10) */
4421 		lba = get_unaligned_be32(cmd + 2);
4422 		vnum = get_unaligned_be16(cmd + 7);
4423 		break;
4424 	default:
4425 		mk_sense_invalid_opcode(scp);
4426 		return check_condition_result;
4427 	}
4428 	if (vnum == 0)
4429 		return 0;	/* not an error */
4430 	a_num = is_bytchk3 ? 1 : vnum;
4431 	/* Treat following check like one for read (i.e. no write) access */
4432 	ret = check_device_access_params(scp, lba, a_num, false);
4433 	if (ret)
4434 		return ret;
4435 
4436 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4437 	if (!arr) {
4438 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4439 				INSUFF_RES_ASCQ);
4440 		return check_condition_result;
4441 	}
4442 	/* Not changing store, so only need read access */
4443 	sdeb_read_lock(sip);
4444 
4445 	ret = do_dout_fetch(scp, a_num, arr);
4446 	if (ret == -1) {
4447 		ret = DID_ERROR << 16;
4448 		goto cleanup;
4449 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4450 		sdev_printk(KERN_INFO, scp->device,
4451 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4452 			    my_name, __func__, a_num * lb_size, ret);
4453 	}
4454 	if (is_bytchk3) {
4455 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4456 			memcpy(arr + off, arr, lb_size);
4457 	}
4458 	ret = 0;
4459 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4460 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4461 		ret = check_condition_result;
4462 		goto cleanup;
4463 	}
4464 cleanup:
4465 	sdeb_read_unlock(sip);
4466 	kfree(arr);
4467 	return ret;
4468 }
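
/*
 * BYTCHK handling above, in brief: 0 -> claim the medium verified OK with
 * no data-out comparison; 1 -> the data-out buffer carries vnum blocks to
 * compare; 3 -> a single block is sent and compared against each block in
 * the range (hence the memcpy() fan-out); 2 is rejected as invalid.
 */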
4469 
4470 #define RZONES_DESC_HD 64
4471 
4472 /* Report zones depending on start LBA and reporting options */
4473 static int resp_report_zones(struct scsi_cmnd *scp,
4474 			     struct sdebug_dev_info *devip)
4475 {
4476 	unsigned int rep_max_zones, nrz = 0;
4477 	int ret = 0;
4478 	u32 alloc_len, rep_opts, rep_len;
4479 	bool partial;
4480 	u64 lba, zs_lba;
4481 	u8 *arr = NULL, *desc;
4482 	u8 *cmd = scp->cmnd;
4483 	struct sdeb_zone_state *zsp = NULL;
4484 	struct sdeb_store_info *sip = devip2sip(devip, false);
4485 
4486 	if (!sdebug_dev_is_zoned(devip)) {
4487 		mk_sense_invalid_opcode(scp);
4488 		return check_condition_result;
4489 	}
4490 	zs_lba = get_unaligned_be64(cmd + 2);
4491 	alloc_len = get_unaligned_be32(cmd + 10);
4492 	if (alloc_len == 0)
4493 		return 0;	/* not an error */
4494 	rep_opts = cmd[14] & 0x3f;
4495 	partial = cmd[14] & 0x80;
4496 
4497 	if (zs_lba >= sdebug_capacity) {
4498 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4499 		return check_condition_result;
4500 	}
4501 
4502 	rep_max_zones = (alloc_len < 64) ? 0 : (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4503 
4504 	arr = kzalloc(max_t(u32, alloc_len, 64), GFP_ATOMIC | __GFP_NOWARN);
4505 	if (!arr) {
4506 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4507 				INSUFF_RES_ASCQ);
4508 		return check_condition_result;
4509 	}
4510 
4511 	sdeb_read_lock(sip);
4512 
4513 	desc = arr + 64;
4514 	for (lba = zs_lba; lba < sdebug_capacity;
4515 	     lba = zsp->z_start + zsp->z_size) {
4516 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4517 			break;
4518 		zsp = zbc_zone(devip, lba);
4519 		switch (rep_opts) {
4520 		case 0x00:
4521 			/* All zones */
4522 			break;
4523 		case 0x01:
4524 			/* Empty zones */
4525 			if (zsp->z_cond != ZC1_EMPTY)
4526 				continue;
4527 			break;
4528 		case 0x02:
4529 			/* Implicit open zones */
4530 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4531 				continue;
4532 			break;
4533 		case 0x03:
4534 			/* Explicit open zones */
4535 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4536 				continue;
4537 			break;
4538 		case 0x04:
4539 			/* Closed zones */
4540 			if (zsp->z_cond != ZC4_CLOSED)
4541 				continue;
4542 			break;
4543 		case 0x05:
4544 			/* Full zones */
4545 			if (zsp->z_cond != ZC5_FULL)
4546 				continue;
4547 			break;
4548 		case 0x06:
4549 		case 0x07:
4550 		case 0x10:
4551 			/*
4552 			 * Read-only, offline and reset-WP-recommended
4553 			 * zones are not emulated: no zones to report.
4554 			 */
4555 			continue;
4556 		case 0x11:
4557 			/* non-seq-resource set */
4558 			if (!zsp->z_non_seq_resource)
4559 				continue;
4560 			break;
4561 		case 0x3e:
4562 			/* All zones except gap zones. */
4563 			if (zbc_zone_is_gap(zsp))
4564 				continue;
4565 			break;
4566 		case 0x3f:
4567 			/* Not write pointer (conventional) zones */
4568 			if (zbc_zone_is_seq(zsp))
4569 				continue;
4570 			break;
4571 		default:
4572 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4573 					INVALID_FIELD_IN_CDB, 0);
4574 			ret = check_condition_result;
4575 			goto fini;
4576 		}
4577 
4578 		if (nrz < rep_max_zones) {
4579 			/* Fill zone descriptor */
4580 			desc[0] = zsp->z_type;
4581 			desc[1] = zsp->z_cond << 4;
4582 			if (zsp->z_non_seq_resource)
4583 				desc[1] |= 1 << 1;
4584 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4585 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4586 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4587 			desc += 64;
4588 		}
4589 
4590 		if (partial && nrz >= rep_max_zones)
4591 			break;
4592 
4593 		nrz++;
4594 	}
4595 
4596 	/* Report header */
4597 	/* Zone list length. */
4598 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4599 	/* Maximum LBA */
4600 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4601 	/* Zone starting LBA granularity. */
4602 	if (devip->zcap < devip->zsize)
4603 		put_unaligned_be64(devip->zsize, arr + 16);
4604 
4605 	rep_len = (unsigned long)desc - (unsigned long)arr;
4606 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4607 
4608 fini:
4609 	sdeb_read_unlock(sip);
4610 	kfree(arr);
4611 	return ret;
4612 }
4613 
4614 /* Logic transplanted from tcmu-runner, file_zbc.c */
4615 static void zbc_open_all(struct sdebug_dev_info *devip)
4616 {
4617 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4618 	unsigned int i;
4619 
4620 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4621 		if (zsp->z_cond == ZC4_CLOSED)
4622 			zbc_open_zone(devip, &devip->zstate[i], true);
4623 	}
4624 }
4625 
4626 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4627 {
4628 	int res = 0;
4629 	u64 z_id;
4630 	enum sdebug_z_cond zc;
4631 	u8 *cmd = scp->cmnd;
4632 	struct sdeb_zone_state *zsp;
4633 	bool all = cmd[14] & 0x01;
4634 	struct sdeb_store_info *sip = devip2sip(devip, false);
4635 
4636 	if (!sdebug_dev_is_zoned(devip)) {
4637 		mk_sense_invalid_opcode(scp);
4638 		return check_condition_result;
4639 	}
4640 
4641 	sdeb_write_lock(sip);
4642 
4643 	if (all) {
4644 		/* Check if all closed zones can be opened */
4645 		if (devip->max_open &&
4646 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4647 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4648 					INSUFF_ZONE_ASCQ);
4649 			res = check_condition_result;
4650 			goto fini;
4651 		}
4652 		/* Open all closed zones */
4653 		zbc_open_all(devip);
4654 		goto fini;
4655 	}
4656 
4657 	/* Open the specified zone */
4658 	z_id = get_unaligned_be64(cmd + 2);
4659 	if (z_id >= sdebug_capacity) {
4660 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4661 		res = check_condition_result;
4662 		goto fini;
4663 	}
4664 
4665 	zsp = zbc_zone(devip, z_id);
4666 	if (z_id != zsp->z_start) {
4667 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4668 		res = check_condition_result;
4669 		goto fini;
4670 	}
4671 	if (zbc_zone_is_conv(zsp)) {
4672 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4673 		res = check_condition_result;
4674 		goto fini;
4675 	}
4676 
4677 	zc = zsp->z_cond;
4678 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4679 		goto fini;
4680 
4681 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4682 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4683 				INSUFF_ZONE_ASCQ);
4684 		res = check_condition_result;
4685 		goto fini;
4686 	}
4687 
4688 	zbc_open_zone(devip, zsp, true);
4689 fini:
4690 	sdeb_write_unlock(sip);
4691 	return res;
4692 }
4693 
4694 static void zbc_close_all(struct sdebug_dev_info *devip)
4695 {
4696 	unsigned int i;
4697 
4698 	for (i = 0; i < devip->nr_zones; i++)
4699 		zbc_close_zone(devip, &devip->zstate[i]);
4700 }
4701 
4702 static int resp_close_zone(struct scsi_cmnd *scp,
4703 			   struct sdebug_dev_info *devip)
4704 {
4705 	int res = 0;
4706 	u64 z_id;
4707 	u8 *cmd = scp->cmnd;
4708 	struct sdeb_zone_state *zsp;
4709 	bool all = cmd[14] & 0x01;
4710 	struct sdeb_store_info *sip = devip2sip(devip, false);
4711 
4712 	if (!sdebug_dev_is_zoned(devip)) {
4713 		mk_sense_invalid_opcode(scp);
4714 		return check_condition_result;
4715 	}
4716 
4717 	sdeb_write_lock(sip);
4718 
4719 	if (all) {
4720 		zbc_close_all(devip);
4721 		goto fini;
4722 	}
4723 
4724 	/* Close specified zone */
4725 	z_id = get_unaligned_be64(cmd + 2);
4726 	if (z_id >= sdebug_capacity) {
4727 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4728 		res = check_condition_result;
4729 		goto fini;
4730 	}
4731 
4732 	zsp = zbc_zone(devip, z_id);
4733 	if (z_id != zsp->z_start) {
4734 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4735 		res = check_condition_result;
4736 		goto fini;
4737 	}
4738 	if (zbc_zone_is_conv(zsp)) {
4739 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4740 		res = check_condition_result;
4741 		goto fini;
4742 	}
4743 
4744 	zbc_close_zone(devip, zsp);
4745 fini:
4746 	sdeb_write_unlock(sip);
4747 	return res;
4748 }
4749 
4750 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4751 			    struct sdeb_zone_state *zsp, bool empty)
4752 {
4753 	enum sdebug_z_cond zc = zsp->z_cond;
4754 
4755 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4756 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4757 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4758 			zbc_close_zone(devip, zsp);
4759 		if (zsp->z_cond == ZC4_CLOSED)
4760 			devip->nr_closed--;
4761 		zsp->z_wp = zsp->z_start + zsp->z_size;
4762 		zsp->z_cond = ZC5_FULL;
4763 	}
4764 }
4765 
4766 static void zbc_finish_all(struct sdebug_dev_info *devip)
4767 {
4768 	unsigned int i;
4769 
4770 	for (i = 0; i < devip->nr_zones; i++)
4771 		zbc_finish_zone(devip, &devip->zstate[i], false);
4772 }
4773 
4774 static int resp_finish_zone(struct scsi_cmnd *scp,
4775 			    struct sdebug_dev_info *devip)
4776 {
4777 	struct sdeb_zone_state *zsp;
4778 	int res = 0;
4779 	u64 z_id;
4780 	u8 *cmd = scp->cmnd;
4781 	bool all = cmd[14] & 0x01;
4782 	struct sdeb_store_info *sip = devip2sip(devip, false);
4783 
4784 	if (!sdebug_dev_is_zoned(devip)) {
4785 		mk_sense_invalid_opcode(scp);
4786 		return check_condition_result;
4787 	}
4788 
4789 	sdeb_write_lock(sip);
4790 
4791 	if (all) {
4792 		zbc_finish_all(devip);
4793 		goto fini;
4794 	}
4795 
4796 	/* Finish the specified zone */
4797 	z_id = get_unaligned_be64(cmd + 2);
4798 	if (z_id >= sdebug_capacity) {
4799 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4800 		res = check_condition_result;
4801 		goto fini;
4802 	}
4803 
4804 	zsp = zbc_zone(devip, z_id);
4805 	if (z_id != zsp->z_start) {
4806 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4807 		res = check_condition_result;
4808 		goto fini;
4809 	}
4810 	if (zbc_zone_is_conv(zsp)) {
4811 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4812 		res = check_condition_result;
4813 		goto fini;
4814 	}
4815 
4816 	zbc_finish_zone(devip, zsp, true);
4817 fini:
4818 	sdeb_write_unlock(sip);
4819 	return res;
4820 }
4821 
4822 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4823 			 struct sdeb_zone_state *zsp)
4824 {
4825 	enum sdebug_z_cond zc;
4826 	struct sdeb_store_info *sip = devip2sip(devip, false);
4827 
4828 	if (!zbc_zone_is_seq(zsp))
4829 		return;
4830 
4831 	zc = zsp->z_cond;
4832 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4833 		zbc_close_zone(devip, zsp);
4834 
4835 	if (zsp->z_cond == ZC4_CLOSED)
4836 		devip->nr_closed--;
4837 
4838 	if (zsp->z_wp > zsp->z_start)
4839 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4840 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4841 
4842 	zsp->z_non_seq_resource = false;
4843 	zsp->z_wp = zsp->z_start;
4844 	zsp->z_cond = ZC1_EMPTY;
4845 }
4846 
4847 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4848 {
4849 	unsigned int i;
4850 
4851 	for (i = 0; i < devip->nr_zones; i++)
4852 		zbc_rwp_zone(devip, &devip->zstate[i]);
4853 }
4854 
4855 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4856 {
4857 	struct sdeb_zone_state *zsp;
4858 	int res = 0;
4859 	u64 z_id;
4860 	u8 *cmd = scp->cmnd;
4861 	bool all = cmd[14] & 0x01;
4862 	struct sdeb_store_info *sip = devip2sip(devip, false);
4863 
4864 	if (!sdebug_dev_is_zoned(devip)) {
4865 		mk_sense_invalid_opcode(scp);
4866 		return check_condition_result;
4867 	}
4868 
4869 	sdeb_write_lock(sip);
4870 
4871 	if (all) {
4872 		zbc_rwp_all(devip);
4873 		goto fini;
4874 	}
4875 
4876 	z_id = get_unaligned_be64(cmd + 2);
4877 	if (z_id >= sdebug_capacity) {
4878 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4879 		res = check_condition_result;
4880 		goto fini;
4881 	}
4882 
4883 	zsp = zbc_zone(devip, z_id);
4884 	if (z_id != zsp->z_start) {
4885 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4886 		res = check_condition_result;
4887 		goto fini;
4888 	}
4889 	if (zbc_zone_is_conv(zsp)) {
4890 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4891 		res = check_condition_result;
4892 		goto fini;
4893 	}
4894 
4895 	zbc_rwp_zone(devip, zsp);
4896 fini:
4897 	sdeb_write_unlock(sip);
4898 	return res;
4899 }
4900 
4901 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4902 {
4903 	u16 hwq;
4904 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4905 
4906 	hwq = blk_mq_unique_tag_to_hwq(tag);
4907 
4908 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4909 	if (WARN_ON_ONCE(hwq >= submit_queues))
4910 		hwq = 0;
4911 
4912 	return sdebug_q_arr + hwq;
4913 }
4914 
4915 static u32 get_tag(struct scsi_cmnd *cmnd)
4916 {
4917 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4918 }
4919 
4920 /* Queued (deferred) command completions converge here. */
4921 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4922 {
4923 	bool aborted = sd_dp->aborted;
4924 	int qc_idx;
4925 	int retiring = 0;
4926 	unsigned long iflags;
4927 	struct sdebug_queue *sqp;
4928 	struct sdebug_queued_cmd *sqcp;
4929 	struct scsi_cmnd *scp;
4930 
4931 	if (unlikely(aborted))
4932 		sd_dp->aborted = false;
4933 	qc_idx = sd_dp->qc_idx;
4934 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4935 	if (sdebug_statistics) {
4936 		atomic_inc(&sdebug_completions);
4937 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4938 			atomic_inc(&sdebug_miss_cpus);
4939 	}
4940 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4941 		pr_err("wild qc_idx=%d\n", qc_idx);
4942 		return;
4943 	}
4944 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4945 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4946 	sqcp = &sqp->qc_arr[qc_idx];
4947 	scp = sqcp->a_cmnd;
4948 	if (unlikely(scp == NULL)) {
4949 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4950 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4951 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4952 		return;
4953 	}
4954 
4955 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4956 		retiring = 1;
4957 
4958 	sqcp->a_cmnd = NULL;
4959 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4960 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4961 		pr_err("Unexpected completion\n");
4962 		return;
4963 	}
4964 
4965 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4966 		int k, retval;
4967 
4968 		retval = atomic_read(&retired_max_queue);
4969 		if (qc_idx >= retval) {
4970 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4971 			pr_err("index %d too large\n", retval);
4972 			return;
4973 		}
4974 		k = find_last_bit(sqp->in_use_bm, retval);
4975 		if ((k < sdebug_max_queue) || (k == retval))
4976 			atomic_set(&retired_max_queue, 0);
4977 		else
4978 			atomic_set(&retired_max_queue, k + 1);
4979 	}
4980 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4981 	if (unlikely(aborted)) {
4982 		if (sdebug_verbose)
4983 			pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4984 		blk_abort_request(scsi_cmd_to_rq(scp));
4985 		return;
4986 	}
4987 	scsi_done(scp); /* callback to mid level */
4988 }
4989 
4990 /* Called when the high-resolution timer fires; completes the deferred command. */
4991 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4992 {
4993 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4994 						  hrt);
4995 	sdebug_q_cmd_complete(sd_dp);
4996 	return HRTIMER_NORESTART;
4997 }
4998 
4999 /* Called when the work queue runs the deferred work item. */
5000 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5001 {
5002 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5003 						  ew.work);
5004 	sdebug_q_cmd_complete(sd_dp);
5005 }
5006 
5007 static bool got_shared_uuid;
5008 static uuid_t shared_uuid;
5009 
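/* Compute the zone layout (zone size, zone capacity, and the number of
 * conventional, sequential and gap zones) for a ZBC device and allocate its
 * per-zone state array.
 */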
5010 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5011 {
5012 	struct sdeb_zone_state *zsp;
5013 	sector_t capacity = get_sdebug_capacity();
5014 	sector_t conv_capacity;
5015 	sector_t zstart = 0;
5016 	unsigned int i;
5017 
5018 	/*
5019 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5020 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5021 	 * use the specified zone size, checking that at least 2 zones can be
5022 	 * created for the device.
5023 	 */
5024 	if (!sdeb_zbc_zone_size_mb) {
5025 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5026 			>> ilog2(sdebug_sector_size);
5027 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5028 			devip->zsize >>= 1;
5029 		if (devip->zsize < 2) {
5030 			pr_err("Device capacity too small\n");
5031 			return -EINVAL;
5032 		}
5033 	} else {
5034 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5035 			pr_err("Zone size is not a power of 2\n");
5036 			return -EINVAL;
5037 		}
5038 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5039 			>> ilog2(sdebug_sector_size);
5040 		if (devip->zsize >= capacity) {
5041 			pr_err("Zone size too large for device capacity\n");
5042 			return -EINVAL;
5043 		}
5044 	}
5045 
5046 	devip->zsize_shift = ilog2(devip->zsize);
5047 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5048 
5049 	if (sdeb_zbc_zone_cap_mb == 0) {
5050 		devip->zcap = devip->zsize;
5051 	} else {
5052 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5053 			      ilog2(sdebug_sector_size);
5054 		if (devip->zcap > devip->zsize) {
5055 			pr_err("Zone capacity too large\n");
5056 			return -EINVAL;
5057 		}
5058 	}
5059 
5060 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5061 	if (conv_capacity >= capacity) {
5062 		pr_err("Number of conventional zones too large\n");
5063 		return -EINVAL;
5064 	}
5065 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5066 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5067 			      devip->zsize_shift;
5068 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5069 
5070 	/* Add gap zones if zone capacity is smaller than the zone size */
5071 	if (devip->zcap < devip->zsize)
5072 		devip->nr_zones += devip->nr_seq_zones;
5073 
5074 	if (devip->zmodel == BLK_ZONED_HM) {
5075 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5076 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5077 			devip->max_open = (devip->nr_zones - 1) / 2;
5078 		else
5079 			devip->max_open = sdeb_zbc_max_open;
5080 	}
5081 
5082 	devip->zstate = kcalloc(devip->nr_zones,
5083 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5084 	if (!devip->zstate)
5085 		return -ENOMEM;
5086 
5087 	for (i = 0; i < devip->nr_zones; i++) {
5088 		zsp = &devip->zstate[i];
5089 
5090 		zsp->z_start = zstart;
5091 
5092 		if (i < devip->nr_conv_zones) {
5093 			zsp->z_type = ZBC_ZTYPE_CNV;
5094 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5095 			zsp->z_wp = (sector_t)-1;
5096 			zsp->z_size =
5097 				min_t(u64, devip->zsize, capacity - zstart);
5098 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5099 			if (devip->zmodel == BLK_ZONED_HM)
5100 				zsp->z_type = ZBC_ZTYPE_SWR;
5101 			else
5102 				zsp->z_type = ZBC_ZTYPE_SWP;
5103 			zsp->z_cond = ZC1_EMPTY;
5104 			zsp->z_wp = zsp->z_start;
5105 			zsp->z_size =
5106 				min_t(u64, devip->zcap, capacity - zstart);
5107 		} else {
5108 			zsp->z_type = ZBC_ZTYPE_GAP;
5109 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5110 			zsp->z_wp = (sector_t)-1;
5111 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5112 					    capacity - zstart);
5113 		}
5114 
5115 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5116 		zstart += zsp->z_size;
5117 	}
5118 
5119 	return 0;
5120 }
5121 
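/* Allocate and initialize a new device, optionally giving it a (possibly
 * shared) UUID-based LU name and ZBC zone state, and add it to the host's
 * device list.
 */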
5122 static struct sdebug_dev_info *sdebug_device_create(
5123 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5124 {
5125 	struct sdebug_dev_info *devip;
5126 
5127 	devip = kzalloc(sizeof(*devip), flags);
5128 	if (devip) {
5129 		if (sdebug_uuid_ctl == 1)
5130 			uuid_gen(&devip->lu_name);
5131 		else if (sdebug_uuid_ctl == 2) {
5132 			if (got_shared_uuid)
5133 				devip->lu_name = shared_uuid;
5134 			else {
5135 				uuid_gen(&shared_uuid);
5136 				got_shared_uuid = true;
5137 				devip->lu_name = shared_uuid;
5138 			}
5139 		}
5140 		devip->sdbg_host = sdbg_host;
5141 		if (sdeb_zbc_in_use) {
5142 			devip->zmodel = sdeb_zbc_model;
5143 			if (sdebug_device_create_zones(devip)) {
5144 				kfree(devip);
5145 				return NULL;
5146 			}
5147 		} else {
5148 			devip->zmodel = BLK_ZONED_NONE;
5149 		}
5150 		devip->create_ts = ktime_get_boottime();
5151 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5152 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5153 	}
5154 	return devip;
5155 }
5156 
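/* Return the existing device matching @sdev, reuse an unused slot, or
 * create a new device if neither is available.
 */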
5157 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5158 {
5159 	struct sdebug_host_info *sdbg_host;
5160 	struct sdebug_dev_info *open_devip = NULL;
5161 	struct sdebug_dev_info *devip;
5162 
5163 	sdbg_host = shost_to_sdebug_host(sdev->host);
5164 
5165 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5166 		if ((devip->used) && (devip->channel == sdev->channel) &&
5167 		    (devip->target == sdev->id) &&
5168 		    (devip->lun == sdev->lun))
5169 			return devip;
5170 		else {
5171 			if ((!devip->used) && (!open_devip))
5172 				open_devip = devip;
5173 		}
5174 	}
5175 	if (!open_devip) { /* try and make a new one */
5176 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5177 		if (!open_devip) {
5178 			pr_err("out of memory at line %d\n", __LINE__);
5179 			return NULL;
5180 		}
5181 	}
5182 
5183 	open_devip->channel = sdev->channel;
5184 	open_devip->target = sdev->id;
5185 	open_devip->lun = sdev->lun;
5186 	open_devip->sdbg_host = sdbg_host;
5187 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5188 	open_devip->used = true;
5189 	return open_devip;
5190 }
5191 
5192 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5193 {
5194 	if (sdebug_verbose)
5195 		pr_info("slave_alloc <%u %u %u %llu>\n",
5196 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5197 	return 0;
5198 }
5199 
5200 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5201 {
5202 	struct sdebug_dev_info *devip =
5203 			(struct sdebug_dev_info *)sdp->hostdata;
5204 
5205 	if (sdebug_verbose)
5206 		pr_info("slave_configure <%u %u %u %llu>\n",
5207 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5208 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5209 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5210 	if (devip == NULL) {
5211 		devip = find_build_dev_info(sdp);
5212 		if (devip == NULL)
5213 			return 1;  /* no resources, will be marked offline */
5214 	}
5215 	sdp->hostdata = devip;
5216 	if (sdebug_no_uld)
5217 		sdp->no_uld_attach = 1;
5218 	config_cdb_len(sdp);
5219 	return 0;
5220 }
5221 
5222 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5223 {
5224 	struct sdebug_dev_info *devip =
5225 		(struct sdebug_dev_info *)sdp->hostdata;
5226 
5227 	if (sdebug_verbose)
5228 		pr_info("slave_destroy <%u %u %u %llu>\n",
5229 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5230 	if (devip) {
5231 		/* make this slot available for re-use */
5232 		devip->used = false;
5233 		sdp->hostdata = NULL;
5234 	}
5235 }
5236 
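/* Cancel the timer or work item backing a deferred command, if any. */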
5237 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5238 			   enum sdeb_defer_type defer_t)
5239 {
5240 	if (!sd_dp)
5241 		return;
5242 	if (defer_t == SDEB_DEFER_HRT)
5243 		hrtimer_cancel(&sd_dp->hrt);
5244 	else if (defer_t == SDEB_DEFER_WQ)
5245 		cancel_work_sync(&sd_dp->ew.work);
5246 }
5247 
5248 /* If @cmnd is found, delete its timer or work queue and return true;
5249    else return false. */
5250 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5251 {
5252 	unsigned long iflags;
5253 	int j, k, qmax, r_qmax;
5254 	enum sdeb_defer_type l_defer_t;
5255 	struct sdebug_queue *sqp;
5256 	struct sdebug_queued_cmd *sqcp;
5257 	struct sdebug_defer *sd_dp;
5258 
5259 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5260 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5261 		qmax = sdebug_max_queue;
5262 		r_qmax = atomic_read(&retired_max_queue);
5263 		if (r_qmax > qmax)
5264 			qmax = r_qmax;
5265 		for (k = 0; k < qmax; ++k) {
5266 			if (test_bit(k, sqp->in_use_bm)) {
5267 				sqcp = &sqp->qc_arr[k];
5268 				if (cmnd != sqcp->a_cmnd)
5269 					continue;
5270 				/* found */
5271 				sqcp->a_cmnd = NULL;
5272 				sd_dp = sqcp->sd_dp;
5273 				if (sd_dp) {
5274 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5275 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5276 				} else
5277 					l_defer_t = SDEB_DEFER_NONE;
5278 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5279 				stop_qc_helper(sd_dp, l_defer_t);
5280 				clear_bit(k, sqp->in_use_bm);
5281 				return true;
5282 			}
5283 		}
5284 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5285 	}
5286 	return false;
5287 }
5288 
5289 /* Deletes (stops) timers or work queues of all queued commands */
5290 static void stop_all_queued(void)
5291 {
5292 	unsigned long iflags;
5293 	int j, k;
5294 	enum sdeb_defer_type l_defer_t;
5295 	struct sdebug_queue *sqp;
5296 	struct sdebug_queued_cmd *sqcp;
5297 	struct sdebug_defer *sd_dp;
5298 
5299 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5300 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5301 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5302 			if (test_bit(k, sqp->in_use_bm)) {
5303 				sqcp = &sqp->qc_arr[k];
5304 				if (sqcp->a_cmnd == NULL)
5305 					continue;
5306 				sqcp->a_cmnd = NULL;
5307 				sd_dp = sqcp->sd_dp;
5308 				if (sd_dp) {
5309 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5310 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5311 				} else
5312 					l_defer_t = SDEB_DEFER_NONE;
5313 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5314 				stop_qc_helper(sd_dp, l_defer_t);
5315 				clear_bit(k, sqp->in_use_bm);
5316 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5317 			}
5318 		}
5319 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5320 	}
5321 }
5322 
5323 /* Free queued command memory on heap */
5324 static void free_all_queued(void)
5325 {
5326 	int j, k;
5327 	struct sdebug_queue *sqp;
5328 	struct sdebug_queued_cmd *sqcp;
5329 
5330 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5331 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5332 			sqcp = &sqp->qc_arr[k];
5333 			kfree(sqcp->sd_dp);
5334 			sqcp->sd_dp = NULL;
5335 		}
5336 	}
5337 }
5338 
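/* Error handler (EH) callbacks. Each returns SUCCESS; the reset handlers
 * also raise a unit attention on the affected device(s).
 */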
5339 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5340 {
5341 	bool ok;
5342 
5343 	++num_aborts;
5344 
5345 	ok = stop_queued_cmnd(SCpnt);
5346 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5347 		sdev_printk(KERN_INFO, SCpnt->device,
5348 			    "%s: command%s found\n", __func__,
5349 			    ok ? "" : " not");
5350 
5351 	return SUCCESS;
5352 }
5353 
5354 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5355 {
5356 	struct scsi_device *sdp = SCpnt->device;
5357 	struct sdebug_dev_info *devip = sdp->hostdata;
5358 
5359 	++num_dev_resets;
5360 
5361 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5362 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5363 	if (devip)
5364 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5365 
5366 	return SUCCESS;
5367 }
5368 
5369 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5370 {
5371 	struct scsi_device *sdp = SCpnt->device;
5372 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5373 	struct sdebug_dev_info *devip;
5374 	int k = 0;
5375 
5376 	++num_target_resets;
5377 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5378 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5379 
5380 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5381 		if (devip->target == sdp->id) {
5382 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5383 			++k;
5384 		}
5385 	}
5386 
5387 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5388 		sdev_printk(KERN_INFO, sdp,
5389 			    "%s: %d device(s) found in target\n", __func__, k);
5390 
5391 	return SUCCESS;
5392 }
5393 
5394 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5395 {
5396 	struct scsi_device *sdp = SCpnt->device;
5397 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5398 	struct sdebug_dev_info *devip;
5399 	int k = 0;
5400 
5401 	++num_bus_resets;
5402 
5403 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5404 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5405 
5406 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5407 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5408 		++k;
5409 	}
5410 
5411 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5412 		sdev_printk(KERN_INFO, sdp,
5413 			    "%s: %d device(s) found in host\n", __func__, k);
5414 	return SUCCESS;
5415 }
5416 
5417 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5418 {
5419 	struct sdebug_host_info *sdbg_host;
5420 	struct sdebug_dev_info *devip;
5421 	int k = 0;
5422 
5423 	++num_host_resets;
5424 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5425 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5426 	spin_lock(&sdebug_host_list_lock);
5427 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5428 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5429 				    dev_list) {
5430 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5431 			++k;
5432 		}
5433 	}
5434 	spin_unlock(&sdebug_host_list_lock);
5435 	stop_all_queued();
5436 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5437 		sdev_printk(KERN_INFO, SCpnt->device,
5438 			    "%s: %d device(s) found\n", __func__, k);
5439 	return SUCCESS;
5440 }
5441 
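/* Write an MBR (MS-DOS style) partition table describing sdebug_num_parts
 * equal-sized partitions into the first sector of the ram disk store.
 */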
5442 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5443 {
5444 	struct msdos_partition *pp;
5445 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5446 	int sectors_per_part, num_sectors, k;
5447 	int heads_by_sects, start_sec, end_sec;
5448 
5449 	/* assume partition table already zeroed */
5450 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5451 		return;
5452 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5453 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5454 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5455 	}
5456 	num_sectors = (int)get_sdebug_capacity();
5457 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5458 			   / sdebug_num_parts;
5459 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5460 	starts[0] = sdebug_sectors_per;
5461 	max_part_secs = sectors_per_part;
5462 	for (k = 1; k < sdebug_num_parts; ++k) {
5463 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5464 			    * heads_by_sects;
5465 		if (starts[k] - starts[k - 1] < max_part_secs)
5466 			max_part_secs = starts[k] - starts[k - 1];
5467 	}
5468 	starts[sdebug_num_parts] = num_sectors;
5469 	starts[sdebug_num_parts + 1] = 0;
5470 
5471 	ramp[510] = 0x55;	/* MBR boot signature (0xAA55, little-endian) */
5472 	ramp[511] = 0xAA;
5473 	pp = (struct msdos_partition *)(ramp + 0x1be);
5474 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5475 		start_sec = starts[k];
5476 		end_sec = starts[k] + max_part_secs - 1;
5477 		pp->boot_ind = 0;
5478 
5479 		pp->cyl = start_sec / heads_by_sects;
5480 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5481 			   / sdebug_sectors_per;
5482 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5483 
5484 		pp->end_cyl = end_sec / heads_by_sects;
5485 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5486 			       / sdebug_sectors_per;
5487 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5488 
5489 		pp->start_sect = cpu_to_le32(start_sec);
5490 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5491 		pp->sys_ind = 0x83;	/* plain Linux partition */
5492 	}
5493 }
5494 
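/* Block or unblock command submission on all queues; used while changing
 * queue-related parameters.
 */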
5495 static void block_unblock_all_queues(bool block)
5496 {
5497 	int j;
5498 	struct sdebug_queue *sqp;
5499 
5500 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5501 		atomic_set(&sqp->blocked, (int)block);
5502 }
5503 
5504 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5505  * commands will be processed normally before triggers occur.
5506  */
5507 static void tweak_cmnd_count(void)
5508 {
5509 	int count, modulo;
5510 
5511 	modulo = abs(sdebug_every_nth);
5512 	if (modulo < 2)
5513 		return;
5514 	block_unblock_all_queues(true);
5515 	count = atomic_read(&sdebug_cmnd_count);
5516 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5517 	block_unblock_all_queues(false);
5518 }
5519 
5520 static void clear_queue_stats(void)
5521 {
5522 	atomic_set(&sdebug_cmnd_count, 0);
5523 	atomic_set(&sdebug_completions, 0);
5524 	atomic_set(&sdebug_miss_cpus, 0);
5525 	atomic_set(&sdebug_a_tsf, 0);
5526 }
5527 
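/* Return true if the every_nth error-injection trigger fires for this
 * command.
 */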
5528 static bool inject_on_this_cmd(void)
5529 {
5530 	if (sdebug_every_nth == 0)
5531 		return false;
5532 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5533 }
5534 
5535 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5536 
5537 /* Complete the processing of the thread that queued a SCSI command to this
5538  * driver. It either completes the command by calling scsi_done() or
5539  * schedules an hrtimer or work queue and then returns 0. Returns
5540  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5541  */
5542 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5543 			 int scsi_result,
5544 			 int (*pfp)(struct scsi_cmnd *,
5545 				    struct sdebug_dev_info *),
5546 			 int delta_jiff, int ndelay)
5547 {
5548 	bool new_sd_dp;
5549 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5550 	int k;
5551 	unsigned long iflags;
5552 	u64 ns_from_boot = 0;
5553 	struct sdebug_queue *sqp;
5554 	struct sdebug_queued_cmd *sqcp;
5555 	struct scsi_device *sdp;
5556 	struct sdebug_defer *sd_dp;
5557 
5558 	if (unlikely(devip == NULL)) {
5559 		if (scsi_result == 0)
5560 			scsi_result = DID_NO_CONNECT << 16;
5561 		goto respond_in_thread;
5562 	}
5563 	sdp = cmnd->device;
5564 
5565 	if (delta_jiff == 0)
5566 		goto respond_in_thread;
5567 
5568 	sqp = get_queue(cmnd);
5569 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5570 	if (unlikely(atomic_read(&sqp->blocked))) {
5571 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5572 		return SCSI_MLQUEUE_HOST_BUSY;
5573 	}
5574 
5575 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5576 		     (scsi_result == 0))) {
5577 		int num_in_q = scsi_device_busy(sdp);
5578 		int qdepth = cmnd->device->queue_depth;
5579 
5580 		if ((num_in_q == qdepth) &&
5581 		    (atomic_inc_return(&sdebug_a_tsf) >=
5582 		     abs(sdebug_every_nth))) {
5583 			atomic_set(&sdebug_a_tsf, 0);
5584 			scsi_result = device_qfull_result;
5585 
5586 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5587 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5588 					    __func__, num_in_q);
5589 		}
5590 	}
5591 
5592 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5593 	if (unlikely(k >= sdebug_max_queue)) {
5594 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5595 		if (scsi_result)
5596 			goto respond_in_thread;
5597 		scsi_result = device_qfull_result;
5598 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5599 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5600 				    __func__, sdebug_max_queue);
5601 		goto respond_in_thread;
5602 	}
5603 	set_bit(k, sqp->in_use_bm);
5604 	sqcp = &sqp->qc_arr[k];
5605 	sqcp->a_cmnd = cmnd;
5606 	cmnd->host_scribble = (unsigned char *)sqcp;
5607 	sd_dp = sqcp->sd_dp;
5608 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5609 
5610 	if (!sd_dp) {
5611 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5612 		if (!sd_dp) {
5613 			clear_bit(k, sqp->in_use_bm);
5614 			return SCSI_MLQUEUE_HOST_BUSY;
5615 		}
5616 		new_sd_dp = true;
5617 	} else {
5618 		new_sd_dp = false;
5619 	}
5620 
5621 	/* Set the hostwide tag */
5622 	if (sdebug_host_max_queue)
5623 		sd_dp->hc_idx = get_tag(cmnd);
5624 
5625 	if (polled)
5626 		ns_from_boot = ktime_get_boottime_ns();
5627 
5628 	/* one of the resp_*() response functions is called here */
5629 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5630 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5631 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5632 		delta_jiff = ndelay = 0;
5633 	}
5634 	if (cmnd->result == 0 && scsi_result != 0)
5635 		cmnd->result = scsi_result;
5636 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5637 		if (atomic_read(&sdeb_inject_pending)) {
5638 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5639 			atomic_set(&sdeb_inject_pending, 0);
5640 			cmnd->result = check_condition_result;
5641 		}
5642 	}
5643 
5644 	if (unlikely(sdebug_verbose && cmnd->result))
5645 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5646 			    __func__, cmnd->result);
5647 
5648 	if (delta_jiff > 0 || ndelay > 0) {
5649 		ktime_t kt;
5650 
5651 		if (delta_jiff > 0) {
5652 			u64 ns = jiffies_to_nsecs(delta_jiff);
5653 
5654 			if (sdebug_random && ns < U32_MAX) {
5655 				ns = get_random_u32_below((u32)ns);
5656 			} else if (sdebug_random) {
5657 				ns >>= 12;	/* scale to 4 usec precision */
5658 				if (ns < U32_MAX)	/* over 4 hours max */
5659 					ns = get_random_u32_below((u32)ns);
5660 				ns <<= 12;
5661 			}
5662 			kt = ns_to_ktime(ns);
5663 		} else {	/* ndelay has a 4.2 second max */
5664 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5665 					     (u32)ndelay;
5666 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5667 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5668 
5669 				if (kt <= d) {	/* elapsed duration >= kt */
5670 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5671 					sqcp->a_cmnd = NULL;
5672 					clear_bit(k, sqp->in_use_bm);
5673 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5674 					if (new_sd_dp)
5675 						kfree(sd_dp);
5676 					/* call scsi_done() from this thread */
5677 					scsi_done(cmnd);
5678 					return 0;
5679 				}
5680 				/* otherwise reduce kt by elapsed time */
5681 				kt -= d;
5682 			}
5683 		}
5684 		if (polled) {
5685 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5686 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5687 			if (!sd_dp->init_poll) {
5688 				sd_dp->init_poll = true;
5689 				sqcp->sd_dp = sd_dp;
5690 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5691 				sd_dp->qc_idx = k;
5692 			}
5693 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5694 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5695 		} else {
5696 			if (!sd_dp->init_hrt) {
5697 				sd_dp->init_hrt = true;
5698 				sqcp->sd_dp = sd_dp;
5699 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5700 					     HRTIMER_MODE_REL_PINNED);
5701 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5702 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5703 				sd_dp->qc_idx = k;
5704 			}
5705 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5706 			/* schedule the invocation of scsi_done() for a later time */
5707 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5708 		}
5709 		if (sdebug_statistics)
5710 			sd_dp->issuing_cpu = raw_smp_processor_id();
5711 	} else {	/* jdelay < 0, use work queue */
5712 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5713 			     atomic_read(&sdeb_inject_pending))) {
5714 			sd_dp->aborted = true;
5715 			atomic_set(&sdeb_inject_pending, 0);
5716 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5717 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5718 		}
5719 
5720 		if (polled) {
5721 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5722 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5723 			if (!sd_dp->init_poll) {
5724 				sd_dp->init_poll = true;
5725 				sqcp->sd_dp = sd_dp;
5726 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5727 				sd_dp->qc_idx = k;
5728 			}
5729 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5730 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5731 		} else {
5732 			if (!sd_dp->init_wq) {
5733 				sd_dp->init_wq = true;
5734 				sqcp->sd_dp = sd_dp;
5735 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5736 				sd_dp->qc_idx = k;
5737 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5738 			}
5739 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5740 			schedule_work(&sd_dp->ew.work);
5741 		}
5742 		if (sdebug_statistics)
5743 			sd_dp->issuing_cpu = raw_smp_processor_id();
5744 	}
5745 
5746 	return 0;
5747 
5748 respond_in_thread:	/* call back to mid-layer using invocation thread */
5749 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5750 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5751 	if (cmnd->result == 0 && scsi_result != 0)
5752 		cmnd->result = scsi_result;
5753 	scsi_done(cmnd);
5754 	return 0;
5755 }
5756 
5757 /* Note: The following macros create attribute files in the
5758    /sys/module/scsi_debug/parameters directory. Unfortunately this
5759    driver is unaware of such a change and cannot trigger auxiliary actions
5760    as it can when the corresponding attribute in the
5761    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5762  */
5763 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5764 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5765 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5766 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5767 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5768 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5769 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5770 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5771 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5772 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5773 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5774 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5775 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5776 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5777 module_param_string(inq_product, sdebug_inq_product_id,
5778 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5779 module_param_string(inq_rev, sdebug_inq_product_rev,
5780 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5781 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5782 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5783 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5784 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5785 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5786 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5787 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5788 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5789 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5790 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5791 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5792 		   S_IRUGO | S_IWUSR);
5793 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5794 		   S_IRUGO | S_IWUSR);
5795 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5796 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5797 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5798 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5799 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5800 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5801 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5802 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5803 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5804 module_param_named(per_host_store, sdebug_per_host_store, bool,
5805 		   S_IRUGO | S_IWUSR);
5806 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5807 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5808 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5809 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5810 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5811 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5812 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5813 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5814 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5815 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5816 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5817 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5818 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5819 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5820 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5821 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5822 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5823 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5824 		   S_IRUGO | S_IWUSR);
5825 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5826 module_param_named(write_same_length, sdebug_write_same_length, int,
5827 		   S_IRUGO | S_IWUSR);
5828 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5829 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5830 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5831 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5832 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5833 
5834 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5835 MODULE_DESCRIPTION("SCSI debug adapter driver");
5836 MODULE_LICENSE("GPL");
5837 MODULE_VERSION(SDEBUG_VERSION);
5838 
5839 MODULE_PARM_DESC(add_host, "add n hosts; if negative (via sysfs) remove host(s) (def=1)");
5840 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5841 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5842 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5843 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5844 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5845 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5846 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5847 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5848 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5849 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5850 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5851 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5852 MODULE_PARM_DESC(host_max_queue,
5853 		 "host max # of queued cmds (0 to max(def); if !0, max_queue is fixed to the same value)");
5854 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5855 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5856 		 SDEBUG_VERSION "\")");
5857 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5858 MODULE_PARM_DESC(lbprz,
5859 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5860 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5861 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5862 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5863 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5864 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5865 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5866 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5867 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5868 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5869 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5870 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5871 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5872 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5873 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5874 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5875 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5876 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5877 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5878 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5879 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5880 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5881 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
5882 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5883 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5884 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
5885 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5886 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5887 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5888 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5889 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5890 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5891 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5892 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
5893 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5894 MODULE_PARM_DESC(uuid_ctl,
5895 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5896 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5897 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5898 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5899 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5900 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5901 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5902 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5903 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5904 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
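
/* Example (hypothetical values): most of these parameters are set at load
 * time, e.g.:
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 delay=1
 * Parameters marked S_IWUSR above can also be changed at runtime via
 * /sys/module/scsi_debug/parameters/<name> or, preferably (see the note
 * before the macros above), /sys/bus/pseudo/drivers/scsi_debug/<name>.
 */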
5905 
5906 #define SDEBUG_INFO_LEN 256
5907 static char sdebug_info[SDEBUG_INFO_LEN];
5908 
5909 static const char *scsi_debug_info(struct Scsi_Host *shp)
5910 {
5911 	int k;
5912 
5913 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5914 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5915 	if (k >= (SDEBUG_INFO_LEN - 1))
5916 		return sdebug_info;
5917 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5918 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5919 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5920 		  "statistics", (int)sdebug_statistics);
5921 	return sdebug_info;
5922 }
5923 
5924 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5925 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5926 				 int length)
5927 {
5928 	char arr[16];
5929 	int opts;
5930 	int minLen = length > 15 ? 15 : length;
5931 
5932 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5933 		return -EACCES;
5934 	memcpy(arr, buffer, minLen);
5935 	arr[minLen] = '\0';
5936 	if (1 != sscanf(arr, "%d", &opts))
5937 		return -EINVAL;
5938 	sdebug_opts = opts;
5939 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5940 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5941 	if (sdebug_every_nth != 0)
5942 		tweak_cmnd_count();
5943 	return length;
5944 }
5945 
5946 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5947  * same for each scsi_debug host (if more than one). Some of the counters
5948  * shown are not atomic, so they may be inaccurate on a busy system. */
5949 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5950 {
5951 	int f, j, l;
5952 	struct sdebug_queue *sqp;
5953 	struct sdebug_host_info *sdhp;
5954 
5955 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5956 		   SDEBUG_VERSION, sdebug_version_date);
5957 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5958 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5959 		   sdebug_opts, sdebug_every_nth);
5960 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5961 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5962 		   sdebug_sector_size, "bytes");
5963 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5964 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5965 		   num_aborts);
5966 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5967 		   num_dev_resets, num_target_resets, num_bus_resets,
5968 		   num_host_resets);
5969 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5970 		   dix_reads, dix_writes, dif_errors);
5971 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5972 		   sdebug_statistics);
5973 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5974 		   atomic_read(&sdebug_cmnd_count),
5975 		   atomic_read(&sdebug_completions),
5976 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5977 		   atomic_read(&sdebug_a_tsf),
5978 		   atomic_read(&sdeb_mq_poll_count));
5979 
5980 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5981 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5982 		seq_printf(m, "  queue %d:\n", j);
5983 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5984 		if (f != sdebug_max_queue) {
5985 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5986 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5987 				   "first,last bits", f, l);
5988 		}
5989 	}
5990 
5991 	seq_printf(m, "this host_no=%d\n", host->host_no);
5992 	if (!xa_empty(per_store_ap)) {
5993 		bool niu;
5994 		int idx;
5995 		unsigned long l_idx;
5996 		struct sdeb_store_info *sip;
5997 
5998 		seq_puts(m, "\nhost list:\n");
5999 		j = 0;
6000 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6001 			idx = sdhp->si_idx;
6002 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6003 				   sdhp->shost->host_no, idx);
6004 			++j;
6005 		}
6006 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6007 			   sdeb_most_recent_idx);
6008 		j = 0;
6009 		xa_for_each(per_store_ap, l_idx, sip) {
6010 			niu = xa_get_mark(per_store_ap, l_idx,
6011 					  SDEB_XA_NOT_IN_USE);
6012 			idx = (int)l_idx;
6013 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6014 				   (niu ? "  not_in_use" : ""));
6015 			++j;
6016 		}
6017 	}
6018 	return 0;
6019 }
6020 
6021 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6022 {
6023 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6024 }
6025 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6026  * of delay is jiffies.
6027  */
6028 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6029 			   size_t count)
6030 {
6031 	int jdelay, res;
6032 
6033 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6034 		res = count;
6035 		if (sdebug_jdelay != jdelay) {
6036 			int j, k;
6037 			struct sdebug_queue *sqp;
6038 
6039 			block_unblock_all_queues(true);
6040 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6041 			     ++j, ++sqp) {
6042 				k = find_first_bit(sqp->in_use_bm,
6043 						   sdebug_max_queue);
6044 				if (k != sdebug_max_queue) {
6045 					res = -EBUSY;   /* queued commands */
6046 					break;
6047 				}
6048 			}
6049 			if (res > 0) {
6050 				sdebug_jdelay = jdelay;
6051 				sdebug_ndelay = 0;
6052 			}
6053 			block_unblock_all_queues(false);
6054 		}
6055 		return res;
6056 	}
6057 	return -EINVAL;
6058 }
6059 static DRIVER_ATTR_RW(delay);
6060 
6061 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6062 {
6063 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6064 }
6065 /* Returns -EBUSY if ndelay is being changed and commands are queued.
6066  * If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN. */
6067 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6068 			    size_t count)
6069 {
6070 	int ndelay, res;
6071 
6072 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6073 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6074 		res = count;
6075 		if (sdebug_ndelay != ndelay) {
6076 			int j, k;
6077 			struct sdebug_queue *sqp;
6078 
6079 			block_unblock_all_queues(true);
6080 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6081 			     ++j, ++sqp) {
6082 				k = find_first_bit(sqp->in_use_bm,
6083 						   sdebug_max_queue);
6084 				if (k != sdebug_max_queue) {
6085 					res = -EBUSY;   /* queued commands */
6086 					break;
6087 				}
6088 			}
6089 			if (res > 0) {
6090 				sdebug_ndelay = ndelay;
6091 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6092 							: DEF_JDELAY;
6093 			}
6094 			block_unblock_all_queues(false);
6095 		}
6096 		return res;
6097 	}
6098 	return -EINVAL;
6099 }
6100 static DRIVER_ATTR_RW(ndelay);
6101 
6102 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6103 {
6104 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6105 }
6106 
6107 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6108 			  size_t count)
6109 {
6110 	int opts;
6111 	char work[20];
6112 
6113 	if (sscanf(buf, "%10s", work) == 1) {
6114 		if (strncasecmp(work, "0x", 2) == 0) {
6115 			if (kstrtoint(work + 2, 16, &opts) == 0)
6116 				goto opts_done;
6117 		} else {
6118 			if (kstrtoint(work, 10, &opts) == 0)
6119 				goto opts_done;
6120 		}
6121 	}
6122 	return -EINVAL;
6123 opts_done:
6124 	sdebug_opts = opts;
6125 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6126 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6127 	tweak_cmnd_count();
6128 	return count;
6129 }
6130 static DRIVER_ATTR_RW(opts);
6131 
6132 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6133 {
6134 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6135 }
6136 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6137 			   size_t count)
6138 {
6139 	int n;
6140 
6141 	/* Cannot change from or to TYPE_ZBC with sysfs */
6142 	if (sdebug_ptype == TYPE_ZBC)
6143 		return -EINVAL;
6144 
6145 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6146 		if (n == TYPE_ZBC)
6147 			return -EINVAL;
6148 		sdebug_ptype = n;
6149 		return count;
6150 	}
6151 	return -EINVAL;
6152 }
6153 static DRIVER_ATTR_RW(ptype);
6154 
6155 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6156 {
6157 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6158 }
6159 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6160 			    size_t count)
6161 {
6162 	int n;
6163 
6164 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6165 		sdebug_dsense = n;
6166 		return count;
6167 	}
6168 	return -EINVAL;
6169 }
6170 static DRIVER_ATTR_RW(dsense);
6171 
6172 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6173 {
6174 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6175 }
6176 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6177 			     size_t count)
6178 {
6179 	int n, idx;
6180 
6181 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6182 		bool want_store = (n == 0);
6183 		struct sdebug_host_info *sdhp;
6184 
6185 		n = (n > 0);
6186 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6187 		if (sdebug_fake_rw == n)
6188 			return count;	/* not transitioning so do nothing */
6189 
6190 		if (want_store) {	/* 1 --> 0 transition, set up store */
6191 			if (sdeb_first_idx < 0) {
6192 				idx = sdebug_add_store();
6193 				if (idx < 0)
6194 					return idx;
6195 			} else {
6196 				idx = sdeb_first_idx;
6197 				xa_clear_mark(per_store_ap, idx,
6198 					      SDEB_XA_NOT_IN_USE);
6199 			}
6200 			/* make all hosts use same store */
6201 			list_for_each_entry(sdhp, &sdebug_host_list,
6202 					    host_list) {
6203 				if (sdhp->si_idx != idx) {
6204 					xa_set_mark(per_store_ap, sdhp->si_idx,
6205 						    SDEB_XA_NOT_IN_USE);
6206 					sdhp->si_idx = idx;
6207 				}
6208 			}
6209 			sdeb_most_recent_idx = idx;
6210 		} else {	/* 0 --> 1 transition is trigger for shrink */
6211 			sdebug_erase_all_stores(true /* apart from first */);
6212 		}
6213 		sdebug_fake_rw = n;
6214 		return count;
6215 	}
6216 	return -EINVAL;
6217 }
6218 static DRIVER_ATTR_RW(fake_rw);
6219 
6220 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6221 {
6222 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6223 }
6224 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6225 			      size_t count)
6226 {
6227 	int n;
6228 
6229 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6230 		sdebug_no_lun_0 = n;
6231 		return count;
6232 	}
6233 	return -EINVAL;
6234 }
6235 static DRIVER_ATTR_RW(no_lun_0);
6236 
6237 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6238 {
6239 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6240 }
6241 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6242 			      size_t count)
6243 {
6244 	int n;
6245 
6246 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6247 		sdebug_num_tgts = n;
6248 		sdebug_max_tgts_luns();
6249 		return count;
6250 	}
6251 	return -EINVAL;
6252 }
6253 static DRIVER_ATTR_RW(num_tgts);
6254 
6255 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6256 {
6257 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6258 }
6259 static DRIVER_ATTR_RO(dev_size_mb);
6260 
6261 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6262 {
6263 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6264 }
6265 
6266 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6267 				    size_t count)
6268 {
6269 	bool v;
6270 
6271 	if (kstrtobool(buf, &v))
6272 		return -EINVAL;
6273 
6274 	sdebug_per_host_store = v;
6275 	return count;
6276 }
6277 static DRIVER_ATTR_RW(per_host_store);
6278 
6279 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6280 {
6281 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6282 }
6283 static DRIVER_ATTR_RO(num_parts);
6284 
6285 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6286 {
6287 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6288 }
6289 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6290 			       size_t count)
6291 {
6292 	int nth;
6293 	char work[20];
6294 
6295 	if (sscanf(buf, "%10s", work) == 1) {
6296 		if (strncasecmp(work, "0x", 2) == 0) {
6297 			if (kstrtoint(work + 2, 16, &nth) == 0)
6298 				goto every_nth_done;
6299 		} else {
6300 			if (kstrtoint(work, 10, &nth) == 0)
6301 				goto every_nth_done;
6302 		}
6303 	}
6304 	return -EINVAL;
6305 
6306 every_nth_done:
6307 	sdebug_every_nth = nth;
6308 	if (nth && !sdebug_statistics) {
6309 		pr_info("every_nth needs statistics=1, set it\n");
6310 		sdebug_statistics = true;
6311 	}
6312 	tweak_cmnd_count();
6313 	return count;
6314 }
6315 static DRIVER_ATTR_RW(every_nth);
6316 
6317 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6318 {
6319 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6320 }
6321 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6322 				size_t count)
6323 {
6324 	int n;
6325 	bool changed;
6326 
6327 	if (kstrtoint(buf, 0, &n))
6328 		return -EINVAL;
6329 	if (n >= 0) {
6330 		if (n > (int)SAM_LUN_AM_FLAT) {
6331 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6332 			return -EINVAL;
6333 		}
6334 		changed = ((int)sdebug_lun_am != n);
6335 		sdebug_lun_am = n;
6336 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6337 			struct sdebug_host_info *sdhp;
6338 			struct sdebug_dev_info *dp;
6339 
6340 			spin_lock(&sdebug_host_list_lock);
6341 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6342 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6343 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6344 				}
6345 			}
6346 			spin_unlock(&sdebug_host_list_lock);
6347 		}
6348 		return count;
6349 	}
6350 	return -EINVAL;
6351 }
6352 static DRIVER_ATTR_RW(lun_format);
6353 
6354 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6355 {
6356 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6357 }
6358 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6359 			      size_t count)
6360 {
6361 	int n;
6362 	bool changed;
6363 
6364 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6365 		if (n > 256) {
6366 			pr_warn("max_luns can be no more than 256\n");
6367 			return -EINVAL;
6368 		}
6369 		changed = (sdebug_max_luns != n);
6370 		sdebug_max_luns = n;
6371 		sdebug_max_tgts_luns();
6372 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6373 			struct sdebug_host_info *sdhp;
6374 			struct sdebug_dev_info *dp;
6375 
6376 			spin_lock(&sdebug_host_list_lock);
6377 			list_for_each_entry(sdhp, &sdebug_host_list,
6378 					    host_list) {
6379 				list_for_each_entry(dp, &sdhp->dev_info_list,
6380 						    dev_list) {
6381 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6382 						dp->uas_bm);
6383 				}
6384 			}
6385 			spin_unlock(&sdebug_host_list_lock);
6386 		}
6387 		return count;
6388 	}
6389 	return -EINVAL;
6390 }
6391 static DRIVER_ATTR_RW(max_luns);
6392 
6393 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6394 {
6395 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6396 }
6397 /* N.B. max_queue can be changed while there are queued commands. In-flight
6398  * commands beyond the new max_queue will be completed. */
6399 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6400 			       size_t count)
6401 {
6402 	int j, n, k, a;
6403 	struct sdebug_queue *sqp;
6404 
6405 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6406 	    (n <= SDEBUG_CANQUEUE) &&
6407 	    (sdebug_host_max_queue == 0)) {
6408 		block_unblock_all_queues(true);
6409 		k = 0;
6410 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6411 		     ++j, ++sqp) {
6412 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6413 			if (a > k)
6414 				k = a;
6415 		}
6416 		sdebug_max_queue = n;
6417 		if (k == SDEBUG_CANQUEUE)
6418 			atomic_set(&retired_max_queue, 0);
6419 		else if (k >= n)
6420 			atomic_set(&retired_max_queue, k + 1);
6421 		else
6422 			atomic_set(&retired_max_queue, 0);
6423 		block_unblock_all_queues(false);
6424 		return count;
6425 	}
6426 	return -EINVAL;
6427 }
6428 static DRIVER_ATTR_RW(max_queue);
6429 
6430 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6431 {
6432 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6433 }
6434 
6435 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6436 {
6437 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6438 }
6439 
6440 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6441 {
6442 	bool v;
6443 
6444 	if (kstrtobool(buf, &v))
6445 		return -EINVAL;
6446 
6447 	sdebug_no_rwlock = v;
6448 	return count;
6449 }
6450 static DRIVER_ATTR_RW(no_rwlock);
6451 
6452 /*
6453  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6454  * in range [0, sdebug_host_max_queue), we can't change it.
6455  */
6456 static DRIVER_ATTR_RO(host_max_queue);
6457 
6458 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6459 {
6460 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6461 }
6462 static DRIVER_ATTR_RO(no_uld);
6463 
6464 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6465 {
6466 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6467 }
6468 static DRIVER_ATTR_RO(scsi_level);
6469 
6470 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6471 {
6472 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6473 }
6474 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6475 				size_t count)
6476 {
6477 	int n;
6478 	bool changed;
6479 
6480 	/* Ignore capacity change for ZBC drives for now */
6481 	if (sdeb_zbc_in_use)
6482 		return -ENOTSUPP;
6483 
6484 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6485 		changed = (sdebug_virtual_gb != n);
6486 		sdebug_virtual_gb = n;
6487 		sdebug_capacity = get_sdebug_capacity();
6488 		if (changed) {
6489 			struct sdebug_host_info *sdhp;
6490 			struct sdebug_dev_info *dp;
6491 
6492 			spin_lock(&sdebug_host_list_lock);
6493 			list_for_each_entry(sdhp, &sdebug_host_list,
6494 					    host_list) {
6495 				list_for_each_entry(dp, &sdhp->dev_info_list,
6496 						    dev_list) {
6497 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6498 						dp->uas_bm);
6499 				}
6500 			}
6501 			spin_unlock(&sdebug_host_list_lock);
6502 		}
6503 		return count;
6504 	}
6505 	return -EINVAL;
6506 }
6507 static DRIVER_ATTR_RW(virtual_gb);
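
/*
 * Example usage (hypothetical value):
 *     echo 8 > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
 * makes each device report an 8 GiB capacity and, as above, raises the
 * SDEBUG_UA_CAPACITY_CHANGED unit attention on every device so that
 * initiators learn of the resize.
 */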
6508 
6509 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6510 {
6511 	/* show the absolute number of currently active hosts */
6512 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6513 }
6514 
6515 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6516 			      size_t count)
6517 {
6518 	bool found;
6519 	unsigned long idx;
6520 	struct sdeb_store_info *sip;
6521 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6522 	int delta_hosts;
6523 
6524 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6525 		return -EINVAL;
6526 	if (delta_hosts > 0) {
6527 		do {
6528 			found = false;
6529 			if (want_phs) {
6530 				xa_for_each_marked(per_store_ap, idx, sip,
6531 						   SDEB_XA_NOT_IN_USE) {
6532 					sdeb_most_recent_idx = (int)idx;
6533 					found = true;
6534 					break;
6535 				}
6536 				if (found)	/* re-use case */
6537 					sdebug_add_host_helper((int)idx);
6538 				else
6539 					sdebug_do_add_host(true);
6540 			} else {
6541 				sdebug_do_add_host(false);
6542 			}
6543 		} while (--delta_hosts);
6544 	} else if (delta_hosts < 0) {
6545 		do {
6546 			sdebug_do_remove_host(false);
6547 		} while (++delta_hosts);
6548 	}
6549 	return count;
6550 }
6551 static DRIVER_ATTR_RW(add_host);
6552 
6553 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6554 {
6555 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6556 }
6557 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6558 				    size_t count)
6559 {
6560 	int n;
6561 
6562 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6563 		sdebug_vpd_use_hostno = n;
6564 		return count;
6565 	}
6566 	return -EINVAL;
6567 }
6568 static DRIVER_ATTR_RW(vpd_use_hostno);
6569 
6570 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6571 {
6572 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6573 }
6574 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6575 				size_t count)
6576 {
6577 	int n;
6578 
6579 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6580 		if (n > 0)
6581 			sdebug_statistics = true;
6582 		else {
6583 			clear_queue_stats();
6584 			sdebug_statistics = false;
6585 		}
6586 		return count;
6587 	}
6588 	return -EINVAL;
6589 }
6590 static DRIVER_ATTR_RW(statistics);
6591 
6592 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6593 {
6594 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6595 }
6596 static DRIVER_ATTR_RO(sector_size);
6597 
6598 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6599 {
6600 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6601 }
6602 static DRIVER_ATTR_RO(submit_queues);
6603 
6604 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6605 {
6606 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6607 }
6608 static DRIVER_ATTR_RO(dix);
6609 
6610 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6611 {
6612 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6613 }
6614 static DRIVER_ATTR_RO(dif);
6615 
6616 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6617 {
6618 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6619 }
6620 static DRIVER_ATTR_RO(guard);
6621 
6622 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6623 {
6624 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6625 }
6626 static DRIVER_ATTR_RO(ato);
6627 
6628 static ssize_t map_show(struct device_driver *ddp, char *buf)
6629 {
6630 	ssize_t count = 0;
6631 
6632 	if (!scsi_debug_lbp())
6633 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6634 				 sdebug_store_sectors);
6635 
6636 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6637 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6638 
6639 		if (sip)
6640 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6641 					  (int)map_size, sip->map_storep);
6642 	}
6643 	buf[count++] = '\n';
6644 	buf[count] = '\0';
6645 
6646 	return count;
6647 }
6648 static DRIVER_ATTR_RO(map);
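
/*
 * The "%*pbl" format above prints the provisioning bitmap as a range list,
 * so reading the map attribute yields output such as "0-1,48-55"
 * (hypothetical values): the provisioning blocks currently mapped, i.e.
 * written since the last unmap.
 */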
6649 
6650 static ssize_t random_show(struct device_driver *ddp, char *buf)
6651 {
6652 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6653 }
6654 
6655 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6656 			    size_t count)
6657 {
6658 	bool v;
6659 
6660 	if (kstrtobool(buf, &v))
6661 		return -EINVAL;
6662 
6663 	sdebug_random = v;
6664 	return count;
6665 }
6666 static DRIVER_ATTR_RW(random);
6667 
6668 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6669 {
6670 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6671 }
6672 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6673 			       size_t count)
6674 {
6675 	int n;
6676 
6677 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6678 		sdebug_removable = (n > 0);
6679 		return count;
6680 	}
6681 	return -EINVAL;
6682 }
6683 static DRIVER_ATTR_RW(removable);
6684 
6685 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6686 {
6687 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6688 }
6689 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6690 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6691 			       size_t count)
6692 {
6693 	int n;
6694 
6695 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6696 		sdebug_host_lock = (n > 0);
6697 		return count;
6698 	}
6699 	return -EINVAL;
6700 }
6701 static DRIVER_ATTR_RW(host_lock);
6702 
6703 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6704 {
6705 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6706 }
6707 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6708 			    size_t count)
6709 {
6710 	int n;
6711 
6712 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6713 		sdebug_strict = (n > 0);
6714 		return count;
6715 	}
6716 	return -EINVAL;
6717 }
6718 static DRIVER_ATTR_RW(strict);
6719 
6720 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6721 {
6722 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6723 }
6724 static DRIVER_ATTR_RO(uuid_ctl);
6725 
6726 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6727 {
6728 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6729 }
6730 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6731 			     size_t count)
6732 {
6733 	int ret, n;
6734 
6735 	ret = kstrtoint(buf, 0, &n);
6736 	if (ret)
6737 		return ret;
6738 	sdebug_cdb_len = n;
6739 	all_config_cdb_len();
6740 	return count;
6741 }
6742 static DRIVER_ATTR_RW(cdb_len);
6743 
6744 static const char * const zbc_model_strs_a[] = {
6745 	[BLK_ZONED_NONE] = "none",
6746 	[BLK_ZONED_HA]   = "host-aware",
6747 	[BLK_ZONED_HM]   = "host-managed",
6748 };
6749 
6750 static const char * const zbc_model_strs_b[] = {
6751 	[BLK_ZONED_NONE] = "no",
6752 	[BLK_ZONED_HA]   = "aware",
6753 	[BLK_ZONED_HM]   = "managed",
6754 };
6755 
6756 static const char * const zbc_model_strs_c[] = {
6757 	[BLK_ZONED_NONE] = "0",
6758 	[BLK_ZONED_HA]   = "1",
6759 	[BLK_ZONED_HM]   = "2",
6760 };
6761 
6762 static int sdeb_zbc_model_str(const char *cp)
6763 {
6764 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6765 
6766 	if (res < 0) {
6767 		res = sysfs_match_string(zbc_model_strs_b, cp);
6768 		if (res < 0) {
6769 			res = sysfs_match_string(zbc_model_strs_c, cp);
6770 			if (res < 0)
6771 				return -EINVAL;
6772 		}
6773 	}
6774 	return res;
6775 }
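
/*
 * Hence "host-aware", "aware" and "1" all map to BLK_ZONED_HA; the three
 * string tables give long, short and numeric spellings for the same
 * zbc= model parameter.
 */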
6776 
6777 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6778 {
6779 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6780 			 zbc_model_strs_a[sdeb_zbc_model]);
6781 }
6782 static DRIVER_ATTR_RO(zbc);
6783 
6784 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6785 {
6786 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6787 }
6788 static DRIVER_ATTR_RO(tur_ms_to_ready);
6789 
6790 /* Note: The following array creates attribute files in the
6791  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6792  * files (over those found in the /sys/module/scsi_debug/parameters
6793  * directory) is that auxiliary actions can be triggered when an attribute
6794  * is changed. For example see: add_host_store() above.
6795  */
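
/*
 * For example (hypothetical host count), a root shell could add two more
 * hosts and then remove one:
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * Each write invokes add_host_store(), which performs the corresponding
 * host creation or removal.
 */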
6796 
6797 static struct attribute *sdebug_drv_attrs[] = {
6798 	&driver_attr_delay.attr,
6799 	&driver_attr_opts.attr,
6800 	&driver_attr_ptype.attr,
6801 	&driver_attr_dsense.attr,
6802 	&driver_attr_fake_rw.attr,
6803 	&driver_attr_host_max_queue.attr,
6804 	&driver_attr_no_lun_0.attr,
6805 	&driver_attr_num_tgts.attr,
6806 	&driver_attr_dev_size_mb.attr,
6807 	&driver_attr_num_parts.attr,
6808 	&driver_attr_every_nth.attr,
6809 	&driver_attr_lun_format.attr,
6810 	&driver_attr_max_luns.attr,
6811 	&driver_attr_max_queue.attr,
6812 	&driver_attr_no_rwlock.attr,
6813 	&driver_attr_no_uld.attr,
6814 	&driver_attr_scsi_level.attr,
6815 	&driver_attr_virtual_gb.attr,
6816 	&driver_attr_add_host.attr,
6817 	&driver_attr_per_host_store.attr,
6818 	&driver_attr_vpd_use_hostno.attr,
6819 	&driver_attr_sector_size.attr,
6820 	&driver_attr_statistics.attr,
6821 	&driver_attr_submit_queues.attr,
6822 	&driver_attr_dix.attr,
6823 	&driver_attr_dif.attr,
6824 	&driver_attr_guard.attr,
6825 	&driver_attr_ato.attr,
6826 	&driver_attr_map.attr,
6827 	&driver_attr_random.attr,
6828 	&driver_attr_removable.attr,
6829 	&driver_attr_host_lock.attr,
6830 	&driver_attr_ndelay.attr,
6831 	&driver_attr_strict.attr,
6832 	&driver_attr_uuid_ctl.attr,
6833 	&driver_attr_cdb_len.attr,
6834 	&driver_attr_tur_ms_to_ready.attr,
6835 	&driver_attr_zbc.attr,
6836 	NULL,
6837 };
6838 ATTRIBUTE_GROUPS(sdebug_drv);
6839 
6840 static struct device *pseudo_primary;
6841 
6842 static int __init scsi_debug_init(void)
6843 {
6844 	bool want_store = (sdebug_fake_rw == 0);
6845 	unsigned long sz;
6846 	int k, ret, hosts_to_add;
6847 	int idx = -1;
6848 
6849 	ramdisk_lck_a[0] = &atomic_rw;
6850 	ramdisk_lck_a[1] = &atomic_rw2;
6851 	atomic_set(&retired_max_queue, 0);
6852 
6853 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6854 		pr_warn("ndelay must be less than 1 second, ignored\n");
6855 		sdebug_ndelay = 0;
6856 	} else if (sdebug_ndelay > 0)
6857 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6858 
6859 	switch (sdebug_sector_size) {
6860 	case  512:
6861 	case 1024:
6862 	case 2048:
6863 	case 4096:
6864 		break;
6865 	default:
6866 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6867 		return -EINVAL;
6868 	}
6869 
6870 	switch (sdebug_dif) {
6871 	case T10_PI_TYPE0_PROTECTION:
6872 		break;
6873 	case T10_PI_TYPE1_PROTECTION:
6874 	case T10_PI_TYPE2_PROTECTION:
6875 	case T10_PI_TYPE3_PROTECTION:
6876 		have_dif_prot = true;
6877 		break;
6878 
6879 	default:
6880 		pr_err("dif must be 0, 1, 2 or 3\n");
6881 		return -EINVAL;
6882 	}
6883 
6884 	if (sdebug_num_tgts < 0) {
6885 		pr_err("num_tgts must be >= 0\n");
6886 		return -EINVAL;
6887 	}
6888 
6889 	if (sdebug_guard > 1) {
6890 		pr_err("guard must be 0 or 1\n");
6891 		return -EINVAL;
6892 	}
6893 
6894 	if (sdebug_ato > 1) {
6895 		pr_err("ato must be 0 or 1\n");
6896 		return -EINVAL;
6897 	}
6898 
6899 	if (sdebug_physblk_exp > 15) {
6900 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6901 		return -EINVAL;
6902 	}
6903 
6904 	sdebug_lun_am = sdebug_lun_am_i;
6905 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6906 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6907 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6908 	}
6909 
6910 	if (sdebug_max_luns > 256) {
6911 		if (sdebug_max_luns > 16384) {
6912 			pr_warn("max_luns can be no more than 16384, using default\n");
6913 			sdebug_max_luns = DEF_MAX_LUNS;
6914 		}
6915 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6916 	}
6917 
6918 	if (sdebug_lowest_aligned > 0x3fff) {
6919 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6920 		return -EINVAL;
6921 	}
6922 
6923 	if (submit_queues < 1) {
6924 		pr_err("submit_queues must be 1 or more\n");
6925 		return -EINVAL;
6926 	}
6927 
6928 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6929 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6930 		return -EINVAL;
6931 	}
6932 
6933 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6934 	    (sdebug_host_max_queue < 0)) {
6935 		pr_err("host_max_queue must be in range [0, %d]\n",
6936 		       SDEBUG_CANQUEUE);
6937 		return -EINVAL;
6938 	}
6939 
6940 	if (sdebug_host_max_queue &&
6941 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6942 		sdebug_max_queue = sdebug_host_max_queue;
6943 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6944 			sdebug_max_queue);
6945 	}
6946 
6947 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6948 			       GFP_KERNEL);
6949 	if (sdebug_q_arr == NULL)
6950 		return -ENOMEM;
6951 	for (k = 0; k < submit_queues; ++k)
6952 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6953 
6954 	/*
6955 	 * Check for a host-managed zoned block device specified with
6956 	 * ptype=0x14 or zbc=XXX.
6957 	 */
6958 	if (sdebug_ptype == TYPE_ZBC) {
6959 		sdeb_zbc_model = BLK_ZONED_HM;
6960 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6961 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6962 		if (k < 0) {
6963 			ret = k;
6964 			goto free_q_arr;
6965 		}
6966 		sdeb_zbc_model = k;
6967 		switch (sdeb_zbc_model) {
6968 		case BLK_ZONED_NONE:
6969 		case BLK_ZONED_HA:
6970 			sdebug_ptype = TYPE_DISK;
6971 			break;
6972 		case BLK_ZONED_HM:
6973 			sdebug_ptype = TYPE_ZBC;
6974 			break;
6975 		default:
6976 			pr_err("Invalid ZBC model\n");
6977 			ret = -EINVAL;
6978 			goto free_q_arr;
6979 		}
6980 	}
6981 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6982 		sdeb_zbc_in_use = true;
6983 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6984 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6985 	}
6986 
6987 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6988 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6989 	if (sdebug_dev_size_mb < 1)
6990 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6991 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6992 	sdebug_store_sectors = sz / sdebug_sector_size;
6993 	sdebug_capacity = get_sdebug_capacity();
6994 
6995 	/* play around with geometry, don't waste too much on track 0 */
6996 	sdebug_heads = 8;
6997 	sdebug_sectors_per = 32;
6998 	if (sdebug_dev_size_mb >= 256)
6999 		sdebug_heads = 64;
7000 	else if (sdebug_dev_size_mb >= 16)
7001 		sdebug_heads = 32;
7002 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7003 			       (sdebug_sectors_per * sdebug_heads);
7004 	if (sdebug_cylinders_per >= 1024) {
7005 		/* other LLDs do this; implies >= 1GB ram disk ... */
7006 		sdebug_heads = 255;
7007 		sdebug_sectors_per = 63;
7008 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7009 			       (sdebug_sectors_per * sdebug_heads);
7010 	}
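	/*
	 * Worked example (hypothetical size): dev_size_mb=1024 with 512-byte
	 * sectors gives 2097152 sectors; 2097152 / (32 * 64) = 1024 cylinders,
	 * so the 255-head/63-sector fallback applies and yields
	 * 2097152 / 16065 = 130 cylinders.
	 */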
7011 	if (scsi_debug_lbp()) {
7012 		sdebug_unmap_max_blocks =
7013 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7014 
7015 		sdebug_unmap_max_desc =
7016 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7017 
7018 		sdebug_unmap_granularity =
7019 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7020 
7021 		if (sdebug_unmap_alignment &&
7022 		    sdebug_unmap_granularity <=
7023 		    sdebug_unmap_alignment) {
7024 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7025 			ret = -EINVAL;
7026 			goto free_q_arr;
7027 		}
7028 	}
7029 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7030 	if (want_store) {
7031 		idx = sdebug_add_store();
7032 		if (idx < 0) {
7033 			ret = idx;
7034 			goto free_q_arr;
7035 		}
7036 	}
7037 
7038 	pseudo_primary = root_device_register("pseudo_0");
7039 	if (IS_ERR(pseudo_primary)) {
7040 		pr_warn("root_device_register() error\n");
7041 		ret = PTR_ERR(pseudo_primary);
7042 		goto free_vm;
7043 	}
7044 	ret = bus_register(&pseudo_lld_bus);
7045 	if (ret < 0) {
7046 		pr_warn("bus_register error: %d\n", ret);
7047 		goto dev_unreg;
7048 	}
7049 	ret = driver_register(&sdebug_driverfs_driver);
7050 	if (ret < 0) {
7051 		pr_warn("driver_register error: %d\n", ret);
7052 		goto bus_unreg;
7053 	}
7054 
7055 	hosts_to_add = sdebug_add_host;
7056 	sdebug_add_host = 0;
7057 
7058 	for (k = 0; k < hosts_to_add; k++) {
7059 		if (want_store && k == 0) {
7060 			ret = sdebug_add_host_helper(idx);
7061 			if (ret < 0) {
7062 				pr_err("add_host_helper k=%d, error=%d\n",
7063 				       k, -ret);
7064 				break;
7065 			}
7066 		} else {
7067 			ret = sdebug_do_add_host(want_store &&
7068 						 sdebug_per_host_store);
7069 			if (ret < 0) {
7070 				pr_err("add_host k=%d error=%d\n", k, -ret);
7071 				break;
7072 			}
7073 		}
7074 	}
7075 	if (sdebug_verbose)
7076 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7077 
7078 	return 0;
7079 
7080 bus_unreg:
7081 	bus_unregister(&pseudo_lld_bus);
7082 dev_unreg:
7083 	root_device_unregister(pseudo_primary);
7084 free_vm:
7085 	sdebug_erase_store(idx, NULL);
7086 free_q_arr:
7087 	kfree(sdebug_q_arr);
7088 	return ret;
7089 }
7090 
7091 static void __exit scsi_debug_exit(void)
7092 {
7093 	int k = sdebug_num_hosts;
7094 
7095 	stop_all_queued();
7096 	for (; k; k--)
7097 		sdebug_do_remove_host(true);
7098 	free_all_queued();
7099 	driver_unregister(&sdebug_driverfs_driver);
7100 	bus_unregister(&pseudo_lld_bus);
7101 	root_device_unregister(pseudo_primary);
7102 
7103 	sdebug_erase_all_stores(false);
7104 	xa_destroy(per_store_ap);
7105 	kfree(sdebug_q_arr);
7106 }
7107 
7108 device_initcall(scsi_debug_init);
7109 module_exit(scsi_debug_exit);
7110 
7111 static void sdebug_release_adapter(struct device *dev)
7112 {
7113 	struct sdebug_host_info *sdbg_host;
7114 
7115 	sdbg_host = dev_to_sdebug_host(dev);
7116 	kfree(sdbg_host);
7117 }
7118 
7119 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7120 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7121 {
7122 	if (idx < 0)
7123 		return;
7124 	if (!sip) {
7125 		if (xa_empty(per_store_ap))
7126 			return;
7127 		sip = xa_load(per_store_ap, idx);
7128 		if (!sip)
7129 			return;
7130 	}
7131 	vfree(sip->map_storep);
7132 	vfree(sip->dif_storep);
7133 	vfree(sip->storep);
7134 	xa_erase(per_store_ap, idx);
7135 	kfree(sip);
7136 }
7137 
7138 /* apart_from_first==false is assumed to occur only in the shutdown case. */
7139 static void sdebug_erase_all_stores(bool apart_from_first)
7140 {
7141 	unsigned long idx;
7142 	struct sdeb_store_info *sip = NULL;
7143 
7144 	xa_for_each(per_store_ap, idx, sip) {
7145 		if (apart_from_first)
7146 			apart_from_first = false;
7147 		else
7148 			sdebug_erase_store(idx, sip);
7149 	}
7150 	if (apart_from_first)
7151 		sdeb_most_recent_idx = sdeb_first_idx;
7152 }
7153 
7154 /*
7155  * Returns the new store's xarray element index (idx) if >= 0, else a negated errno.
7156  * Limit the number of stores to 65536.
7157  */
7158 static int sdebug_add_store(void)
7159 {
7160 	int res;
7161 	u32 n_idx;
7162 	unsigned long iflags;
7163 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7164 	struct sdeb_store_info *sip = NULL;
7165 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7166 
7167 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7168 	if (!sip)
7169 		return -ENOMEM;
7170 
7171 	xa_lock_irqsave(per_store_ap, iflags);
7172 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7173 	if (unlikely(res < 0)) {
7174 		xa_unlock_irqrestore(per_store_ap, iflags);
7175 		kfree(sip);
7176 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7177 		return res;
7178 	}
7179 	sdeb_most_recent_idx = n_idx;
7180 	if (sdeb_first_idx < 0)
7181 		sdeb_first_idx = n_idx;
7182 	xa_unlock_irqrestore(per_store_ap, iflags);
7183 
7184 	res = -ENOMEM;
7185 	sip->storep = vzalloc(sz);
7186 	if (!sip->storep) {
7187 		pr_err("user data oom\n");
7188 		goto err;
7189 	}
7190 	if (sdebug_num_parts > 0)
7191 		sdebug_build_parts(sip->storep, sz);
7192 
7193 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7194 	if (sdebug_dix) {
7195 		int dif_size;
7196 
7197 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7198 		sip->dif_storep = vmalloc(dif_size);
7199 
7200 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7201 			sip->dif_storep);
7202 
7203 		if (!sip->dif_storep) {
7204 			pr_err("DIX oom\n");
7205 			goto err;
7206 		}
7207 		memset(sip->dif_storep, 0xff, dif_size);
7208 	}
7209 	/* Logical Block Provisioning */
7210 	if (scsi_debug_lbp()) {
7211 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7212 		sip->map_storep = vmalloc(array_size(sizeof(long),
7213 						     BITS_TO_LONGS(map_size)));
7214 
7215 		pr_info("%lu provisioning blocks\n", map_size);
7216 
7217 		if (!sip->map_storep) {
7218 			pr_err("LBP map oom\n");
7219 			goto err;
7220 		}
7221 
7222 		bitmap_zero(sip->map_storep, map_size);
7223 
7224 		/* Map first 1KB for partition table */
7225 		if (sdebug_num_parts)
7226 			map_region(sip, 0, 2);
7227 	}
7228 
7229 	rwlock_init(&sip->macc_lck);
7230 	return (int)n_idx;
7231 err:
7232 	sdebug_erase_store((int)n_idx, sip);
7233 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7234 	return res;
7235 }
7236 
7237 static int sdebug_add_host_helper(int per_host_idx)
7238 {
7239 	int k, devs_per_host, idx;
7240 	int error = -ENOMEM;
7241 	struct sdebug_host_info *sdbg_host;
7242 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7243 
7244 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7245 	if (!sdbg_host)
7246 		return -ENOMEM;
7247 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7248 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7249 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7250 	sdbg_host->si_idx = idx;
7251 
7252 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7253 
7254 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7255 	for (k = 0; k < devs_per_host; k++) {
7256 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7257 		if (!sdbg_devinfo)
7258 			goto clean;
7259 	}
7260 
7261 	spin_lock(&sdebug_host_list_lock);
7262 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7263 	spin_unlock(&sdebug_host_list_lock);
7264 
7265 	sdbg_host->dev.bus = &pseudo_lld_bus;
7266 	sdbg_host->dev.parent = pseudo_primary;
7267 	sdbg_host->dev.release = &sdebug_release_adapter;
7268 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7269 
7270 	error = device_register(&sdbg_host->dev);
7271 	if (error) {
7272 		spin_lock(&sdebug_host_list_lock);
7273 		list_del(&sdbg_host->host_list);
7274 		spin_unlock(&sdebug_host_list_lock);
7275 		goto clean;
7276 	}
7277 
7278 	++sdebug_num_hosts;
7279 	return 0;
7280 
7281 clean:
7282 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7283 				 dev_list) {
7284 		list_del(&sdbg_devinfo->dev_list);
7285 		kfree(sdbg_devinfo->zstate);
7286 		kfree(sdbg_devinfo);
7287 	}
7288 	if (sdbg_host->dev.release)
7289 		put_device(&sdbg_host->dev);
7290 	else
7291 		kfree(sdbg_host);
7292 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7293 	return error;
7294 }
7295 
7296 static int sdebug_do_add_host(bool mk_new_store)
7297 {
7298 	int ph_idx = sdeb_most_recent_idx;
7299 
7300 	if (mk_new_store) {
7301 		ph_idx = sdebug_add_store();
7302 		if (ph_idx < 0)
7303 			return ph_idx;
7304 	}
7305 	return sdebug_add_host_helper(ph_idx);
7306 }
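
/*
 * Store lifecycle note: with per_host_store active, sdebug_do_remove_host()
 * marks a departing host's store SDEB_XA_NOT_IN_USE rather than freeing it,
 * and add_host_store() reuses such a marked store (via
 * sdebug_add_host_helper()) before asking sdebug_do_add_host() to allocate
 * a fresh one.
 */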
7307 
7308 static void sdebug_do_remove_host(bool the_end)
7309 {
7310 	int idx = -1;
7311 	struct sdebug_host_info *sdbg_host = NULL;
7312 	struct sdebug_host_info *sdbg_host2;
7313 
7314 	spin_lock(&sdebug_host_list_lock);
7315 	if (!list_empty(&sdebug_host_list)) {
7316 		sdbg_host = list_entry(sdebug_host_list.prev,
7317 				       struct sdebug_host_info, host_list);
7318 		idx = sdbg_host->si_idx;
7319 	}
7320 	if (!the_end && idx >= 0) {
7321 		bool unique = true;
7322 
7323 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7324 			if (sdbg_host2 == sdbg_host)
7325 				continue;
7326 			if (idx == sdbg_host2->si_idx) {
7327 				unique = false;
7328 				break;
7329 			}
7330 		}
7331 		if (unique) {
7332 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7333 			if (idx == sdeb_most_recent_idx)
7334 				--sdeb_most_recent_idx;
7335 		}
7336 	}
7337 	if (sdbg_host)
7338 		list_del(&sdbg_host->host_list);
7339 	spin_unlock(&sdebug_host_list_lock);
7340 
7341 	if (!sdbg_host)
7342 		return;
7343 
7344 	device_unregister(&sdbg_host->dev);
7345 	--sdebug_num_hosts;
7346 }
7347 
7348 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7349 {
7350 	struct sdebug_dev_info *devip = sdev->hostdata;
7351 
7352 	if (!devip)
7353 		return	-ENODEV;
7354 
7355 	block_unblock_all_queues(true);
7356 	if (qdepth > SDEBUG_CANQUEUE) {
7357 		qdepth = SDEBUG_CANQUEUE;
7358 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7359 			qdepth, SDEBUG_CANQUEUE);
7360 	}
7361 	if (qdepth < 1)
7362 		qdepth = 1;
7363 	if (qdepth != sdev->queue_depth)
7364 		scsi_change_queue_depth(sdev, qdepth);
7365 
7366 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7367 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7368 	block_unblock_all_queues(false);
7369 	return sdev->queue_depth;
7370 }
7371 
7372 static bool fake_timeout(struct scsi_cmnd *scp)
7373 {
7374 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7375 		if (sdebug_every_nth < -1)
7376 			sdebug_every_nth = -1;
7377 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7378 			return true; /* ignore command causing timeout */
7379 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7380 			 scsi_medium_access_command(scp))
7381 			return true; /* time out reads and writes */
7382 	}
7383 	return false;
7384 }
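
/*
 * For example (hypothetical setting), every_nth=100 with the
 * SDEBUG_OPT_TIMEOUT bit set in opts makes every 100th command disappear
 * without a response, exercising the mid-level's error handling.
 */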
7385 
7386 /* Response to TUR or media access command when device stopped */
7387 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7388 {
7389 	int stopped_state;
7390 	u64 diff_ns = 0;
7391 	ktime_t now_ts = ktime_get_boottime();
7392 	struct scsi_device *sdp = scp->device;
7393 
7394 	stopped_state = atomic_read(&devip->stopped);
7395 	if (stopped_state == 2) {
7396 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7397 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7398 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7399 				/* tur_ms_to_ready timer extinguished */
7400 				atomic_set(&devip->stopped, 0);
7401 				return 0;
7402 			}
7403 		}
7404 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7405 		if (sdebug_verbose)
7406 			sdev_printk(KERN_INFO, sdp,
7407 				    "%s: Not ready: in process of becoming ready\n", my_name);
7408 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7409 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7410 
7411 			if (diff_ns <= tur_nanosecs_to_ready)
7412 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7413 			else
7414 				diff_ns = tur_nanosecs_to_ready;
7415 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7416 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7417 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7418 						   diff_ns);
7419 			return check_condition_result;
7420 		}
7421 	}
7422 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7423 	if (sdebug_verbose)
7424 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7425 			    my_name);
7426 	return check_condition_result;
7427 }
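
/*
 * Worked example (hypothetical timing): with tur_ms_to_ready=5000, a TEST
 * UNIT READY issued 2 seconds after device creation gets NOT READY sense
 * (ASC 0x4, ASCQ 0x1) with 3000 (milliseconds remaining) placed in the
 * sense-data information field, per the spc6 progress-report convention
 * noted above.
 */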
7428 
7429 static void sdebug_map_queues(struct Scsi_Host *shost)
7430 {
7431 	int i, qoff;
7432 
7433 	if (shost->nr_hw_queues == 1)
7434 		return;
7435 
7436 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7437 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7438 
7439 		map->nr_queues  = 0;
7440 
7441 		if (i == HCTX_TYPE_DEFAULT)
7442 			map->nr_queues = submit_queues - poll_queues;
7443 		else if (i == HCTX_TYPE_POLL)
7444 			map->nr_queues = poll_queues;
7445 
7446 		if (!map->nr_queues) {
7447 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7448 			continue;
7449 		}
7450 
7451 		map->queue_offset = qoff;
7452 		blk_mq_map_queues(map);
7453 
7454 		qoff += map->nr_queues;
7455 	}
7456 }
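
/*
 * Mapping example (hypothetical counts): submit_queues=6 with poll_queues=2
 * leaves HCTX_TYPE_DEFAULT with 4 queues at offset 0 and HCTX_TYPE_POLL
 * with 2 queues at offset 4; HCTX_TYPE_READ gets no queues.
 */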
7457 
7458 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7459 {
7460 	bool first;
7461 	bool retiring = false;
7462 	int num_entries = 0;
7463 	unsigned int qc_idx = 0;
7464 	unsigned long iflags;
7465 	ktime_t kt_from_boot = ktime_get_boottime();
7466 	struct sdebug_queue *sqp;
7467 	struct sdebug_queued_cmd *sqcp;
7468 	struct scsi_cmnd *scp;
7469 	struct sdebug_defer *sd_dp;
7470 
7471 	sqp = sdebug_q_arr + queue_num;
7472 
7473 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7474 
7475 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7476 	if (qc_idx >= sdebug_max_queue)
7477 		goto unlock;
7478 
7479 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7480 		if (first) {
7481 			first = false;
7482 			if (!test_bit(qc_idx, sqp->in_use_bm))
7483 				continue;
7484 		} else {
7485 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7486 		}
7487 		if (qc_idx >= sdebug_max_queue)
7488 			break;
7489 
7490 		sqcp = &sqp->qc_arr[qc_idx];
7491 		sd_dp = sqcp->sd_dp;
7492 		if (unlikely(!sd_dp))
7493 			continue;
7494 		scp = sqcp->a_cmnd;
7495 		if (unlikely(scp == NULL)) {
7496 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7497 			       queue_num, qc_idx, __func__);
7498 			break;
7499 		}
7500 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7501 			if (kt_from_boot < sd_dp->cmpl_ts)
7502 				continue;
7503 
7504 		} else		/* ignoring non-REQ_POLLED requests */
7505 			continue;
7506 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7507 			retiring = true;
7508 
7509 		sqcp->a_cmnd = NULL;
7510 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7511 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7512 				sqp, queue_num, qc_idx, __func__);
7513 			break;
7514 		}
7515 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7516 			int k, retval;
7517 
7518 			retval = atomic_read(&retired_max_queue);
7519 			if (qc_idx >= retval) {
7520 				pr_err("index %u too large\n", qc_idx);
7521 				break;
7522 			}
7523 			k = find_last_bit(sqp->in_use_bm, retval);
7524 			if ((k < sdebug_max_queue) || (k == retval))
7525 				atomic_set(&retired_max_queue, 0);
7526 			else
7527 				atomic_set(&retired_max_queue, k + 1);
7528 		}
7529 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7530 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7531 
7532 		if (sdebug_statistics) {
7533 			atomic_inc(&sdebug_completions);
7534 			if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7535 				atomic_inc(&sdebug_miss_cpus);
7536 		}
7537 
7538 		scsi_done(scp); /* callback to mid level */
7539 		num_entries++;
7540 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7541 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7542 			break;
7543 	}
7544 
7545 unlock:
7546 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7547 
7548 	if (num_entries > 0)
7549 		atomic_add(num_entries, &sdeb_mq_poll_count);
7550 	return num_entries;
7551 }
7552 
7553 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7554 				   struct scsi_cmnd *scp)
7555 {
7556 	u8 sdeb_i;
7557 	struct scsi_device *sdp = scp->device;
7558 	const struct opcode_info_t *oip;
7559 	const struct opcode_info_t *r_oip;
7560 	struct sdebug_dev_info *devip;
7561 	u8 *cmd = scp->cmnd;
7562 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7563 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7564 	int k, na;
7565 	int errsts = 0;
7566 	u64 lun_index = sdp->lun & 0x3FFF;
7567 	u32 flags;
7568 	u16 sa;
7569 	u8 opcode = cmd[0];
7570 	bool has_wlun_rl;
7571 	bool inject_now;
7572 
7573 	scsi_set_resid(scp, 0);
7574 	if (sdebug_statistics) {
7575 		atomic_inc(&sdebug_cmnd_count);
7576 		inject_now = inject_on_this_cmd();
7577 	} else {
7578 		inject_now = false;
7579 	}
7580 	if (unlikely(sdebug_verbose &&
7581 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7582 		char b[120];
7583 		int n, len, sb;
7584 
7585 		len = scp->cmd_len;
7586 		sb = (int)sizeof(b);
7587 		if (len > 32)
7588 			strcpy(b, "too long, over 32 bytes");
7589 		else {
7590 			for (k = 0, n = 0; k < len && n < sb; ++k)
7591 				n += scnprintf(b + n, sb - n, "%02x ",
7592 					       (u32)cmd[k]);
7593 		}
7594 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7595 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7596 	}
7597 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7598 		return SCSI_MLQUEUE_HOST_BUSY;
7599 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7600 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7601 		goto err_out;
7602 
7603 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7604 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7605 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7606 	if (unlikely(!devip)) {
7607 		devip = find_build_dev_info(sdp);
7608 		if (NULL == devip)
7609 			goto err_out;
7610 	}
7611 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7612 		atomic_set(&sdeb_inject_pending, 1);
7613 
7614 	na = oip->num_attached;
7615 	r_pfp = oip->pfp;
7616 	if (na) {	/* multiple commands with this opcode */
7617 		r_oip = oip;
7618 		if (FF_SA & r_oip->flags) {
7619 			if (F_SA_LOW & oip->flags)
7620 				sa = 0x1f & cmd[1];
7621 			else
7622 				sa = get_unaligned_be16(cmd + 8);
7623 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7624 				if (opcode == oip->opcode && sa == oip->sa)
7625 					break;
7626 			}
7627 		} else {   /* since no service action only check opcode */
7628 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7629 				if (opcode == oip->opcode)
7630 					break;
7631 			}
7632 		}
7633 		if (k > na) {
7634 			if (F_SA_LOW & r_oip->flags)
7635 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7636 			else if (F_SA_HIGH & r_oip->flags)
7637 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7638 			else
7639 				mk_sense_invalid_opcode(scp);
7640 			goto check_cond;
7641 		}
7642 	}	/* else (when na==0) we assume the oip is a match */
7643 	flags = oip->flags;
7644 	if (unlikely(F_INV_OP & flags)) {
7645 		mk_sense_invalid_opcode(scp);
7646 		goto check_cond;
7647 	}
7648 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7649 		if (sdebug_verbose)
7650 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
7651 				    my_name, opcode);
7652 		mk_sense_invalid_opcode(scp);
7653 		goto check_cond;
7654 	}
7655 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7656 		u8 rem;
7657 		int j;
7658 
7659 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7660 			rem = ~oip->len_mask[k] & cmd[k];
7661 			if (rem) {
7662 				for (j = 7; j >= 0; --j, rem <<= 1) {
7663 					if (0x80 & rem)
7664 						break;
7665 				}
7666 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7667 				goto check_cond;
7668 			}
7669 		}
7670 	}
7671 	if (unlikely(!(F_SKIP_UA & flags) &&
7672 		     find_first_bit(devip->uas_bm,
7673 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7674 		errsts = make_ua(scp, devip);
7675 		if (errsts)
7676 			goto check_cond;
7677 	}
7678 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7679 		     atomic_read(&devip->stopped))) {
7680 		errsts = resp_not_ready(scp, devip);
7681 		if (errsts)
7682 			goto fini;
7683 	}
7684 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7685 		goto fini;
7686 	if (unlikely(sdebug_every_nth)) {
7687 		if (fake_timeout(scp))
7688 			return 0;	/* ignore command: make trouble */
7689 	}
7690 	if (likely(oip->pfp))
7691 		pfp = oip->pfp;	/* calls a resp_* function */
7692 	else
7693 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7694 
7695 fini:
7696 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7697 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7698 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7699 					    sdebug_ndelay > 10000)) {
7700 		/*
7701 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7702 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7703 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7704 		 * For Synchronize Cache want 1/20 of SSU's delay.
7705 		 */
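		/*
		 * Worked example (hypothetical settings, HZ=1000): with
		 * sdebug_jdelay=2, SSU gets mult_frac(USER_HZ * 2, HZ,
		 * 1 * USER_HZ) = 2 * HZ = 2000 jiffies (2 s), while a command
		 * flagged F_SYNC_DELAY gets 2 * HZ / 20 = 100 jiffies (0.1 s).
		 */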
7706 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7707 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7708 
7709 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7710 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7711 	} else
7712 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7713 				     sdebug_ndelay);
7714 check_cond:
7715 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7716 err_out:
7717 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7718 }
7719 
7720 static struct scsi_host_template sdebug_driver_template = {
7721 	.show_info =		scsi_debug_show_info,
7722 	.write_info =		scsi_debug_write_info,
7723 	.proc_name =		sdebug_proc_name,
7724 	.name =			"SCSI DEBUG",
7725 	.info =			scsi_debug_info,
7726 	.slave_alloc =		scsi_debug_slave_alloc,
7727 	.slave_configure =	scsi_debug_slave_configure,
7728 	.slave_destroy =	scsi_debug_slave_destroy,
7729 	.ioctl =		scsi_debug_ioctl,
7730 	.queuecommand =		scsi_debug_queuecommand,
7731 	.change_queue_depth =	sdebug_change_qdepth,
7732 	.map_queues =		sdebug_map_queues,
7733 	.mq_poll =		sdebug_blk_mq_poll,
7734 	.eh_abort_handler =	scsi_debug_abort,
7735 	.eh_device_reset_handler = scsi_debug_device_reset,
7736 	.eh_target_reset_handler = scsi_debug_target_reset,
7737 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7738 	.eh_host_reset_handler = scsi_debug_host_reset,
7739 	.can_queue =		SDEBUG_CANQUEUE,
7740 	.this_id =		7,
7741 	.sg_tablesize =		SG_MAX_SEGMENTS,
7742 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7743 	.max_sectors =		-1U,
7744 	.max_segment_size =	-1U,
7745 	.module =		THIS_MODULE,
7746 	.track_queue_depth =	1,
7747 };
7748 
7749 static int sdebug_driver_probe(struct device *dev)
7750 {
7751 	int error = 0;
7752 	struct sdebug_host_info *sdbg_host;
7753 	struct Scsi_Host *hpnt;
7754 	int hprot;
7755 
7756 	sdbg_host = dev_to_sdebug_host(dev);
7757 
7758 	sdebug_driver_template.can_queue = sdebug_max_queue;
7759 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7760 	if (!sdebug_clustering)
7761 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7762 
7763 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7764 	if (NULL == hpnt) {
7765 		pr_err("scsi_host_alloc failed\n");
7766 		error = -ENODEV;
7767 		return error;
7768 	}
7769 	if (submit_queues > nr_cpu_ids) {
7770 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7771 			my_name, submit_queues, nr_cpu_ids);
7772 		submit_queues = nr_cpu_ids;
7773 	}
7774 	/*
7775 	 * Decide whether to tell the SCSI subsystem that we want mq. The
7776 	 * following should give the same answer for each host.
7777 	 */
7778 	hpnt->nr_hw_queues = submit_queues;
7779 	if (sdebug_host_max_queue)
7780 		hpnt->host_tagset = 1;
7781 
7782 	/* poll queues are possible for nr_hw_queues > 1 */
7783 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7784 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7785 			 my_name, poll_queues, hpnt->nr_hw_queues);
7786 		poll_queues = 0;
7787 	}
7788 
7789 	/*
7790 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7791 	 * left over for non-polled I/O.
7792 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7793 	 */
7794 	if (poll_queues >= submit_queues) {
7795 		if (submit_queues < 3)
7796 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7797 		else
7798 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7799 				my_name, submit_queues - 1);
7800 		poll_queues = 1;
7801 	}
7802 	if (poll_queues)
7803 		hpnt->nr_maps = 3;
7804 
7805 	sdbg_host->shost = hpnt;
7806 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7807 		hpnt->max_id = sdebug_num_tgts + 1;
7808 	else
7809 		hpnt->max_id = sdebug_num_tgts;
7810 	/* = sdebug_max_luns; */
7811 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7812 
7813 	hprot = 0;
7814 
7815 	switch (sdebug_dif) {
7816 
7817 	case T10_PI_TYPE1_PROTECTION:
7818 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7819 		if (sdebug_dix)
7820 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7821 		break;
7822 
7823 	case T10_PI_TYPE2_PROTECTION:
7824 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7825 		if (sdebug_dix)
7826 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7827 		break;
7828 
7829 	case T10_PI_TYPE3_PROTECTION:
7830 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7831 		if (sdebug_dix)
7832 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7833 		break;
7834 
7835 	default:
7836 		if (sdebug_dix)
7837 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7838 		break;
7839 	}
7840 
7841 	scsi_host_set_prot(hpnt, hprot);
7842 
7843 	if (have_dif_prot || sdebug_dix)
7844 		pr_info("host protection%s%s%s%s%s%s%s\n",
7845 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7846 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7847 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7848 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7849 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7850 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7851 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7852 
7853 	if (sdebug_guard == 1)
7854 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7855 	else
7856 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7857 
7858 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7859 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7860 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7861 		sdebug_statistics = true;
7862 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7863 	if (error) {
7864 		pr_err("scsi_add_host failed\n");
7865 		error = -ENODEV;
7866 		scsi_host_put(hpnt);
7867 	} else {
7868 		scsi_scan_host(hpnt);
7869 	}
7870 
7871 	return error;
7872 }
7873 
7874 static void sdebug_driver_remove(struct device *dev)
7875 {
7876 	struct sdebug_host_info *sdbg_host;
7877 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7878 
7879 	sdbg_host = dev_to_sdebug_host(dev);
7880 
7881 	scsi_remove_host(sdbg_host->shost);
7882 
7883 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7884 				 dev_list) {
7885 		list_del(&sdbg_devinfo->dev_list);
7886 		kfree(sdbg_devinfo->zstate);
7887 		kfree(sdbg_devinfo);
7888 	}
7889 
7890 	scsi_host_put(sdbg_host->shost);
7891 }
7892 
7893 static int pseudo_lld_bus_match(struct device *dev,
7894 				struct device_driver *dev_driver)
7895 {
7896 	return 1;
7897 }
7898 
7899 static struct bus_type pseudo_lld_bus = {
7900 	.name = "pseudo",
7901 	.match = pseudo_lld_bus_match,
7902 	.probe = sdebug_driver_probe,
7903 	.remove = sdebug_driver_remove,
7904 	.drv_groups = sdebug_drv_groups,
7905 };
7906