xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision c411a42f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
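/*
 * Illustrative usage only (not part of this file): most of the defaults
 * above back a module parameter of the same lower-case name, so a
 * hypothetical invocation overriding a few of them could look like:
 *
 *   modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=64 ndelay=10000
 *
 * The authoritative parameter names and semantics are given by the
 * module_param() declarations later in this file.
 */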
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here, lower numbers have higher
197  * priority. The UA numbers form a sequence starting from 0, with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
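/*
 * Illustrative sketch only (not part of this driver): because lower UA
 * numbers have higher priority, scanning the per-device bitmap with
 * find_first_bit() naturally yields the highest-priority pending UA, as
 * make_ua() below does. The names mirror struct sdebug_dev_info fields.
 */
#if 0
	set_bit(SDEBUG_UA_POR, devip->uas_bm);		/* raise a UA */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS)			/* some UA pending */
		clear_bit(k, devip->uas_bm);		/* report and clear */
#endif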
208 
209 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
210  * is simulated starting at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE.
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
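/*
 * Worked example: on a 64-bit build BITS_PER_LONG is 64, so SDEBUG_CANQUEUE
 * is 3 * 64 = 192 commands per submit queue (3 * 32 = 96 on a 32-bit build).
 */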
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
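/*
 * Illustrative sketch only (not part of this driver): how the service
 * action (SA) location flags are typically consulted when decoding a cdb;
 * oip and cdb are hypothetical locals here.
 */
#if 0
	if (oip->flags & FF_SA) {
		u16 sa = (oip->flags & F_SA_LOW) ?
			 (cdb[1] & 0x1f) :		/* e.g. opcode 0x9e */
			 get_unaligned_be16(cdb + 8);	/* e.g. opcode 0x7f */
		/* ... match sa against oip->sa and its arrp overflow list */
	}
#endif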
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 /* Zone types (zbcr05 table 25) */
254 enum sdebug_z_type {
255 	ZBC_ZTYPE_CNV	= 0x1,
256 	ZBC_ZTYPE_SWR	= 0x2,
257 	ZBC_ZTYPE_SWP	= 0x3,
258 	/* ZBC_ZTYPE_SOBR = 0x4, */
259 	ZBC_ZTYPE_GAP	= 0x5,
260 };
261 
262 /* enumeration names taken from table 26, zbcr05 */
263 enum sdebug_z_cond {
264 	ZBC_NOT_WRITE_POINTER	= 0x0,
265 	ZC1_EMPTY		= 0x1,
266 	ZC2_IMPLICIT_OPEN	= 0x2,
267 	ZC3_EXPLICIT_OPEN	= 0x3,
268 	ZC4_CLOSED		= 0x4,
269 	ZC6_READ_ONLY		= 0xd,
270 	ZC5_FULL		= 0xe,
271 	ZC7_OFFLINE		= 0xf,
272 };
273 
274 struct sdeb_zone_state {	/* ZBC: per zone state */
275 	enum sdebug_z_type z_type;
276 	enum sdebug_z_cond z_cond;
277 	bool z_non_seq_resource;
278 	unsigned int z_size;
279 	sector_t z_start;
280 	sector_t z_wp;
281 };
282 
283 struct sdebug_dev_info {
284 	struct list_head dev_list;
285 	unsigned int channel;
286 	unsigned int target;
287 	u64 lun;
288 	uuid_t lu_name;
289 	struct sdebug_host_info *sdbg_host;
290 	unsigned long uas_bm[1];
291 	atomic_t num_in_q;
292 	atomic_t stopped;	/* 1: by SSU, 2: device start */
293 	bool used;
294 
295 	/* For ZBC devices */
296 	enum blk_zoned_model zmodel;
297 	unsigned int zcap;
298 	unsigned int zsize;
299 	unsigned int zsize_shift;
300 	unsigned int nr_zones;
301 	unsigned int nr_conv_zones;
302 	unsigned int nr_seq_zones;
303 	unsigned int nr_imp_open;
304 	unsigned int nr_exp_open;
305 	unsigned int nr_closed;
306 	unsigned int max_open;
307 	ktime_t create_ts;	/* time since bootup that this device was created */
308 	struct sdeb_zone_state *zstate;
309 };
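/*
 * Illustrative sketch only (not part of this driver): with the zone size a
 * power of two, zsize_shift lets the zone holding an LBA be found with a
 * shift rather than a division; lba is a hypothetical local here.
 */
#if 0
	unsigned int zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp = &devip->zstate[zno];
#endif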
310 
311 struct sdebug_host_info {
312 	struct list_head host_list;
313 	int si_idx;	/* sdeb_store_info (per host) xarray index */
314 	struct Scsi_Host *shost;
315 	struct device dev;
316 	struct list_head dev_info_list;
317 };
318 
319 /* There is an xarray of pointers to this struct's objects, one per host */
320 struct sdeb_store_info {
321 	rwlock_t macc_lck;	/* for atomic media access on this store */
322 	u8 *storep;		/* user data storage (ram) */
323 	struct t10_pi_tuple *dif_storep; /* protection info */
324 	void *map_storep;	/* provisioning map */
325 };
326 
327 #define to_sdebug_host(d)	\
328 	container_of(d, struct sdebug_host_info, dev)
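/*
 * Illustrative sketch only (hypothetical callback, not a quote from this
 * driver): to_sdebug_host() recovers the owning sdebug_host_info from its
 * embedded struct device, e.g. in a device release handler.
 */
#if 0
static void sdebug_dev_release(struct device *dev)
{
	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);

	kfree(sdbg_host);
}
#endif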
329 
330 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
331 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
332 
333 struct sdebug_defer {
334 	struct hrtimer hrt;
335 	struct execute_work ew;
336 	ktime_t cmpl_ts;/* time since boot to complete this cmd */
337 	int sqa_idx;	/* index of sdebug_queue array */
338 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
339 	int hc_idx;	/* hostwide tag index */
340 	int issuing_cpu;
341 	bool init_hrt;
342 	bool init_wq;
343 	bool init_poll;
344 	bool aborted;	/* true when blk_abort_request() already called */
345 	enum sdeb_defer_type defer_t;
346 };
347 
348 struct sdebug_queued_cmd {
349 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
350 	 * instance indicates this slot is in use.
351 	 */
352 	struct sdebug_defer *sd_dp;
353 	struct scsi_cmnd *a_cmnd;
354 };
355 
356 struct sdebug_queue {
357 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
358 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
359 	spinlock_t qc_lock;
360 	atomic_t blocked;	/* to temporarily stop more being queued */
361 };
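/*
 * Illustrative sketch only (not part of this driver): a free qc_arr slot
 * is typically claimed by scanning in_use_bm under qc_lock; sqp, iflags,
 * k and scp are hypothetical locals here.
 */
#if 0
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (k < sdebug_max_queue) {
		set_bit(k, sqp->in_use_bm);	/* slot k now in use */
		sqp->qc_arr[k].a_cmnd = scp;
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
#endif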
362 
363 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
364 static atomic_t sdebug_completions;  /* count of deferred completions */
365 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
366 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
367 static atomic_t sdeb_inject_pending;
368 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
369 
370 struct opcode_info_t {
371 	u8 num_attached;	/* 0 if this is a leaf (no overflow list); */
372 				/* 0xff marks the terminating element */
373 	u8 opcode;		/* if num_attached > 0, preferred */
374 	u16 sa;			/* service action */
375 	u32 flags;		/* OR-ed set of SDEB_F_* */
376 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
377 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
378 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
379 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
380 };
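/*
 * Illustrative sketch only (not part of this driver): len_mask[] flags
 * reserved cdb bits. len_mask[0] holds the expected cdb length; for each
 * following byte, a bit set in the cdb but clear in the mask indicates an
 * INVALID FIELD IN CDB. oip and j are hypothetical locals here.
 */
#if 0
	for (j = 1; j < (int)oip->len_mask[0] && j < 16; ++j) {
		if (scp->cmnd[j] & ~oip->len_mask[j]) {
			/* reserved bit set -> INVALID FIELD IN CDB */
			break;
		}
	}
#endif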
381 
382 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
383 enum sdeb_opcode_index {
384 	SDEB_I_INVALID_OPCODE =	0,
385 	SDEB_I_INQUIRY = 1,
386 	SDEB_I_REPORT_LUNS = 2,
387 	SDEB_I_REQUEST_SENSE = 3,
388 	SDEB_I_TEST_UNIT_READY = 4,
389 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
390 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
391 	SDEB_I_LOG_SENSE = 7,
392 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
393 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
394 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
395 	SDEB_I_START_STOP = 11,
396 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
397 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
398 	SDEB_I_MAINT_IN = 14,
399 	SDEB_I_MAINT_OUT = 15,
400 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
401 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
402 	SDEB_I_RESERVE = 18,		/* 6, 10 */
403 	SDEB_I_RELEASE = 19,		/* 6, 10 */
404 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
405 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
406 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
407 	SDEB_I_SEND_DIAG = 23,
408 	SDEB_I_UNMAP = 24,
409 	SDEB_I_WRITE_BUFFER = 25,
410 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
411 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
412 	SDEB_I_COMP_WRITE = 28,
413 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
414 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
415 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
416 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
417 };
418 
419 
420 static const unsigned char opcode_ind_arr[256] = {
421 /* 0x0; 0x0->0x1f: 6 byte cdbs */
422 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
423 	    0, 0, 0, 0,
424 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
425 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
426 	    SDEB_I_RELEASE,
427 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
428 	    SDEB_I_ALLOW_REMOVAL, 0,
429 /* 0x20; 0x20->0x3f: 10 byte cdbs */
430 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
431 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
432 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
433 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
434 /* 0x40; 0x40->0x5f: 10 byte cdbs */
435 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
436 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
437 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
438 	    SDEB_I_RELEASE,
439 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
440 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
441 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
443 	0, SDEB_I_VARIABLE_LEN,
444 /* 0x80; 0x80->0x9f: 16 byte cdbs */
445 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
446 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
447 	0, 0, 0, SDEB_I_VERIFY,
448 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
449 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
450 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
451 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
452 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
453 	     SDEB_I_MAINT_OUT, 0, 0, 0,
454 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
455 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
456 	0, 0, 0, 0, 0, 0, 0, 0,
457 	0, 0, 0, 0, 0, 0, 0, 0,
458 /* 0xc0; 0xc0->0xff: vendor specific */
459 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
463 };
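/*
 * Illustrative sketch only (not part of this driver): command dispatch is
 * a two-step lookup, first through opcode_ind_arr[] and then into
 * opcode_info_arr[] (defined below), with the overflow (iarr) lists
 * covering opcodes that share an index.
 */
#if 0
	const struct opcode_info_t *oip;

	oip = &opcode_info_arr[opcode_ind_arr[scp->cmnd[0]]];
	/* if (oip->flags & FF_SA), walk oip->arrp[] for a matching
	 * opcode + service action before calling oip->pfp(scp, devip) */
#endif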
464 
465 /*
466  * The following "response" functions return the SCSI mid-level's 4 byte
467  * tuple-in-an-int. To handle commands with an IMMED bit, which request
468  * early command completion, they can OR their return value with
469  * SDEG_RES_IMMED_MASK.
470  */
471 #define SDEG_RES_IMMED_MASK 0x40000000
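/*
 * Illustrative sketch only (hypothetical return, not a quote from the
 * functions below): a response function honoring an IMMED bit could end:
 */
#if 0
	return res | SDEG_RES_IMMED_MASK;	/* ask for early completion */
#endif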
472 
473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
502 
503 static int sdebug_do_add_host(bool mk_new_store);
504 static int sdebug_add_host_helper(int per_host_idx);
505 static void sdebug_do_remove_host(bool the_end);
506 static int sdebug_add_store(void);
507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
508 static void sdebug_erase_all_stores(bool apart_from_first);
509 
510 /*
511  * The following are overflow arrays for cdbs that "hit" the same index in
512  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513  * should be placed in opcode_info_arr[], the others should be placed here.
514  */
515 static const struct opcode_info_t msense_iarr[] = {
516 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
517 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519 
520 static const struct opcode_info_t mselect_iarr[] = {
521 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
522 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 };
524 
525 static const struct opcode_info_t read_iarr[] = {
526 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
528 	     0, 0, 0, 0} },
529 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 	     0xc7, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t write_iarr[] = {
537 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
538 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
539 		   0, 0, 0, 0, 0, 0} },
540 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
541 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
542 		   0, 0, 0} },
543 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
544 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 		   0xbf, 0xc7, 0, 0, 0, 0} },
546 };
547 
548 static const struct opcode_info_t verify_iarr[] = {
549 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 };
553 
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
558 };
559 
560 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
561 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
564 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
567 };
568 
569 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
570 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
576 };
577 
578 static const struct opcode_info_t write_same_iarr[] = {
579 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
582 };
583 
584 static const struct opcode_info_t reserve_iarr[] = {
585 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
586 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588 
589 static const struct opcode_info_t release_iarr[] = {
590 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
591 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 };
593 
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
598 };
599 
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
604 };
605 
606 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
607 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
610 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
613 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
616 };
617 
618 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
619 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 };
623 
624 
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626  * and keep the terminating elements for logic that scans this table, such
627  * as REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
631 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 	     0, 0} },					/* REPORT LUNS */
637 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
643 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
644 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
646 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
647 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
649 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650 	     0, 0, 0} },
651 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
652 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653 	     0, 0} },
654 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
656 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
660 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
671 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
673 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 				0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
680 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
684 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685 	     0xff, 0xff} },
686 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
688 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 	     0} },
690 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
692 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693 	     0} },
694 /* 20 */
695 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
702 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
709 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
711 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712 		 0, 0, 0, 0, 0} },
713 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 	    resp_sync_cache, sync_cache_iarr,
715 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
717 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
720 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 	    resp_pre_fetch, pre_fetch_iarr,
722 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
724 
725 /* 30 */
726 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
736 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
738 
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue;	/* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
758 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
759 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
760 static int sdebug_no_uld;
761 static int sdebug_num_parts = DEF_NUM_PARTS;
762 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
763 static int sdebug_opt_blks = DEF_OPT_BLKS;
764 static int sdebug_opts = DEF_OPTS;
765 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
766 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
767 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
768 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
769 static int sdebug_sector_size = DEF_SECTOR_SIZE;
770 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
771 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
772 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
773 static unsigned int sdebug_lbpu = DEF_LBPU;
774 static unsigned int sdebug_lbpws = DEF_LBPWS;
775 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
776 static unsigned int sdebug_lbprz = DEF_LBPRZ;
777 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
778 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
779 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
780 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
781 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
782 static int sdebug_uuid_ctl = DEF_UUID_CTL;
783 static bool sdebug_random = DEF_RANDOM;
784 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
785 static bool sdebug_removable = DEF_REMOVABLE;
786 static bool sdebug_clustering;
787 static bool sdebug_host_lock = DEF_HOST_LOCK;
788 static bool sdebug_strict = DEF_STRICT;
789 static bool sdebug_any_injecting_opt;
790 static bool sdebug_no_rwlock;
791 static bool sdebug_verbose;
792 static bool have_dif_prot;
793 static bool write_since_sync;
794 static bool sdebug_statistics = DEF_STATISTICS;
795 static bool sdebug_wp;
796 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
797 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
798 static char *sdeb_zbc_model_s;
799 
800 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
801 			  SAM_LUN_AM_FLAT = 0x1,
802 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
803 			  SAM_LUN_AM_EXTENDED = 0x3};
804 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
805 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
806 
807 static unsigned int sdebug_store_sectors;
808 static sector_t sdebug_capacity;	/* in sectors */
809 
810 /* old BIOS geometry values; the kernel may eventually drop these but some
811    mode sense pages may still need them */
812 static int sdebug_heads;		/* heads per disk */
813 static int sdebug_cylinders_per;	/* cylinders per surface */
814 static int sdebug_sectors_per;		/* sectors per cylinder */
815 
816 static LIST_HEAD(sdebug_host_list);
817 static DEFINE_SPINLOCK(sdebug_host_list_lock);
818 
819 static struct xarray per_store_arr;
820 static struct xarray *per_store_ap = &per_store_arr;
821 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
822 static int sdeb_most_recent_idx = -1;
823 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
824 
825 static unsigned long map_size;
826 static int num_aborts;
827 static int num_dev_resets;
828 static int num_target_resets;
829 static int num_bus_resets;
830 static int num_host_resets;
831 static int dix_writes;
832 static int dix_reads;
833 static int dif_errors;
834 
835 /* ZBC global data */
836 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
837 static int sdeb_zbc_zone_cap_mb;
838 static int sdeb_zbc_zone_size_mb;
839 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
840 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
841 
842 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
843 static int poll_queues; /* io_uring iopoll interface */
844 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
845 
846 static DEFINE_RWLOCK(atomic_rw);
847 static DEFINE_RWLOCK(atomic_rw2);
848 
849 static rwlock_t *ramdisk_lck_a[2];
850 
851 static char sdebug_proc_name[] = MY_NAME;
852 static const char *my_name = MY_NAME;
853 
854 static struct bus_type pseudo_lld_bus;
855 
856 static struct device_driver sdebug_driverfs_driver = {
857 	.name 		= sdebug_proc_name,
858 	.bus		= &pseudo_lld_bus,
859 };
860 
861 static const int check_condition_result =
862 	SAM_STAT_CHECK_CONDITION;
863 
864 static const int illegal_condition_result =
865 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
866 
867 static const int device_qfull_result =
868 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
869 
870 static const int condition_met_result = SAM_STAT_CONDITION_MET;
871 
872 
873 /* Only do the extra work involved in logical block provisioning if one or
874  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
875  * real reads and writes (i.e. not skipping them for speed).
876  */
877 static inline bool scsi_debug_lbp(void)
878 {
879 	return 0 == sdebug_fake_rw &&
880 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
881 }
882 
883 static void *lba2fake_store(struct sdeb_store_info *sip,
884 			    unsigned long long lba)
885 {
886 	struct sdeb_store_info *lsip = sip;
887 
888 	lba = do_div(lba, sdebug_store_sectors);
889 	if (!sip || !sip->storep) {
890 		WARN_ON_ONCE(true);
891 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
892 	}
893 	return lsip->storep + lba * sdebug_sector_size;
894 }
895 
896 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
897 				      sector_t sector)
898 {
899 	sector = sector_div(sector, sdebug_store_sectors);
900 
901 	return sip->dif_storep + sector;
902 }
903 
904 static void sdebug_max_tgts_luns(void)
905 {
906 	struct sdebug_host_info *sdbg_host;
907 	struct Scsi_Host *hpnt;
908 
909 	spin_lock(&sdebug_host_list_lock);
910 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
911 		hpnt = sdbg_host->shost;
912 		if ((hpnt->this_id >= 0) &&
913 		    (sdebug_num_tgts > hpnt->this_id))
914 			hpnt->max_id = sdebug_num_tgts + 1;
915 		else
916 			hpnt->max_id = sdebug_num_tgts;
917 		/* sdebug_max_luns; */
918 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
919 	}
920 	spin_unlock(&sdebug_host_list_lock);
921 }
922 
923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
924 
925 /* Set in_bit to -1 to indicate no bit position of invalid field */
926 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
927 				 enum sdeb_cmd_data c_d,
928 				 int in_byte, int in_bit)
929 {
930 	unsigned char *sbuff;
931 	u8 sks[4];
932 	int sl, asc;
933 
934 	sbuff = scp->sense_buffer;
935 	if (!sbuff) {
936 		sdev_printk(KERN_ERR, scp->device,
937 			    "%s: sense_buffer is NULL\n", __func__);
938 		return;
939 	}
940 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
941 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
942 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
943 	memset(sks, 0, sizeof(sks));
944 	sks[0] = 0x80;
945 	if (c_d)
946 		sks[0] |= 0x40;
947 	if (in_bit >= 0) {
948 		sks[0] |= 0x8;
949 		sks[0] |= 0x7 & in_bit;
950 	}
951 	put_unaligned_be16(in_byte, sks + 1);
952 	if (sdebug_dsense) {
953 		sl = sbuff[7] + 8;
954 		sbuff[7] = sl;
955 		sbuff[sl] = 0x2;
956 		sbuff[sl + 1] = 0x6;
957 		memcpy(sbuff + sl + 4, sks, 3);
958 	} else
959 		memcpy(sbuff + 15, sks, 3);
960 	if (sdebug_verbose)
961 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
962 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
963 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
964 }
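/*
 * Worked example (fixed-format sense, i.e. sdebug_dsense=0): a call such
 * as mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) yields sense key 0x5
 * (ILLEGAL REQUEST), asc 0x24 (INVALID FIELD IN CDB), and sense-key
 * specific bytes 15..17 of 0xcc 0x00 0x01: 0x80 (SKSV) | 0x40 (C/D) |
 * 0x8 (BPV) | bit position 4, followed by big-endian field pointer 1.
 */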
965 
966 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
967 {
968 	if (!scp->sense_buffer) {
969 		sdev_printk(KERN_ERR, scp->device,
970 			    "%s: sense_buffer is NULL\n", __func__);
971 		return;
972 	}
973 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
974 
975 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
976 
977 	if (sdebug_verbose)
978 		sdev_printk(KERN_INFO, scp->device,
979 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980 			    my_name, key, asc, asq);
981 }
982 
983 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
984 {
985 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
986 }
987 
988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 			    void __user *arg)
990 {
991 	if (sdebug_verbose) {
992 		if (0x1261 == cmd)
993 			sdev_printk(KERN_INFO, dev,
994 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
995 		else if (0x5331 == cmd)
996 			sdev_printk(KERN_INFO, dev,
997 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 				    __func__);
999 		else
1000 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001 				    __func__, cmd);
1002 	}
1003 	return -EINVAL;
1004 	/* return -ENOTTY; // correct return but upsets fdisk */
1005 }
1006 
1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009 	switch (sdebug_cdb_len) {
1010 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = false;
1013 		sdev->use_10_for_ms = false;
1014 		break;
1015 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016 		sdev->use_10_for_rw = true;
1017 		sdev->use_16_for_rw = false;
1018 		sdev->use_10_for_ms = false;
1019 		break;
1020 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021 		sdev->use_10_for_rw = true;
1022 		sdev->use_16_for_rw = false;
1023 		sdev->use_10_for_ms = true;
1024 		break;
1025 	case 16:
1026 		sdev->use_10_for_rw = false;
1027 		sdev->use_16_for_rw = true;
1028 		sdev->use_10_for_ms = true;
1029 		break;
1030 	case 32: /* No knobs to suggest this so same as 16 for now */
1031 		sdev->use_10_for_rw = false;
1032 		sdev->use_16_for_rw = true;
1033 		sdev->use_10_for_ms = true;
1034 		break;
1035 	default:
1036 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1037 			sdebug_cdb_len);
1038 		sdev->use_10_for_rw = true;
1039 		sdev->use_16_for_rw = false;
1040 		sdev->use_10_for_ms = false;
1041 		sdebug_cdb_len = 10;
1042 		break;
1043 	}
1044 }
1045 
1046 static void all_config_cdb_len(void)
1047 {
1048 	struct sdebug_host_info *sdbg_host;
1049 	struct Scsi_Host *shost;
1050 	struct scsi_device *sdev;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 		shost = sdbg_host->shost;
1055 		shost_for_each_device(sdev, shost) {
1056 			config_cdb_len(sdev);
1057 		}
1058 	}
1059 	spin_unlock(&sdebug_host_list_lock);
1060 }
1061 
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064 	struct sdebug_host_info *sdhp;
1065 	struct sdebug_dev_info *dp;
1066 
1067 	spin_lock(&sdebug_host_list_lock);
1068 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 			if ((devip->sdbg_host == dp->sdbg_host) &&
1071 			    (devip->target == dp->target))
1072 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 		}
1074 	}
1075 	spin_unlock(&sdebug_host_list_lock);
1076 }
1077 
1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1079 {
1080 	int k;
1081 
1082 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 	if (k != SDEBUG_NUM_UAS) {
1084 		const char *cp = NULL;
1085 
1086 		switch (k) {
1087 		case SDEBUG_UA_POR:
1088 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 					POWER_ON_RESET_ASCQ);
1090 			if (sdebug_verbose)
1091 				cp = "power on reset";
1092 			break;
1093 		case SDEBUG_UA_POOCCUR:
1094 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 					POWER_ON_OCCURRED_ASCQ);
1096 			if (sdebug_verbose)
1097 				cp = "power on occurred";
1098 			break;
1099 		case SDEBUG_UA_BUS_RESET:
1100 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1101 					BUS_RESET_ASCQ);
1102 			if (sdebug_verbose)
1103 				cp = "bus reset";
1104 			break;
1105 		case SDEBUG_UA_MODE_CHANGED:
1106 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1107 					MODE_CHANGED_ASCQ);
1108 			if (sdebug_verbose)
1109 				cp = "mode parameters changed";
1110 			break;
1111 		case SDEBUG_UA_CAPACITY_CHANGED:
1112 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1113 					CAPACITY_CHANGED_ASCQ);
1114 			if (sdebug_verbose)
1115 				cp = "capacity data changed";
1116 			break;
1117 		case SDEBUG_UA_MICROCODE_CHANGED:
1118 			mk_sense_buffer(scp, UNIT_ATTENTION,
1119 					TARGET_CHANGED_ASC,
1120 					MICROCODE_CHANGED_ASCQ);
1121 			if (sdebug_verbose)
1122 				cp = "microcode has been changed";
1123 			break;
1124 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1125 			mk_sense_buffer(scp, UNIT_ATTENTION,
1126 					TARGET_CHANGED_ASC,
1127 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1128 			if (sdebug_verbose)
1129 				cp = "microcode has been changed without reset";
1130 			break;
1131 		case SDEBUG_UA_LUNS_CHANGED:
1132 			/*
1133 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1134 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1135 			 * on the target, until a REPORT LUNS command is
1136 			 * received.  SPC-4 behavior is to report it only once.
1137 			 * NOTE:  sdebug_scsi_level does not use the same
1138 			 * values as struct scsi_device->scsi_level.
1139 			 */
1140 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1141 				clear_luns_changed_on_target(devip);
1142 			mk_sense_buffer(scp, UNIT_ATTENTION,
1143 					TARGET_CHANGED_ASC,
1144 					LUNS_CHANGED_ASCQ);
1145 			if (sdebug_verbose)
1146 				cp = "reported luns data has changed";
1147 			break;
1148 		default:
1149 			pr_warn("unexpected unit attention code=%d\n", k);
1150 			if (sdebug_verbose)
1151 				cp = "unknown";
1152 			break;
1153 		}
1154 		clear_bit(k, devip->uas_bm);
1155 		if (sdebug_verbose)
1156 			sdev_printk(KERN_INFO, scp->device,
1157 				   "%s reports: Unit attention: %s\n",
1158 				   my_name, cp);
1159 		return check_condition_result;
1160 	}
1161 	return 0;
1162 }
1163 
1164 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1165 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1166 				int arr_len)
1167 {
1168 	int act_len;
1169 	struct scsi_data_buffer *sdb = &scp->sdb;
1170 
1171 	if (!sdb->length)
1172 		return 0;
1173 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1174 		return DID_ERROR << 16;
1175 
1176 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1177 				      arr, arr_len);
1178 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1179 
1180 	return 0;
1181 }
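/*
 * Illustrative sketch only (hypothetical resp_*() tail, not a quote from
 * this file): a typical data-in response builds a local array and ends:
 */
#if 0
	return fill_from_dev_buffer(scp, arr,
			min_t(int, alloc_len, ARRAY_SIZE(arr)));
#endif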
1182 
1183 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1184  * (DID_ERROR << 16). Can write at an offset into the data-in buffer. If
1185  * called multiple times, the calls need not be in ascending offset order.
1186  * Assumes resid is set to scsi_bufflen() prior to any calls.
1187  */
1188 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1189 				  int arr_len, unsigned int off_dst)
1190 {
1191 	unsigned int act_len, n;
1192 	struct scsi_data_buffer *sdb = &scp->sdb;
1193 	off_t skip = off_dst;
1194 
1195 	if (sdb->length <= off_dst)
1196 		return 0;
1197 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1198 		return DID_ERROR << 16;
1199 
1200 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1201 				       arr, arr_len, skip);
1202 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1203 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1204 		 scsi_get_resid(scp));
1205 	n = scsi_bufflen(scp) - (off_dst + act_len);
1206 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1207 	return 0;
1208 }
1209 
1210 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1211  * 'arr' or -1 if error.
1212  */
1213 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1214 			       int arr_len)
1215 {
1216 	if (!scsi_bufflen(scp))
1217 		return 0;
1218 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1219 		return -1;
1220 
1221 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1222 }
1223 
1224 
1225 static char sdebug_inq_vendor_id[9] = "Linux   ";
1226 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1227 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1228 /* Use some locally assigned NAAs for SAS addresses. */
1229 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1230 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1231 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1232 
1233 /* Device identification VPD page. Returns number of bytes placed in arr */
1234 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1235 			  int target_dev_id, int dev_id_num,
1236 			  const char *dev_id_str, int dev_id_str_len,
1237 			  const uuid_t *lu_name)
1238 {
1239 	int num, port_a;
1240 	char b[32];
1241 
1242 	port_a = target_dev_id + 1;
1243 	/* T10 vendor identifier field format (faked) */
1244 	arr[0] = 0x2;	/* ASCII */
1245 	arr[1] = 0x1;
1246 	arr[2] = 0x0;
1247 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1248 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1249 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1250 	num = 8 + 16 + dev_id_str_len;
1251 	arr[3] = num;
1252 	num += 4;
1253 	if (dev_id_num >= 0) {
1254 		if (sdebug_uuid_ctl) {
1255 			/* Locally assigned UUID */
1256 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1257 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1258 			arr[num++] = 0x0;
1259 			arr[num++] = 0x12;
1260 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1261 			arr[num++] = 0x0;
1262 			memcpy(arr + num, lu_name, 16);
1263 			num += 16;
1264 		} else {
1265 			/* NAA-3, Logical unit identifier (binary) */
1266 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1267 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1268 			arr[num++] = 0x0;
1269 			arr[num++] = 0x8;
1270 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1271 			num += 8;
1272 		}
1273 		/* Target relative port number */
1274 		arr[num++] = 0x61;	/* proto=sas, binary */
1275 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1276 		arr[num++] = 0x0;	/* reserved */
1277 		arr[num++] = 0x4;	/* length */
1278 		arr[num++] = 0x0;	/* reserved */
1279 		arr[num++] = 0x0;	/* reserved */
1280 		arr[num++] = 0x0;
1281 		arr[num++] = 0x1;	/* relative port A */
1282 	}
1283 	/* NAA-3, Target port identifier */
1284 	arr[num++] = 0x61;	/* proto=sas, binary */
1285 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1286 	arr[num++] = 0x0;
1287 	arr[num++] = 0x8;
1288 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1289 	num += 8;
1290 	/* NAA-3, Target port group identifier */
1291 	arr[num++] = 0x61;	/* proto=sas, binary */
1292 	arr[num++] = 0x95;	/* piv=1, target port group id */
1293 	arr[num++] = 0x0;
1294 	arr[num++] = 0x4;
1295 	arr[num++] = 0;
1296 	arr[num++] = 0;
1297 	put_unaligned_be16(port_group_id, arr + num);
1298 	num += 2;
1299 	/* NAA-3, Target device identifier */
1300 	arr[num++] = 0x61;	/* proto=sas, binary */
1301 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1302 	arr[num++] = 0x0;
1303 	arr[num++] = 0x8;
1304 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1305 	num += 8;
1306 	/* SCSI name string: Target device identifier */
1307 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1308 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1309 	arr[num++] = 0x0;
1310 	arr[num++] = 24;
1311 	memcpy(arr + num, "naa.32222220", 12);
1312 	num += 12;
1313 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1314 	memcpy(arr + num, b, 8);
1315 	num += 8;
1316 	memset(arr + num, 0, 4);
1317 	num += 4;
1318 	return num;
1319 }
1320 
1321 static unsigned char vpd84_data[] = {
1322 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1323     0x22,0x22,0x22,0x0,0xbb,0x1,
1324     0x22,0x22,0x22,0x0,0xbb,0x2,
1325 };
1326 
1327 /*  Software interface identification VPD page */
1328 static int inquiry_vpd_84(unsigned char *arr)
1329 {
1330 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1331 	return sizeof(vpd84_data);
1332 }
1333 
1334 /* Management network addresses VPD page */
1335 static int inquiry_vpd_85(unsigned char *arr)
1336 {
1337 	int num = 0;
1338 	const char *na1 = "https://www.kernel.org/config";
1339 	const char *na2 = "http://www.kernel.org/log";
1340 	int plen, olen;
1341 
1342 	arr[num++] = 0x1;	/* lu, storage config */
1343 	arr[num++] = 0x0;	/* reserved */
1344 	arr[num++] = 0x0;
1345 	olen = strlen(na1);
1346 	plen = olen + 1;
1347 	if (plen % 4)
1348 		plen = ((plen / 4) + 1) * 4;
1349 	arr[num++] = plen;	/* length, null terminated, padded */
1350 	memcpy(arr + num, na1, olen);
1351 	memset(arr + num + olen, 0, plen - olen);
1352 	num += plen;
1353 
1354 	arr[num++] = 0x4;	/* lu, logging */
1355 	arr[num++] = 0x0;	/* reserved */
1356 	arr[num++] = 0x0;
1357 	olen = strlen(na2);
1358 	plen = olen + 1;
1359 	if (plen % 4)
1360 		plen = ((plen / 4) + 1) * 4;
1361 	arr[num++] = plen;	/* length, null terminated, padded */
1362 	memcpy(arr + num, na2, olen);
1363 	memset(arr + num + olen, 0, plen - olen);
1364 	num += plen;
1365 
1366 	return num;
1367 }
1368 
1369 /* SCSI ports VPD page */
1370 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1371 {
1372 	int num = 0;
1373 	int port_a, port_b;
1374 
1375 	port_a = target_dev_id + 1;
1376 	port_b = port_a + 1;
1377 	arr[num++] = 0x0;	/* reserved */
1378 	arr[num++] = 0x0;	/* reserved */
1379 	arr[num++] = 0x0;
1380 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1381 	memset(arr + num, 0, 6);
1382 	num += 6;
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 12;	/* length tp descriptor */
1385 	/* naa-5 target port identifier (A) */
1386 	arr[num++] = 0x61;	/* proto=sas, binary */
1387 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1388 	arr[num++] = 0x0;	/* reserved */
1389 	arr[num++] = 0x8;	/* length */
1390 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1391 	num += 8;
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x0;	/* reserved */
1394 	arr[num++] = 0x0;
1395 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1396 	memset(arr + num, 0, 6);
1397 	num += 6;
1398 	arr[num++] = 0x0;
1399 	arr[num++] = 12;	/* length tp descriptor */
1400 	/* naa-5 target port identifier (B) */
1401 	arr[num++] = 0x61;	/* proto=sas, binary */
1402 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1403 	arr[num++] = 0x0;	/* reserved */
1404 	arr[num++] = 0x8;	/* length */
1405 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1406 	num += 8;
1407 
1408 	return num;
1409 }
1410 
1411 
1412 static unsigned char vpd89_data[] = {
1413 /* from 4th byte */ 0,0,0,0,
1414 'l','i','n','u','x',' ',' ',' ',
1415 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1416 '1','2','3','4',
1417 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1418 0xec,0,0,0,
1419 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1420 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1421 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1422 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1423 0x53,0x41,
1424 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1425 0x20,0x20,
1426 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1427 0x10,0x80,
1428 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1429 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1430 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1432 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1433 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1434 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1439 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1440 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1441 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1454 };
1455 
1456 /* ATA Information VPD page */
1457 static int inquiry_vpd_89(unsigned char *arr)
1458 {
1459 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 	return sizeof(vpd89_data);
1461 }
1462 
1463 
1464 static unsigned char vpdb0_data[] = {
1465 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1466 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1467 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1468 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1469 };
1470 
1471 /* Block limits VPD page (SBC-3) */
1472 static int inquiry_vpd_b0(unsigned char *arr)
1473 {
1474 	unsigned int gran;
1475 
1476 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1477 
1478 	/* Optimal transfer length granularity */
1479 	if (sdebug_opt_xferlen_exp != 0 &&
1480 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 		gran = 1 << sdebug_opt_xferlen_exp;
1482 	else
1483 		gran = 1 << sdebug_physblk_exp;
1484 	put_unaligned_be16(gran, arr + 2);
1485 
1486 	/* Maximum Transfer Length */
1487 	if (sdebug_store_sectors > 0x400)
1488 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1489 
1490 	/* Optimal Transfer Length */
1491 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1492 
1493 	if (sdebug_lbpu) {
1494 		/* Maximum Unmap LBA Count */
1495 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1496 
1497 		/* Maximum Unmap Block Descriptor Count */
1498 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1499 	}
1500 
1501 	/* Unmap Granularity Alignment */
1502 	if (sdebug_unmap_alignment) {
1503 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 		arr[28] |= 0x80; /* UGAVALID */
1505 	}
1506 
1507 	/* Optimal Unmap Granularity */
1508 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1509 
1510 	/* Maximum WRITE SAME Length */
1511 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1512 
1513 	return 0x3c; /* mandatory Block Limits page length for Logical Block Provisioning */
1514 }
1515 
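/*
 * Note on offsets: vpdb0_data and the stores above are relative to the
 * 4th byte of the page, i.e. arr[0] is byte 4 of the Block Limits VPD
 * page as sent on the wire. A sketch of the mapping for two fields
 * (offsets per SBC-3):
 *
 *	page bytes 6..7  (optimal transfer length granularity) -> arr + 2
 *	page bytes 8..11 (maximum transfer length)              -> arr + 4
 */
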
1516 /* Block device characteristics VPD page (SBC-3) */
1517 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1518 {
1519 	memset(arr, 0, 0x3c);
1520 	arr[0] = 0;
1521 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1522 	arr[2] = 0;
1523 	arr[3] = 5;	/* less than 1.8" */
1524 	if (devip->zmodel == BLK_ZONED_HA)
1525 		arr[4] = 1 << 4;	/* zoned field = 01b */
1526 
1527 	return 0x3c;
1528 }
1529 
1530 /* Logical block provisioning VPD page (SBC-4) */
1531 static int inquiry_vpd_b2(unsigned char *arr)
1532 {
1533 	memset(arr, 0, 0x4);
1534 	arr[0] = 0;			/* threshold exponent */
1535 	if (sdebug_lbpu)
1536 		arr[1] = 1 << 7;
1537 	if (sdebug_lbpws)
1538 		arr[1] |= 1 << 6;
1539 	if (sdebug_lbpws10)
1540 		arr[1] |= 1 << 5;
1541 	if (sdebug_lbprz && scsi_debug_lbp())
1542 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1543 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1544 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1545 	/* threshold_percentage=0 */
1546 	return 0x4;
1547 }
1548 
1549 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1550 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1551 {
1552 	memset(arr, 0, 0x3c);
1553 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1554 	/*
1555 	 * Set Optimal number of open sequential write preferred zones and
1556 	 * Optimal number of non-sequentially written sequential write
1557 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1558 	 * fields set to zero, apart from Max. number of open swrz_s field.
1559 	 */
1560 	put_unaligned_be32(0xffffffff, &arr[4]);
1561 	put_unaligned_be32(0xffffffff, &arr[8]);
1562 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1563 		put_unaligned_be32(devip->max_open, &arr[12]);
1564 	else
1565 		put_unaligned_be32(0xffffffff, &arr[12]);
1566 	if (devip->zcap < devip->zsize) {
1567 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1568 		put_unaligned_be64(devip->zsize, &arr[20]);
1569 	} else {
1570 		arr[19] = 0;
1571 	}
1572 	return 0x3c;
1573 }
1574 
1575 #define SDEBUG_LONG_INQ_SZ 96
1576 #define SDEBUG_MAX_INQ_ARR_SZ 584
1577 
1578 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1579 {
1580 	unsigned char pq_pdt;
1581 	unsigned char *arr;
1582 	unsigned char *cmd = scp->cmnd;
1583 	u32 alloc_len, n;
1584 	int ret;
1585 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1586 
1587 	alloc_len = get_unaligned_be16(cmd + 3);
1588 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1589 	if (!arr)
1590 		return DID_REQUEUE << 16;
1591 	is_disk = (sdebug_ptype == TYPE_DISK);
1592 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1593 	is_disk_zbc = (is_disk || is_zbc);
1594 	have_wlun = scsi_is_wlun(scp->device->lun);
1595 	if (have_wlun)
1596 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1597 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1598 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1599 	else
1600 		pq_pdt = (sdebug_ptype & 0x1f);
1601 	arr[0] = pq_pdt;
1602 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1603 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1604 		kfree(arr);
1605 		return check_condition_result;
1606 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1607 		int lu_id_num, port_group_id, target_dev_id;
1608 		u32 len;
1609 		char lu_id_str[6];
1610 		int host_no = devip->sdbg_host->shost->host_no;
1611 
1612 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1613 		    (devip->channel & 0x7f);
1614 		if (sdebug_vpd_use_hostno == 0)
1615 			host_no = 0;
1616 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1617 			    (devip->target * 1000) + devip->lun);
1618 		target_dev_id = ((host_no + 1) * 2000) +
1619 				 (devip->target * 1000) - 3;
1620 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1621 		if (0 == cmd[2]) { /* supported vital product data pages */
1622 			arr[1] = cmd[2];	/* sanity */
1623 			n = 4;
1624 			arr[n++] = 0x0;   /* this page */
1625 			arr[n++] = 0x80;  /* unit serial number */
1626 			arr[n++] = 0x83;  /* device identification */
1627 			arr[n++] = 0x84;  /* software interface ident. */
1628 			arr[n++] = 0x85;  /* management network addresses */
1629 			arr[n++] = 0x86;  /* extended inquiry */
1630 			arr[n++] = 0x87;  /* mode page policy */
1631 			arr[n++] = 0x88;  /* SCSI ports */
1632 			if (is_disk_zbc) {	  /* SBC or ZBC */
1633 				arr[n++] = 0x89;  /* ATA information */
1634 				arr[n++] = 0xb0;  /* Block limits */
1635 				arr[n++] = 0xb1;  /* Block characteristics */
1636 				if (is_disk)
1637 					arr[n++] = 0xb2;  /* LB Provisioning */
1638 				if (is_zbc)
1639 					arr[n++] = 0xb6;  /* ZB dev. char. */
1640 			}
1641 			arr[3] = n - 4;	  /* number of supported VPD pages */
1642 		} else if (0x80 == cmd[2]) { /* unit serial number */
1643 			arr[1] = cmd[2];	/* sanity */
1644 			arr[3] = len;
1645 			memcpy(&arr[4], lu_id_str, len);
1646 		} else if (0x83 == cmd[2]) { /* device identification */
1647 			arr[1] = cmd[2];	/* sanity */
1648 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1649 						target_dev_id, lu_id_num,
1650 						lu_id_str, len,
1651 						&devip->lu_name);
1652 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1653 			arr[1] = cmd[2];	/* sanity */
1654 			arr[3] = inquiry_vpd_84(&arr[4]);
1655 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1656 			arr[1] = cmd[2];	/* sanity */
1657 			arr[3] = inquiry_vpd_85(&arr[4]);
1658 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1659 			arr[1] = cmd[2];	/* sanity */
1660 			arr[3] = 0x3c;	/* number of following entries */
1661 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1662 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1663 			else if (have_dif_prot)
1664 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1665 			else
1666 				arr[4] = 0x0;   /* no protection stuff */
1667 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1668 		} else if (0x87 == cmd[2]) { /* mode page policy */
1669 			arr[1] = cmd[2];	/* sanity */
1670 			arr[3] = 0x8;	/* number of following entries */
1671 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1672 			arr[6] = 0x80;	/* mlus, shared */
1673 			arr[8] = 0x18;	 /* protocol specific lu */
1674 			arr[10] = 0x82;	 /* mlus, per initiator port */
1675 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1676 			arr[1] = cmd[2];	/* sanity */
1677 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1678 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1679 			arr[1] = cmd[2];        /* sanity */
1680 			n = inquiry_vpd_89(&arr[4]);
1681 			put_unaligned_be16(n, arr + 2);
1682 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1683 			arr[1] = cmd[2];        /* sanity */
1684 			arr[3] = inquiry_vpd_b0(&arr[4]);
1685 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1686 			arr[1] = cmd[2];        /* sanity */
1687 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1688 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1689 			arr[1] = cmd[2];        /* sanity */
1690 			arr[3] = inquiry_vpd_b2(&arr[4]);
1691 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1692 			arr[1] = cmd[2];        /* sanity */
1693 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1694 		} else {
1695 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1696 			kfree(arr);
1697 			return check_condition_result;
1698 		}
1699 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1700 		ret = fill_from_dev_buffer(scp, arr,
1701 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1702 		kfree(arr);
1703 		return ret;
1704 	}
1705 	/* drops through here for a standard inquiry */
1706 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1707 	arr[2] = sdebug_scsi_level;
1708 	arr[3] = 2;    /* response_data_format==2 */
1709 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1710 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1711 	if (sdebug_vpd_use_hostno == 0)
1712 		arr[5] |= 0x10; /* claim: implicit TPGS */
1713 	arr[6] = 0x10; /* claim: MultiP */
1714 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1715 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1716 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1717 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1718 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1719 	/* Use Vendor Specific area to place driver date in ASCII hex */
1720 	memcpy(&arr[36], sdebug_version_date, 8);
1721 	/* version descriptors (2 bytes each) follow */
1722 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1723 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1724 	n = 62;
1725 	if (is_disk) {		/* SBC-4 no version claimed */
1726 		put_unaligned_be16(0x600, arr + n);
1727 		n += 2;
1728 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1729 		put_unaligned_be16(0x525, arr + n);
1730 		n += 2;
1731 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1732 		put_unaligned_be16(0x624, arr + n);
1733 		n += 2;
1734 	}
1735 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1736 	ret = fill_from_dev_buffer(scp, arr,
1737 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1738 	kfree(arr);
1739 	return ret;
1740 }
1741 
1742 /* See resp_iec_m_pg() for how this data is manipulated */
1743 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1744 				   0, 0, 0x0, 0x0};
1745 
1746 static int resp_requests(struct scsi_cmnd *scp,
1747 			 struct sdebug_dev_info *devip)
1748 {
1749 	unsigned char *cmd = scp->cmnd;
1750 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1751 	bool dsense = !!(cmd[1] & 1);
1752 	u32 alloc_len = cmd[4];
1753 	u32 len = 18;
1754 	int stopped_state = atomic_read(&devip->stopped);
1755 
1756 	memset(arr, 0, sizeof(arr));
1757 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1758 		if (dsense) {
1759 			arr[0] = 0x72;
1760 			arr[1] = NOT_READY;
1761 			arr[2] = LOGICAL_UNIT_NOT_READY;
1762 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1763 			len = 8;
1764 		} else {
1765 			arr[0] = 0x70;
1766 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1767 			arr[7] = 0xa;			/* 18 byte sense buffer */
1768 			arr[12] = LOGICAL_UNIT_NOT_READY;
1769 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1770 		}
1771 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1772 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1773 		if (dsense) {
1774 			arr[0] = 0x72;
1775 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1776 			arr[2] = THRESHOLD_EXCEEDED;
1777 			arr[3] = 0xff;		/* Failure prediction(false) */
1778 			len = 8;
1779 		} else {
1780 			arr[0] = 0x70;
1781 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1782 			arr[7] = 0xa;	/* 18 byte sense buffer */
1783 			arr[12] = THRESHOLD_EXCEEDED;
1784 			arr[13] = 0xff;		/* Failure prediction(false) */
1785 		}
1786 	} else {	/* nothing to report */
1787 		if (dsense) {
1788 			len = 8;
1789 			memset(arr, 0, len);
1790 			arr[0] = 0x72;
1791 		} else {
1792 			memset(arr, 0, len);
1793 			arr[0] = 0x70;
1794 			arr[7] = 0xa;
1795 		}
1796 	}
1797 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1798 }
1799 
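/*
 * resp_requests() above builds either sense format depending on the
 * DESC bit (cmd[1] & 1). A sketch of the two layouts it fills in
 * (bytes not shown are left zero):
 *
 *	fixed (0x70, 18 bytes):	arr[2] = sense key, arr[7] = 0xa
 *				(additional length), arr[12] = ASC,
 *				arr[13] = ASCQ
 *	descriptor (0x72, 8 bytes):	arr[1] = sense key, arr[2] = ASC,
 *					arr[3] = ASCQ
 */
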
1800 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1801 {
1802 	unsigned char *cmd = scp->cmnd;
1803 	int power_cond, want_stop, stopped_state;
1804 	bool changing;
1805 
1806 	power_cond = (cmd[4] & 0xf0) >> 4;
1807 	if (power_cond) {
1808 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1809 		return check_condition_result;
1810 	}
1811 	want_stop = !(cmd[4] & 1);
1812 	stopped_state = atomic_read(&devip->stopped);
1813 	if (stopped_state == 2) {
1814 		ktime_t now_ts = ktime_get_boottime();
1815 
1816 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1817 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1818 
1819 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1820 				/* tur_ms_to_ready timer expired */
1821 				atomic_set(&devip->stopped, 0);
1822 				stopped_state = 0;
1823 			}
1824 		}
1825 		if (stopped_state == 2) {
1826 			if (want_stop) {
1827 				stopped_state = 1;	/* dummy up success */
1828 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1829 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1830 				return check_condition_result;
1831 			}
1832 		}
1833 	}
1834 	changing = (stopped_state != want_stop);
1835 	if (changing)
1836 		atomic_xchg(&devip->stopped, want_stop);
1837 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1838 		return SDEG_RES_IMMED_MASK;
1839 	else
1840 		return 0;
1841 }
1842 
1843 static sector_t get_sdebug_capacity(void)
1844 {
1845 	static const unsigned int gibibyte = 1073741824;
1846 
1847 	if (sdebug_virtual_gb > 0)
1848 		return (sector_t)sdebug_virtual_gb *
1849 			(gibibyte / sdebug_sector_size);
1850 	else
1851 		return sdebug_store_sectors;
1852 }
1853 
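/*
 * Worked example for get_sdebug_capacity(): with sdebug_virtual_gb=1
 * and sdebug_sector_size=512 (illustrative module parameters), the
 * reported capacity is
 *
 *	1 * (1073741824 / 512) = 2097152 sectors (1 GiB),
 *
 * independent of the possibly smaller backing store size.
 */
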
1854 #define SDEBUG_READCAP_ARR_SZ 8
1855 static int resp_readcap(struct scsi_cmnd *scp,
1856 			struct sdebug_dev_info *devip)
1857 {
1858 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1859 	unsigned int capac;
1860 
1861 	/* following just in case virtual_gb changed */
1862 	sdebug_capacity = get_sdebug_capacity();
1863 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1864 	if (sdebug_capacity < 0xffffffff) {
1865 		capac = (unsigned int)sdebug_capacity - 1;
1866 		put_unaligned_be32(capac, arr + 0);
1867 	} else
1868 		put_unaligned_be32(0xffffffff, arr + 0);
1869 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1870 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1871 }
1872 
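/*
 * READ CAPACITY(10) returns the address of the *last* logical block,
 * hence the "- 1" above. When the capacity does not fit in 32 bits the
 * field is set to 0xffffffff, which per SBC directs the initiator to
 * issue READ CAPACITY(16) (see resp_readcap16() below) to obtain the
 * full 64-bit value.
 */
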
1873 #define SDEBUG_READCAP16_ARR_SZ 32
1874 static int resp_readcap16(struct scsi_cmnd *scp,
1875 			  struct sdebug_dev_info *devip)
1876 {
1877 	unsigned char *cmd = scp->cmnd;
1878 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1879 	u32 alloc_len;
1880 
1881 	alloc_len = get_unaligned_be32(cmd + 10);
1882 	/* following just in case virtual_gb changed */
1883 	sdebug_capacity = get_sdebug_capacity();
1884 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1885 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1886 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1887 	arr[13] = sdebug_physblk_exp & 0xf;
1888 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1889 
1890 	if (scsi_debug_lbp()) {
1891 		arr[14] |= 0x80; /* LBPME */
1892 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1893 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1894 		 * in the wider field maps to 0 in this field.
1895 		 */
1896 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1897 			arr[14] |= 0x40;
1898 	}
1899 
1900 	arr[15] = sdebug_lowest_aligned & 0xff;
1901 
1902 	if (have_dif_prot) {
1903 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1904 		arr[12] |= 1; /* PROT_EN */
1905 	}
1906 
1907 	return fill_from_dev_buffer(scp, arr,
1908 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1909 }
1910 
1911 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1912 
1913 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1914 			      struct sdebug_dev_info *devip)
1915 {
1916 	unsigned char *cmd = scp->cmnd;
1917 	unsigned char *arr;
1918 	int host_no = devip->sdbg_host->shost->host_no;
1919 	int port_group_a, port_group_b, port_a, port_b;
1920 	u32 alen, n, rlen;
1921 	int ret;
1922 
1923 	alen = get_unaligned_be32(cmd + 6);
1924 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1925 	if (!arr)
1926 		return DID_REQUEUE << 16;
1927 	/*
1928 	 * EVPD page 0x88 states we have two ports, one
1929 	 * real and one fake port with no device connected.
1930 	 * So we create two port groups with one port each
1931 	 * and set the group with port B to unavailable.
1932 	 */
1933 	port_a = 0x1; /* relative port A */
1934 	port_b = 0x2; /* relative port B */
1935 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1936 			(devip->channel & 0x7f);
1937 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1938 			(devip->channel & 0x7f) + 0x80;
1939 
1940 	/*
1941 	 * The asymmetric access state is cycled according to the host_no.
1942 	 */
1943 	n = 4;
1944 	if (sdebug_vpd_use_hostno == 0) {
1945 		arr[n++] = host_no % 3; /* Asymm access state */
1946 		arr[n++] = 0x0F; /* claim: all states are supported */
1947 	} else {
1948 		arr[n++] = 0x0; /* Active/Optimized path */
1949 		arr[n++] = 0x01; /* only support active/optimized paths */
1950 	}
1951 	put_unaligned_be16(port_group_a, arr + n);
1952 	n += 2;
1953 	arr[n++] = 0;    /* Reserved */
1954 	arr[n++] = 0;    /* Status code */
1955 	arr[n++] = 0;    /* Vendor unique */
1956 	arr[n++] = 0x1;  /* One port per group */
1957 	arr[n++] = 0;    /* Reserved */
1958 	arr[n++] = 0;    /* Reserved */
1959 	put_unaligned_be16(port_a, arr + n);
1960 	n += 2;
1961 	arr[n++] = 3;    /* Port unavailable */
1962 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1963 	put_unaligned_be16(port_group_b, arr + n);
1964 	n += 2;
1965 	arr[n++] = 0;    /* Reserved */
1966 	arr[n++] = 0;    /* Status code */
1967 	arr[n++] = 0;    /* Vendor unique */
1968 	arr[n++] = 0x1;  /* One port per group */
1969 	arr[n++] = 0;    /* Reserved */
1970 	arr[n++] = 0;    /* Reserved */
1971 	put_unaligned_be16(port_b, arr + n);
1972 	n += 2;
1973 
1974 	rlen = n - 4;
1975 	put_unaligned_be32(rlen, arr + 0);
1976 
1977 	/*
1978 	 * Return the smallest of:
1979 	 * - the allocated length,
1980 	 * - the constructed response length,
1981 	 * - the maximum array size.
1982 	 */
1983 	rlen = min(alen, n);
1984 	ret = fill_from_dev_buffer(scp, arr,
1985 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1986 	kfree(arr);
1987 	return ret;
1988 }
1989 
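/*
 * Worked example of the port group numbering above, assuming host_no=0
 * and channel=0 (illustrative values):
 *
 *	port_group_a = (((0 + 1) & 0x7f) << 8) + 0 = 0x100
 *	port_group_b = 0x100 + 0x80                = 0x180
 *
 * The host number selects the high byte, so the two groups of one host
 * cannot collide with those of another host.
 */
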
1990 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1991 			     struct sdebug_dev_info *devip)
1992 {
1993 	bool rctd;
1994 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1995 	u16 req_sa, u;
1996 	u32 alloc_len, a_len;
1997 	int k, offset, len, errsts, count, bump, na;
1998 	const struct opcode_info_t *oip;
1999 	const struct opcode_info_t *r_oip;
2000 	u8 *arr;
2001 	u8 *cmd = scp->cmnd;
2002 
2003 	rctd = !!(cmd[2] & 0x80);
2004 	reporting_opts = cmd[2] & 0x7;
2005 	req_opcode = cmd[3];
2006 	req_sa = get_unaligned_be16(cmd + 4);
2007 	alloc_len = get_unaligned_be32(cmd + 6);
2008 	if (alloc_len < 4 || alloc_len > 0xffff) {
2009 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2010 		return check_condition_result;
2011 	}
2012 	if (alloc_len > 8192)
2013 		a_len = 8192;
2014 	else
2015 		a_len = alloc_len;
2016 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2017 	if (NULL == arr) {
2018 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2019 				INSUFF_RES_ASCQ);
2020 		return check_condition_result;
2021 	}
2022 	switch (reporting_opts) {
2023 	case 0:	/* all commands */
2024 		/* count number of commands */
2025 		for (count = 0, oip = opcode_info_arr;
2026 		     oip->num_attached != 0xff; ++oip) {
2027 			if (F_INV_OP & oip->flags)
2028 				continue;
2029 			count += (oip->num_attached + 1);
2030 		}
2031 		bump = rctd ? 20 : 8;
2032 		put_unaligned_be32(count * bump, arr);
2033 		for (offset = 4, oip = opcode_info_arr;
2034 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2035 			if (F_INV_OP & oip->flags)
2036 				continue;
2037 			na = oip->num_attached;
2038 			arr[offset] = oip->opcode;
2039 			put_unaligned_be16(oip->sa, arr + offset + 2);
2040 			if (rctd)
2041 				arr[offset + 5] |= 0x2;
2042 			if (FF_SA & oip->flags)
2043 				arr[offset + 5] |= 0x1;
2044 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2045 			if (rctd)
2046 				put_unaligned_be16(0xa, arr + offset + 8);
2047 			r_oip = oip;
2048 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2049 				if (F_INV_OP & oip->flags)
2050 					continue;
2051 				offset += bump;
2052 				arr[offset] = oip->opcode;
2053 				put_unaligned_be16(oip->sa, arr + offset + 2);
2054 				if (rctd)
2055 					arr[offset + 5] |= 0x2;
2056 				if (FF_SA & oip->flags)
2057 					arr[offset + 5] |= 0x1;
2058 				put_unaligned_be16(oip->len_mask[0],
2059 						   arr + offset + 6);
2060 				if (rctd)
2061 					put_unaligned_be16(0xa,
2062 							   arr + offset + 8);
2063 			}
2064 			oip = r_oip;
2065 			offset += bump;
2066 		}
2067 		break;
2068 	case 1:	/* one command: opcode only */
2069 	case 2:	/* one command: opcode plus service action */
2070 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2071 		sdeb_i = opcode_ind_arr[req_opcode];
2072 		oip = &opcode_info_arr[sdeb_i];
2073 		if (F_INV_OP & oip->flags) {
2074 			supp = 1;
2075 			offset = 4;
2076 		} else {
2077 			if (1 == reporting_opts) {
2078 				if (FF_SA & oip->flags) {
2079 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2080 							     2, 2);
2081 					kfree(arr);
2082 					return check_condition_result;
2083 				}
2084 				req_sa = 0;
2085 			} else if (2 == reporting_opts &&
2086 				   0 == (FF_SA & oip->flags)) {
2087 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* points at requested sa */
2088 				kfree(arr);
2089 				return check_condition_result;
2090 			}
2091 			if (0 == (FF_SA & oip->flags) &&
2092 			    req_opcode == oip->opcode)
2093 				supp = 3;
2094 			else if (0 == (FF_SA & oip->flags)) {
2095 				na = oip->num_attached;
2096 				for (k = 0, oip = oip->arrp; k < na;
2097 				     ++k, ++oip) {
2098 					if (req_opcode == oip->opcode)
2099 						break;
2100 				}
2101 				supp = (k >= na) ? 1 : 3;
2102 			} else if (req_sa != oip->sa) {
2103 				na = oip->num_attached;
2104 				for (k = 0, oip = oip->arrp; k < na;
2105 				     ++k, ++oip) {
2106 					if (req_sa == oip->sa)
2107 						break;
2108 				}
2109 				supp = (k >= na) ? 1 : 3;
2110 			} else
2111 				supp = 3;
2112 			if (3 == supp) {
2113 				u = oip->len_mask[0];
2114 				put_unaligned_be16(u, arr + 2);
2115 				arr[4] = oip->opcode;
2116 				for (k = 1; k < u; ++k)
2117 					arr[4 + k] = (k < 16) ?
2118 						 oip->len_mask[k] : 0xff;
2119 				offset = 4 + u;
2120 			} else
2121 				offset = 4;
2122 		}
2123 		arr[1] = (rctd ? 0x80 : 0) | supp;
2124 		if (rctd) {
2125 			put_unaligned_be16(0xa, arr + offset);
2126 			offset += 12;
2127 		}
2128 		break;
2129 	default:
2130 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2131 		kfree(arr);
2132 		return check_condition_result;
2133 	}
2134 	offset = (offset < a_len) ? offset : a_len;
2135 	len = (offset < alloc_len) ? offset : alloc_len;
2136 	errsts = fill_from_dev_buffer(scp, arr, len);
2137 	kfree(arr);
2138 	return errsts;
2139 }
2140 
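/*
 * In resp_rsup_opcodes() above, each command descriptor is 8 bytes,
 * growing to 20 when the caller sets RCTD: the extra 12 bytes hold a
 * command timeouts descriptor whose descriptor length field is set to
 * 0xa. Hence bump = rctd ? 20 : 8.
 */
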
2141 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2142 			  struct sdebug_dev_info *devip)
2143 {
2144 	bool repd;
2145 	u32 alloc_len, len;
2146 	u8 arr[16];
2147 	u8 *cmd = scp->cmnd;
2148 
2149 	memset(arr, 0, sizeof(arr));
2150 	repd = !!(cmd[2] & 0x80);
2151 	alloc_len = get_unaligned_be32(cmd + 6);
2152 	if (alloc_len < 4) {
2153 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2154 		return check_condition_result;
2155 	}
2156 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2157 	arr[1] = 0x1;		/* ITNRS */
2158 	if (repd) {
2159 		arr[3] = 0xc;
2160 		len = 16;
2161 	} else
2162 		len = 4;
2163 
2164 	len = (len < alloc_len) ? len : alloc_len;
2165 	return fill_from_dev_buffer(scp, arr, len);
2166 }
2167 
2168 /* <<Following mode page info copied from ST318451LW>> */
2169 
2170 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2171 {	/* Read-Write Error Recovery page for mode_sense */
2172 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2173 					5, 0, 0xff, 0xff};
2174 
2175 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2176 	if (1 == pcontrol)
2177 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2178 	return sizeof(err_recov_pg);
2179 }
2180 
2181 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2182 { 	/* Disconnect-Reconnect page for mode_sense */
2183 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2184 					 0, 0, 0, 0, 0, 0, 0, 0};
2185 
2186 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2187 	if (1 == pcontrol)
2188 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2189 	return sizeof(disconnect_pg);
2190 }
2191 
2192 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2193 {       /* Format device page for mode_sense */
2194 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2195 				     0, 0, 0, 0, 0, 0, 0, 0,
2196 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2197 
2198 	memcpy(p, format_pg, sizeof(format_pg));
2199 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2200 	put_unaligned_be16(sdebug_sector_size, p + 12);
2201 	if (sdebug_removable)
2202 		p[20] |= 0x20; /* should agree with INQUIRY */
2203 	if (1 == pcontrol)
2204 		memset(p + 2, 0, sizeof(format_pg) - 2);
2205 	return sizeof(format_pg);
2206 }
2207 
2208 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2209 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2210 				     0, 0, 0, 0};
2211 
2212 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2213 { 	/* Caching page for mode_sense */
2214 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2215 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2216 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2217 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2218 
2219 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2220 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2221 	memcpy(p, caching_pg, sizeof(caching_pg));
2222 	if (1 == pcontrol)
2223 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2224 	else if (2 == pcontrol)
2225 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2226 	return sizeof(caching_pg);
2227 }
2228 
2229 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2230 				    0, 0, 0x2, 0x4b};
2231 
2232 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2233 { 	/* Control mode page for mode_sense */
2234 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2235 					0, 0, 0, 0};
2236 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2237 				     0, 0, 0x2, 0x4b};
2238 
2239 	if (sdebug_dsense)
2240 		ctrl_m_pg[2] |= 0x4;
2241 	else
2242 		ctrl_m_pg[2] &= ~0x4;
2243 
2244 	if (sdebug_ato)
2245 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2246 
2247 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2248 	if (1 == pcontrol)
2249 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2250 	else if (2 == pcontrol)
2251 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2252 	return sizeof(ctrl_m_pg);
2253 }
2254 
2255 
2256 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2257 {	/* Informational Exceptions control mode page for mode_sense */
2258 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2259 				       0, 0, 0x0, 0x0};
2260 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2261 				      0, 0, 0x0, 0x0};
2262 
2263 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2264 	if (1 == pcontrol)
2265 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2266 	else if (2 == pcontrol)
2267 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2268 	return sizeof(iec_m_pg);
2269 }
2270 
2271 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2272 {	/* SAS SSP mode page - short format for mode_sense */
2273 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2274 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2275 
2276 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2277 	if (1 == pcontrol)
2278 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2279 	return sizeof(sas_sf_m_pg);
2280 }
2281 
2282 
2283 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2284 			      int target_dev_id)
2285 {	/* SAS phy control and discover mode page for mode_sense */
2286 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2287 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2288 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2289 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2290 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2291 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2292 		    0, 0, 0, 0, 0, 0, 0, 0,
2293 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2294 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2295 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2296 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2297 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2298 		    0, 0, 0, 0, 0, 0, 0, 0,
2299 		};
2300 	int port_a, port_b;
2301 
2302 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2303 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2304 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2305 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2306 	port_a = target_dev_id + 1;
2307 	port_b = port_a + 1;
2308 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2309 	put_unaligned_be32(port_a, p + 20);
2310 	put_unaligned_be32(port_b, p + 48 + 20);
2311 	if (1 == pcontrol)
2312 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2313 	return sizeof(sas_pcd_m_pg);
2314 }
2315 
2316 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2317 {	/* SAS SSP shared protocol specific port mode subpage */
2318 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2319 		    0, 0, 0, 0, 0, 0, 0, 0,
2320 		};
2321 
2322 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2323 	if (1 == pcontrol)
2324 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2325 	return sizeof(sas_sha_m_pg);
2326 }
2327 
2328 #define SDEBUG_MAX_MSENSE_SZ 256
2329 
2330 static int resp_mode_sense(struct scsi_cmnd *scp,
2331 			   struct sdebug_dev_info *devip)
2332 {
2333 	int pcontrol, pcode, subpcode, bd_len;
2334 	unsigned char dev_spec;
2335 	u32 alloc_len, offset, len;
2336 	int target_dev_id;
2337 	int target = scp->device->id;
2338 	unsigned char *ap;
2339 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2340 	unsigned char *cmd = scp->cmnd;
2341 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2342 
2343 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2344 	pcontrol = (cmd[2] & 0xc0) >> 6;
2345 	pcode = cmd[2] & 0x3f;
2346 	subpcode = cmd[3];
2347 	msense_6 = (MODE_SENSE == cmd[0]);
2348 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2349 	is_disk = (sdebug_ptype == TYPE_DISK);
2350 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2351 	if ((is_disk || is_zbc) && !dbd)
2352 		bd_len = llbaa ? 16 : 8;
2353 	else
2354 		bd_len = 0;
2355 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2356 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2357 	if (0x3 == pcontrol) {  /* Saving values not supported */
2358 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2359 		return check_condition_result;
2360 	}
2361 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2362 			(devip->target * 1000) - 3;
2363 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2364 	if (is_disk || is_zbc) {
2365 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2366 		if (sdebug_wp)
2367 			dev_spec |= 0x80;
2368 	} else
2369 		dev_spec = 0x0;
2370 	if (msense_6) {
2371 		arr[2] = dev_spec;
2372 		arr[3] = bd_len;
2373 		offset = 4;
2374 	} else {
2375 		arr[3] = dev_spec;
2376 		if (16 == bd_len)
2377 			arr[4] = 0x1;	/* set LONGLBA bit */
2378 		arr[7] = bd_len;	/* assume 255 or less */
2379 		offset = 8;
2380 	}
2381 	ap = arr + offset;
2382 	if ((bd_len > 0) && (!sdebug_capacity))
2383 		sdebug_capacity = get_sdebug_capacity();
2384 
2385 	if (8 == bd_len) {
2386 		if (sdebug_capacity > 0xfffffffe)
2387 			put_unaligned_be32(0xffffffff, ap + 0);
2388 		else
2389 			put_unaligned_be32(sdebug_capacity, ap + 0);
2390 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2391 		offset += bd_len;
2392 		ap = arr + offset;
2393 	} else if (16 == bd_len) {
2394 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2395 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2396 		offset += bd_len;
2397 		ap = arr + offset;
2398 	}
2399 
2400 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2401 		/* TODO: Control Extension page */
2402 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2403 		return check_condition_result;
2404 	}
2405 	bad_pcode = false;
2406 
2407 	switch (pcode) {
2408 	case 0x1:	/* Read-Write error recovery page, direct access */
2409 		len = resp_err_recov_pg(ap, pcontrol, target);
2410 		offset += len;
2411 		break;
2412 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2413 		len = resp_disconnect_pg(ap, pcontrol, target);
2414 		offset += len;
2415 		break;
2416 	case 0x3:       /* Format device page, direct access */
2417 		if (is_disk) {
2418 			len = resp_format_pg(ap, pcontrol, target);
2419 			offset += len;
2420 		} else
2421 			bad_pcode = true;
2422 		break;
2423 	case 0x8:	/* Caching page, direct access */
2424 		if (is_disk || is_zbc) {
2425 			len = resp_caching_pg(ap, pcontrol, target);
2426 			offset += len;
2427 		} else
2428 			bad_pcode = true;
2429 		break;
2430 	case 0xa:	/* Control Mode page, all devices */
2431 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2432 		offset += len;
2433 		break;
2434 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2435 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2436 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2437 			return check_condition_result;
2438 		}
2439 		len = 0;
2440 		if ((0x0 == subpcode) || (0xff == subpcode))
2441 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2442 		if ((0x1 == subpcode) || (0xff == subpcode))
2443 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2444 						  target_dev_id);
2445 		if ((0x2 == subpcode) || (0xff == subpcode))
2446 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2447 		offset += len;
2448 		break;
2449 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2450 		len = resp_iec_m_pg(ap, pcontrol, target);
2451 		offset += len;
2452 		break;
2453 	case 0x3f:	/* Read all Mode pages */
2454 		if ((0 == subpcode) || (0xff == subpcode)) {
2455 			len = resp_err_recov_pg(ap, pcontrol, target);
2456 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2457 			if (is_disk) {
2458 				len += resp_format_pg(ap + len, pcontrol,
2459 						      target);
2460 				len += resp_caching_pg(ap + len, pcontrol,
2461 						       target);
2462 			} else if (is_zbc) {
2463 				len += resp_caching_pg(ap + len, pcontrol,
2464 						       target);
2465 			}
2466 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2467 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2468 			if (0xff == subpcode) {
2469 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2470 						  target, target_dev_id);
2471 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2472 			}
2473 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2474 			offset += len;
2475 		} else {
2476 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2477 			return check_condition_result;
2478 		}
2479 		break;
2480 	default:
2481 		bad_pcode = true;
2482 		break;
2483 	}
2484 	if (bad_pcode) {
2485 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2486 		return check_condition_result;
2487 	}
2488 	if (msense_6)
2489 		arr[0] = offset - 1;
2490 	else
2491 		put_unaligned_be16((offset - 2), arr + 0);
2492 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2493 }
2494 
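/*
 * The final length fix-up in resp_mode_sense() reflects the definition
 * of the MODE DATA LENGTH field, which does not count itself: for the
 * 6-byte command it is one byte (arr[0] = offset - 1), for the 10-byte
 * command it is two bytes (offset - 2, stored big-endian at arr + 0).
 */
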
2495 #define SDEBUG_MAX_MSELECT_SZ 512
2496 
2497 static int resp_mode_select(struct scsi_cmnd *scp,
2498 			    struct sdebug_dev_info *devip)
2499 {
2500 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2501 	int param_len, res, mpage;
2502 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2503 	unsigned char *cmd = scp->cmnd;
2504 	int mselect6 = (MODE_SELECT == cmd[0]);
2505 
2506 	memset(arr, 0, sizeof(arr));
2507 	pf = cmd[1] & 0x10;
2508 	sp = cmd[1] & 0x1;
2509 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2510 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2511 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2512 		return check_condition_result;
2513 	}
2514 	res = fetch_to_dev_buffer(scp, arr, param_len);
2515 	if (-1 == res)
2516 		return DID_ERROR << 16;
2517 	else if (sdebug_verbose && (res < param_len))
2518 		sdev_printk(KERN_INFO, scp->device,
2519 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2520 			    __func__, param_len, res);
2521 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2522 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2523 	off = bd_len + (mselect6 ? 4 : 8);
2524 	if (md_len > 2 || off >= res) {
2525 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2526 		return check_condition_result;
2527 	}
2528 	mpage = arr[off] & 0x3f;
2529 	ps = !!(arr[off] & 0x80);
2530 	if (ps) {
2531 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2532 		return check_condition_result;
2533 	}
2534 	spf = !!(arr[off] & 0x40);
2535 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2536 		       (arr[off + 1] + 2);
2537 	if ((pg_len + off) > param_len) {
2538 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2539 				PARAMETER_LIST_LENGTH_ERR, 0);
2540 		return check_condition_result;
2541 	}
2542 	switch (mpage) {
2543 	case 0x8:      /* Caching Mode page */
2544 		if (caching_pg[1] == arr[off + 1]) {
2545 			memcpy(caching_pg + 2, arr + off + 2,
2546 			       sizeof(caching_pg) - 2);
2547 			goto set_mode_changed_ua;
2548 		}
2549 		break;
2550 	case 0xa:      /* Control Mode page */
2551 		if (ctrl_m_pg[1] == arr[off + 1]) {
2552 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2553 			       sizeof(ctrl_m_pg) - 2);
2554 			if (ctrl_m_pg[4] & 0x8)
2555 				sdebug_wp = true;
2556 			else
2557 				sdebug_wp = false;
2558 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2559 			goto set_mode_changed_ua;
2560 		}
2561 		break;
2562 	case 0x1c:      /* Informational Exceptions Mode page */
2563 		if (iec_m_pg[1] == arr[off + 1]) {
2564 			memcpy(iec_m_pg + 2, arr + off + 2,
2565 			       sizeof(iec_m_pg) - 2);
2566 			goto set_mode_changed_ua;
2567 		}
2568 		break;
2569 	default:
2570 		break;
2571 	}
2572 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2573 	return check_condition_result;
2574 set_mode_changed_ua:
2575 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2576 	return 0;
2577 }
2578 
2579 static int resp_temp_l_pg(unsigned char *arr)
2580 {
2581 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2582 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2583 		};
2584 
2585 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2586 	return sizeof(temp_l_pg);
2587 }
2588 
2589 static int resp_ie_l_pg(unsigned char *arr)
2590 {
2591 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2592 		};
2593 
2594 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2595 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2596 		arr[4] = THRESHOLD_EXCEEDED;
2597 		arr[5] = 0xff;
2598 	}
2599 	return sizeof(ie_l_pg);
2600 }
2601 
2602 static int resp_env_rep_l_spg(unsigned char *arr)
2603 {
2604 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2605 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2606 					 0x1, 0x0, 0x23, 0x8,
2607 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2608 		};
2609 
2610 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2611 	return sizeof(env_rep_l_spg);
2612 }
2613 
2614 #define SDEBUG_MAX_LSENSE_SZ 512
2615 
2616 static int resp_log_sense(struct scsi_cmnd *scp,
2617 			  struct sdebug_dev_info *devip)
2618 {
2619 	int ppc, sp, pcode, subpcode;
2620 	u32 alloc_len, len, n;
2621 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2622 	unsigned char *cmd = scp->cmnd;
2623 
2624 	memset(arr, 0, sizeof(arr));
2625 	ppc = cmd[1] & 0x2;
2626 	sp = cmd[1] & 0x1;
2627 	if (ppc || sp) {
2628 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2629 		return check_condition_result;
2630 	}
2631 	pcode = cmd[2] & 0x3f;
2632 	subpcode = cmd[3] & 0xff;
2633 	alloc_len = get_unaligned_be16(cmd + 7);
2634 	arr[0] = pcode;
2635 	if (0 == subpcode) {
2636 		switch (pcode) {
2637 		case 0x0:	/* Supported log pages log page */
2638 			n = 4;
2639 			arr[n++] = 0x0;		/* this page */
2640 			arr[n++] = 0xd;		/* Temperature */
2641 			arr[n++] = 0x2f;	/* Informational exceptions */
2642 			arr[3] = n - 4;
2643 			break;
2644 		case 0xd:	/* Temperature log page */
2645 			arr[3] = resp_temp_l_pg(arr + 4);
2646 			break;
2647 		case 0x2f:	/* Informational exceptions log page */
2648 			arr[3] = resp_ie_l_pg(arr + 4);
2649 			break;
2650 		default:
2651 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2652 			return check_condition_result;
2653 		}
2654 	} else if (0xff == subpcode) {
2655 		arr[0] |= 0x40;
2656 		arr[1] = subpcode;
2657 		switch (pcode) {
2658 		case 0x0:	/* Supported log pages and subpages log page */
2659 			n = 4;
2660 			arr[n++] = 0x0;
2661 			arr[n++] = 0x0;		/* 0,0 page */
2662 			arr[n++] = 0x0;
2663 			arr[n++] = 0xff;	/* this page */
2664 			arr[n++] = 0xd;
2665 			arr[n++] = 0x0;		/* Temperature */
2666 			arr[n++] = 0xd;
2667 			arr[n++] = 0x1;		/* Environment reporting */
2668 			arr[n++] = 0xd;
2669 			arr[n++] = 0xff;	/* all 0xd subpages */
2670 			arr[n++] = 0x2f;
2671 			arr[n++] = 0x0;	/* Informational exceptions */
2672 			arr[n++] = 0x2f;
2673 			arr[n++] = 0xff;	/* all 0x2f subpages */
2674 			arr[3] = n - 4;
2675 			break;
2676 		case 0xd:	/* Temperature subpages */
2677 			n = 4;
2678 			arr[n++] = 0xd;
2679 			arr[n++] = 0x0;		/* Temperature */
2680 			arr[n++] = 0xd;
2681 			arr[n++] = 0x1;		/* Environment reporting */
2682 			arr[n++] = 0xd;
2683 			arr[n++] = 0xff;	/* these subpages */
2684 			arr[3] = n - 4;
2685 			break;
2686 		case 0x2f:	/* Informational exceptions subpages */
2687 			n = 4;
2688 			arr[n++] = 0x2f;
2689 			arr[n++] = 0x0;		/* Informational exceptions */
2690 			arr[n++] = 0x2f;
2691 			arr[n++] = 0xff;	/* these subpages */
2692 			arr[3] = n - 4;
2693 			break;
2694 		default:
2695 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2696 			return check_condition_result;
2697 		}
2698 	} else if (subpcode > 0) {
2699 		arr[0] |= 0x40;
2700 		arr[1] = subpcode;
2701 		if (pcode == 0xd && subpcode == 1) {
2702 			arr[3] = resp_env_rep_l_spg(arr + 4);
2703 		} else {
2704 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2705 			return check_condition_result;
2706 		}
2707 	} else {
2708 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2709 		return check_condition_result;
2710 	}
2711 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2712 	return fill_from_dev_buffer(scp, arr,
2713 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2714 }
2715 
2716 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2717 {
2718 	return devip->nr_zones != 0;
2719 }
2720 
2721 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2722 					unsigned long long lba)
2723 {
2724 	u32 zno = lba >> devip->zsize_shift;
2725 	struct sdeb_zone_state *zsp;
2726 
2727 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2728 		return &devip->zstate[zno];
2729 
2730 	/*
2731 	 * If the zone capacity is less than the zone size, adjust for gap
2732 	 * zones.
2733 	 */
2734 	zno = 2 * zno - devip->nr_conv_zones;
2735 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2736 	zsp = &devip->zstate[zno];
2737 	if (lba >= zsp->z_start + zsp->z_size)
2738 		zsp++;
2739 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2740 	return zsp;
2741 }
2742 
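/*
 * Worked example of the gap-zone index mapping in zbc_zone(): when
 * zcap < zsize, each sequential zone is followed by a gap zone in
 * devip->zstate[], so with nr_conv_zones=4 (illustrative) an LBA in
 * the 6th zone of the LBA space (zno=5) maps to index 2*5 - 4 = 6;
 * the "zsp++" then selects the trailing gap zone when the LBA lies
 * past the zone capacity.
 */
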
2743 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2744 {
2745 	return zsp->z_type == ZBC_ZTYPE_CNV;
2746 }
2747 
2748 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2749 {
2750 	return zsp->z_type == ZBC_ZTYPE_GAP;
2751 }
2752 
2753 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2754 {
2755 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2756 }
2757 
2758 static void zbc_close_zone(struct sdebug_dev_info *devip,
2759 			   struct sdeb_zone_state *zsp)
2760 {
2761 	enum sdebug_z_cond zc;
2762 
2763 	if (!zbc_zone_is_seq(zsp))
2764 		return;
2765 
2766 	zc = zsp->z_cond;
2767 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2768 		return;
2769 
2770 	if (zc == ZC2_IMPLICIT_OPEN)
2771 		devip->nr_imp_open--;
2772 	else
2773 		devip->nr_exp_open--;
2774 
2775 	if (zsp->z_wp == zsp->z_start) {
2776 		zsp->z_cond = ZC1_EMPTY;
2777 	} else {
2778 		zsp->z_cond = ZC4_CLOSED;
2779 		devip->nr_closed++;
2780 	}
2781 }
2782 
2783 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2784 {
2785 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2786 	unsigned int i;
2787 
2788 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2789 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2790 			zbc_close_zone(devip, zsp);
2791 			return;
2792 		}
2793 	}
2794 }
2795 
2796 static void zbc_open_zone(struct sdebug_dev_info *devip,
2797 			  struct sdeb_zone_state *zsp, bool explicit)
2798 {
2799 	enum sdebug_z_cond zc;
2800 
2801 	if (!zbc_zone_is_seq(zsp))
2802 		return;
2803 
2804 	zc = zsp->z_cond;
2805 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2806 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2807 		return;
2808 
2809 	/* Close an implicit open zone if necessary */
2810 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2811 		zbc_close_zone(devip, zsp);
2812 	else if (devip->max_open &&
2813 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2814 		zbc_close_imp_open_zone(devip);
2815 
2816 	if (zsp->z_cond == ZC4_CLOSED)
2817 		devip->nr_closed--;
2818 	if (explicit) {
2819 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2820 		devip->nr_exp_open++;
2821 	} else {
2822 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2823 		devip->nr_imp_open++;
2824 	}
2825 }
2826 
2827 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2828 				     struct sdeb_zone_state *zsp)
2829 {
2830 	switch (zsp->z_cond) {
2831 	case ZC2_IMPLICIT_OPEN:
2832 		devip->nr_imp_open--;
2833 		break;
2834 	case ZC3_EXPLICIT_OPEN:
2835 		devip->nr_exp_open--;
2836 		break;
2837 	default:
2838 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2839 			  zsp->z_start, zsp->z_cond);
2840 		break;
2841 	}
2842 	zsp->z_cond = ZC5_FULL;
2843 }
2844 
2845 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2846 		       unsigned long long lba, unsigned int num)
2847 {
2848 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2849 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2850 
2851 	if (!zbc_zone_is_seq(zsp))
2852 		return;
2853 
2854 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2855 		zsp->z_wp += num;
2856 		if (zsp->z_wp >= zend)
2857 			zbc_set_zone_full(devip, zsp);
2858 		return;
2859 	}
2860 
2861 	while (num) {
2862 		if (lba != zsp->z_wp)
2863 			zsp->z_non_seq_resource = true;
2864 
2865 		end = lba + num;
2866 		if (end >= zend) {
2867 			n = zend - lba;
2868 			zsp->z_wp = zend;
2869 		} else if (end > zsp->z_wp) {
2870 			n = num;
2871 			zsp->z_wp = end;
2872 		} else {
2873 			n = num;
2874 		}
2875 		if (zsp->z_wp >= zend)
2876 			zbc_set_zone_full(devip, zsp);
2877 
2878 		num -= n;
2879 		lba += n;
2880 		if (num) {
2881 			zsp++;
2882 			zend = zsp->z_start + zsp->z_size;
2883 		}
2884 	}
2885 }
2886 
2887 static int check_zbc_access_params(struct scsi_cmnd *scp,
2888 			unsigned long long lba, unsigned int num, bool write)
2889 {
2890 	struct scsi_device *sdp = scp->device;
2891 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2892 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2893 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2894 
2895 	if (!write) {
2896 		if (devip->zmodel == BLK_ZONED_HA)
2897 			return 0;
2898 		/* For host-managed, reads cannot cross zone type boundaries */
2899 		if (zsp->z_type != zsp_end->z_type) {
2900 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2901 					LBA_OUT_OF_RANGE,
2902 					READ_INVDATA_ASCQ);
2903 			return check_condition_result;
2904 		}
2905 		return 0;
2906 	}
2907 
2908 	/* Writing into a gap zone is not allowed */
2909 	if (zbc_zone_is_gap(zsp)) {
2910 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2911 				ATTEMPT_ACCESS_GAP);
2912 		return check_condition_result;
2913 	}
2914 
2915 	/* No restrictions for writes within conventional zones */
2916 	if (zbc_zone_is_conv(zsp)) {
2917 		if (!zbc_zone_is_conv(zsp_end)) {
2918 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2919 					LBA_OUT_OF_RANGE,
2920 					WRITE_BOUNDARY_ASCQ);
2921 			return check_condition_result;
2922 		}
2923 		return 0;
2924 	}
2925 
2926 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2927 		/* Writes cannot cross sequential zone boundaries */
2928 		if (zsp_end != zsp) {
2929 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2930 					LBA_OUT_OF_RANGE,
2931 					WRITE_BOUNDARY_ASCQ);
2932 			return check_condition_result;
2933 		}
2934 		/* Cannot write full zones */
2935 		if (zsp->z_cond == ZC5_FULL) {
2936 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2937 					INVALID_FIELD_IN_CDB, 0);
2938 			return check_condition_result;
2939 		}
2940 		/* Writes must be aligned to the zone WP */
2941 		if (lba != zsp->z_wp) {
2942 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2943 					LBA_OUT_OF_RANGE,
2944 					UNALIGNED_WRITE_ASCQ);
2945 			return check_condition_result;
2946 		}
2947 	}
2948 
2949 	/* Handle implicit open of closed and empty zones */
2950 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2951 		if (devip->max_open &&
2952 		    devip->nr_exp_open >= devip->max_open) {
2953 			mk_sense_buffer(scp, DATA_PROTECT,
2954 					INSUFF_RES_ASC,
2955 					INSUFF_ZONE_ASCQ);
2956 			return check_condition_result;
2957 		}
2958 		zbc_open_zone(devip, zsp, false);
2959 	}
2960 
2961 	return 0;
2962 }
2963 
2964 static inline int
2965 check_device_access_params(struct scsi_cmnd *scp, unsigned long long lba,
2966 			   unsigned int num, bool write)
2967 {
2968 	struct scsi_device *sdp = scp->device;
2969 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2970 
2971 	if (lba + num > sdebug_capacity) {
2972 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2973 		return check_condition_result;
2974 	}
2975 	/* transfer length excessive (tie in to block limits VPD page) */
2976 	if (num > sdebug_store_sectors) {
2977 		/* needs work to find which cdb byte 'num' comes from */
2978 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2979 		return check_condition_result;
2980 	}
2981 	if (write && unlikely(sdebug_wp)) {
2982 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2983 		return check_condition_result;
2984 	}
2985 	if (sdebug_dev_is_zoned(devip))
2986 		return check_zbc_access_params(scp, lba, num, write);
2987 
2988 	return 0;
2989 }
2990 
2991 /*
2992  * Note: if BUG_ON() fires it usually indicates a problem with the parser
2993  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2994  * that access any of the "stores" in struct sdeb_store_info should call this
2995  * function with bug_if_fake_rw set to true.
2996  */
2997 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2998 						bool bug_if_fake_rw)
2999 {
3000 	if (sdebug_fake_rw) {
3001 		BUG_ON(bug_if_fake_rw);	/* See note above */
3002 		return NULL;
3003 	}
3004 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3005 }
3006 
3007 /* Returns number of bytes copied or -1 if error. */
3008 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3009 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3010 {
3011 	int ret;
3012 	u64 block, rest = 0;
3013 	enum dma_data_direction dir;
3014 	struct scsi_data_buffer *sdb = &scp->sdb;
3015 	u8 *fsp;
3016 
3017 	if (do_write) {
3018 		dir = DMA_TO_DEVICE;
3019 		write_since_sync = true;
3020 	} else {
3021 		dir = DMA_FROM_DEVICE;
3022 	}
3023 
3024 	if (!sdb->length || !sip)
3025 		return 0;
3026 	if (scp->sc_data_direction != dir)
3027 		return -1;
3028 	fsp = sip->storep;
3029 
3030 	block = do_div(lba, sdebug_store_sectors);
3031 	if (block + num > sdebug_store_sectors)
3032 		rest = block + num - sdebug_store_sectors;
3033 
3034 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3035 		   fsp + (block * sdebug_sector_size),
3036 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3037 	if (ret != (num - rest) * sdebug_sector_size)
3038 		return ret;
3039 
3040 	if (rest) {
3041 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3042 			    fsp, rest * sdebug_sector_size,
3043 			    sg_skip + ((num - rest) * sdebug_sector_size),
3044 			    do_write);
3045 	}
3046 
3047 	return ret;
3048 }
3049 
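/*
 * Worked example of the wrap-around split in do_device_access(): with
 * sdebug_store_sectors=128 (illustrative), an access at block=120 for
 * num=16 gives rest = 120 + 16 - 128 = 8, so the first sg_copy_buffer()
 * call moves 8 sectors at the end of the store and the second moves the
 * remaining 8 sectors from its start. This wrap-around is how a small
 * store backs a larger virtual capacity (virtual_gb > 0).
 */
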
3050 /* Returns number of bytes copied or -1 if error. */
3051 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3052 {
3053 	struct scsi_data_buffer *sdb = &scp->sdb;
3054 
3055 	if (!sdb->length)
3056 		return 0;
3057 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3058 		return -1;
3059 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3060 			      num * sdebug_sector_size, 0, true);
3061 }
3062 
3063 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3064  * arr into sip->storep+lba and return true. If comparison fails then
3065  * return false. */
3066 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3067 			      const u8 *arr, bool compare_only)
3068 {
3069 	bool res;
3070 	u64 block, rest = 0;
3071 	u32 store_blks = sdebug_store_sectors;
3072 	u32 lb_size = sdebug_sector_size;
3073 	u8 *fsp = sip->storep;
3074 
3075 	block = do_div(lba, store_blks);
3076 	if (block + num > store_blks)
3077 		rest = block + num - store_blks;
3078 
3079 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3080 	if (!res)
3081 		return res;
3082 	if (rest)
3083 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3084 			      rest * lb_size);
3085 	if (!res)
3086 		return res;
3087 	if (compare_only)
3088 		return true;
3089 	arr += num * lb_size;
3090 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3091 	if (rest)
3092 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3093 	return res;
3094 }
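
/*
 * Sketch of the buffer layout for the COMPARE AND WRITE caller
 * (resp_comp_write() passes dnum = 2 * num blocks):
 *
 *	arr:  [ verify data, num blocks ][ write data, num blocks ]
 *	        compared against store     copied in on a match
 *
 * With compare_only set (the VERIFY path) only the first half is
 * needed and the copy is skipped.
 */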
3095 
3096 static __be16 dif_compute_csum(const void *buf, int len)
3097 {
3098 	__be16 csum;
3099 
3100 	if (sdebug_guard)
3101 		csum = (__force __be16)ip_compute_csum(buf, len);
3102 	else
3103 		csum = cpu_to_be16(crc_t10dif(buf, len));
3104 
3105 	return csum;
3106 }
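
/*
 * Note on the guard selection above: a non-zero sdebug_guard picks the
 * IP checksum (ip_compute_csum()), zero picks the T10-DIF CRC
 * (crc_t10dif()). Either way the result is kept big-endian so callers
 * can compare it directly with the wire-format guard tag, e.g.:
 *
 *	if (sdt->guard_tag != dif_compute_csum(data, sdebug_sector_size))
 *		... guard tag mismatch ...
 */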
3107 
3108 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3109 		      sector_t sector, u32 ei_lba)
3110 {
3111 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3112 
3113 	if (sdt->guard_tag != csum) {
3114 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3115 			(unsigned long)sector,
3116 			be16_to_cpu(sdt->guard_tag),
3117 			be16_to_cpu(csum));
3118 		return 0x01;
3119 	}
3120 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3121 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3122 		pr_err("REF check failed on sector %lu\n",
3123 			(unsigned long)sector);
3124 		return 0x03;
3125 	}
3126 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3127 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3128 		pr_err("REF check failed on sector %lu\n",
3129 			(unsigned long)sector);
3130 		return 0x03;
3131 	}
3132 	return 0;
3133 }
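
/*
 * dif_verify()'s return values double as the ASCQ codes its callers
 * pair with ASC 0x10 in mk_sense_buffer(): 0x01 for a guard tag
 * miscompare, 0x03 for a reference tag miscompare, 0 for a clean
 * sector (resp_write_scat() passes the value straight through). An
 * app_tag of 0xffff is treated as an escape value: the read-side
 * caller skips such sectors entirely.
 */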
3134 
3135 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3136 			  unsigned int sectors, bool read)
3137 {
3138 	size_t resid;
3139 	void *paddr;
3140 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3141 						scp->device->hostdata, true);
3142 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3143 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3144 	struct sg_mapping_iter miter;
3145 
3146 	/* Bytes of protection data to copy into sgl */
3147 	resid = sectors * sizeof(*dif_storep);
3148 
3149 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3150 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3151 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3152 
3153 	while (sg_miter_next(&miter) && resid > 0) {
3154 		size_t len = min_t(size_t, miter.length, resid);
3155 		void *start = dif_store(sip, sector);
3156 		size_t rest = 0;
3157 
3158 		if (dif_store_end < start + len)
3159 			rest = start + len - dif_store_end;
3160 
3161 		paddr = miter.addr;
3162 
3163 		if (read)
3164 			memcpy(paddr, start, len - rest);
3165 		else
3166 			memcpy(start, paddr, len - rest);
3167 
3168 		if (rest) {
3169 			if (read)
3170 				memcpy(paddr + len - rest, dif_storep, rest);
3171 			else
3172 				memcpy(dif_storep, paddr + len - rest, rest);
3173 		}
3174 
3175 		sector += len / sizeof(*dif_storep);
3176 		resid -= len;
3177 	}
3178 	sg_miter_stop(&miter);
3179 }
3180 
3181 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3182 			    unsigned int sectors, u32 ei_lba)
3183 {
3184 	int ret = 0;
3185 	unsigned int i;
3186 	sector_t sector;
3187 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3188 						scp->device->hostdata, true);
3189 	struct t10_pi_tuple *sdt;
3190 
3191 	for (i = 0; i < sectors; i++, ei_lba++) {
3192 		sector = start_sec + i;
3193 		sdt = dif_store(sip, sector);
3194 
3195 		if (sdt->app_tag == cpu_to_be16(0xffff))
3196 			continue;
3197 
3198 		/*
3199 		 * Because scsi_debug acts as both initiator and
3200 		 * target we proceed to verify the PI even if
3201 		 * RDPROTECT=3. This is done so the "initiator" knows
3202 		 * which type of error to return. Otherwise we would
3203 		 * have to iterate over the PI twice.
3204 		 */
3205 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3206 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3207 					 sector, ei_lba);
3208 			if (ret) {
3209 				dif_errors++;
3210 				break;
3211 			}
3212 		}
3213 	}
3214 
3215 	dif_copy_prot(scp, start_sec, sectors, true);
3216 	dix_reads++;
3217 
3218 	return ret;
3219 }
3220 
3221 static inline void
3222 sdeb_read_lock(struct sdeb_store_info *sip)
3223 {
3224 	if (sdebug_no_rwlock) {
3225 		if (sip)
3226 			__acquire(&sip->macc_lck);
3227 		else
3228 			__acquire(&sdeb_fake_rw_lck);
3229 	} else {
3230 		if (sip)
3231 			read_lock(&sip->macc_lck);
3232 		else
3233 			read_lock(&sdeb_fake_rw_lck);
3234 	}
3235 }
3236 
3237 static inline void
3238 sdeb_read_unlock(struct sdeb_store_info *sip)
3239 {
3240 	if (sdebug_no_rwlock) {
3241 		if (sip)
3242 			__release(&sip->macc_lck);
3243 		else
3244 			__release(&sdeb_fake_rw_lck);
3245 	} else {
3246 		if (sip)
3247 			read_unlock(&sip->macc_lck);
3248 		else
3249 			read_unlock(&sdeb_fake_rw_lck);
3250 	}
3251 }
3252 
3253 static inline void
3254 sdeb_write_lock(struct sdeb_store_info *sip)
3255 {
3256 	if (sdebug_no_rwlock) {
3257 		if (sip)
3258 			__acquire(&sip->macc_lck);
3259 		else
3260 			__acquire(&sdeb_fake_rw_lck);
3261 	} else {
3262 		if (sip)
3263 			write_lock(&sip->macc_lck);
3264 		else
3265 			write_lock(&sdeb_fake_rw_lck);
3266 	}
3267 }
3268 
3269 static inline void
3270 sdeb_write_unlock(struct sdeb_store_info *sip)
3271 {
3272 	if (sdebug_no_rwlock) {
3273 		if (sip)
3274 			__release(&sip->macc_lck);
3275 		else
3276 			__release(&sdeb_fake_rw_lck);
3277 	} else {
3278 		if (sip)
3279 			write_unlock(&sip->macc_lck);
3280 		else
3281 			write_unlock(&sdeb_fake_rw_lck);
3282 	}
3283 }
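
/*
 * Illustrative locking pattern for the media-access responses below
 * (a sketch, not a verbatim excerpt):
 *
 *	sdeb_write_lock(sip);
 *	ret = check_device_access_params(scp, lba, num, true);
 *	if (ret) {
 *		sdeb_write_unlock(sip);
 *		return ret;
 *	}
 *	ret = do_device_access(sip, scp, 0, lba, num, true);
 *	sdeb_write_unlock(sip);
 *
 * With sdebug_no_rwlock set the helpers reduce to sparse-only
 * __acquire()/__release() annotations and the store is accessed
 * without serialization; a NULL sip (fake_rw) falls back to the
 * global sdeb_fake_rw_lck.
 */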
3284 
3285 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3286 {
3287 	bool check_prot;
3288 	u32 num;
3289 	u32 ei_lba;
3290 	int ret;
3291 	u64 lba;
3292 	struct sdeb_store_info *sip = devip2sip(devip, true);
3293 	u8 *cmd = scp->cmnd;
3294 
3295 	switch (cmd[0]) {
3296 	case READ_16:
3297 		ei_lba = 0;
3298 		lba = get_unaligned_be64(cmd + 2);
3299 		num = get_unaligned_be32(cmd + 10);
3300 		check_prot = true;
3301 		break;
3302 	case READ_10:
3303 		ei_lba = 0;
3304 		lba = get_unaligned_be32(cmd + 2);
3305 		num = get_unaligned_be16(cmd + 7);
3306 		check_prot = true;
3307 		break;
3308 	case READ_6:
3309 		ei_lba = 0;
3310 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3311 		      (u32)(cmd[1] & 0x1f) << 16;
3312 		num = (0 == cmd[4]) ? 256 : cmd[4];
3313 		check_prot = true;
3314 		break;
3315 	case READ_12:
3316 		ei_lba = 0;
3317 		lba = get_unaligned_be32(cmd + 2);
3318 		num = get_unaligned_be32(cmd + 6);
3319 		check_prot = true;
3320 		break;
3321 	case XDWRITEREAD_10:
3322 		ei_lba = 0;
3323 		lba = get_unaligned_be32(cmd + 2);
3324 		num = get_unaligned_be16(cmd + 7);
3325 		check_prot = false;
3326 		break;
3327 	default:	/* assume READ(32) */
3328 		lba = get_unaligned_be64(cmd + 12);
3329 		ei_lba = get_unaligned_be32(cmd + 20);
3330 		num = get_unaligned_be32(cmd + 28);
3331 		check_prot = false;
3332 		break;
3333 	}
3334 	if (unlikely(have_dif_prot && check_prot)) {
3335 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3336 		    (cmd[1] & 0xe0)) {
3337 			mk_sense_invalid_opcode(scp);
3338 			return check_condition_result;
3339 		}
3340 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3341 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3342 		    (cmd[1] & 0xe0) == 0)
3343 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3344 				    "to DIF device\n");
3345 	}
3346 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3347 		     atomic_read(&sdeb_inject_pending))) {
3348 		num /= 2;
3349 		atomic_set(&sdeb_inject_pending, 0);
3350 	}
3351 
3352 	ret = check_device_access_params(scp, lba, num, false);
3353 	if (ret)
3354 		return ret;
3355 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3356 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3357 		     ((lba + num) > sdebug_medium_error_start))) {
3358 		/* claim unrecoverable read error */
3359 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3360 		/* set info field and valid bit for fixed descriptor */
3361 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3362 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3363 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3364 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3365 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3366 		}
3367 		scsi_set_resid(scp, scsi_bufflen(scp));
3368 		return check_condition_result;
3369 	}
3370 
3371 	sdeb_read_lock(sip);
3372 
3373 	/* DIX + T10 DIF */
3374 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3375 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3376 		case 1: /* Guard tag error */
3377 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3378 				sdeb_read_unlock(sip);
3379 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3380 				return check_condition_result;
3381 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3382 				sdeb_read_unlock(sip);
3383 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3384 				return illegal_condition_result;
3385 			}
3386 			break;
3387 		case 3: /* Reference tag error */
3388 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3389 				sdeb_read_unlock(sip);
3390 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3391 				return check_condition_result;
3392 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3393 				sdeb_read_unlock(sip);
3394 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3395 				return illegal_condition_result;
3396 			}
3397 			break;
3398 		}
3399 	}
3400 
3401 	ret = do_device_access(sip, scp, 0, lba, num, false);
3402 	sdeb_read_unlock(sip);
3403 	if (unlikely(ret == -1))
3404 		return DID_ERROR << 16;
3405 
3406 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3407 
3408 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3409 		     atomic_read(&sdeb_inject_pending))) {
3410 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3411 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3412 			atomic_set(&sdeb_inject_pending, 0);
3413 			return check_condition_result;
3414 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3415 			/* Logical block guard check failed */
3416 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3417 			atomic_set(&sdeb_inject_pending, 0);
3418 			return illegal_condition_result;
3419 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3420 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3421 			atomic_set(&sdeb_inject_pending, 0);
3422 			return illegal_condition_result;
3423 		}
3424 	}
3425 	return 0;
3426 }
3427 
3428 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3429 			     unsigned int sectors, u32 ei_lba)
3430 {
3431 	int ret;
3432 	struct t10_pi_tuple *sdt;
3433 	void *daddr;
3434 	sector_t sector = start_sec;
3435 	int ppage_offset;
3436 	int dpage_offset;
3437 	struct sg_mapping_iter diter;
3438 	struct sg_mapping_iter piter;
3439 
3440 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3441 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3442 
3443 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3444 			scsi_prot_sg_count(SCpnt),
3445 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3446 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3447 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3448 
3449 	/* For each protection page */
3450 	while (sg_miter_next(&piter)) {
3451 		dpage_offset = 0;
3452 		if (WARN_ON(!sg_miter_next(&diter))) {
3453 			ret = 0x01;
3454 			goto out;
3455 		}
3456 
3457 		for (ppage_offset = 0; ppage_offset < piter.length;
3458 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3459 			/* If we're at the end of the current
3460 			 * data page, advance to the next one.
3461 			 */
3462 			if (dpage_offset >= diter.length) {
3463 				if (WARN_ON(!sg_miter_next(&diter))) {
3464 					ret = 0x01;
3465 					goto out;
3466 				}
3467 				dpage_offset = 0;
3468 			}
3469 
3470 			sdt = piter.addr + ppage_offset;
3471 			daddr = diter.addr + dpage_offset;
3472 
3473 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3474 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3475 				if (ret)
3476 					goto out;
3477 			}
3478 
3479 			sector++;
3480 			ei_lba++;
3481 			dpage_offset += sdebug_sector_size;
3482 		}
3483 		diter.consumed = dpage_offset;
3484 		sg_miter_stop(&diter);
3485 	}
3486 	sg_miter_stop(&piter);
3487 
3488 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3489 	dix_writes++;
3490 
3491 	return 0;
3492 
3493 out:
3494 	dif_errors++;
3495 	sg_miter_stop(&diter);
3496 	sg_miter_stop(&piter);
3497 	return ret;
3498 }
3499 
3500 static unsigned long lba_to_map_index(sector_t lba)
3501 {
3502 	if (sdebug_unmap_alignment)
3503 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3504 	sector_div(lba, sdebug_unmap_granularity);
3505 	return lba;
3506 }
3507 
3508 static sector_t map_index_to_lba(unsigned long index)
3509 {
3510 	sector_t lba = index * sdebug_unmap_granularity;
3511 
3512 	if (sdebug_unmap_alignment)
3513 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3514 	return lba;
3515 }
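
/*
 * Worked example with hypothetical parameters: for
 * sdebug_unmap_granularity = 8 and sdebug_unmap_alignment = 4,
 * lba_to_map_index(3) = (3 + 8 - 4) / 8 = 0 while
 * lba_to_map_index(4) = (4 + 8 - 4) / 8 = 1, so map bit 1 covers
 * LBAs 4..11 and map_index_to_lba(1) = 1 * 8 - (8 - 4) = 4 recovers
 * the first LBA of that block.
 */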
3516 
3517 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3518 			      unsigned int *num)
3519 {
3520 	sector_t end;
3521 	unsigned int mapped;
3522 	unsigned long index;
3523 	unsigned long next;
3524 
3525 	index = lba_to_map_index(lba);
3526 	mapped = test_bit(index, sip->map_storep);
3527 
3528 	if (mapped)
3529 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3530 	else
3531 		next = find_next_bit(sip->map_storep, map_size, index);
3532 
3533 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3534 	*num = end - lba;
3535 	return mapped;
3536 }
3537 
3538 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3539 		       unsigned int len)
3540 {
3541 	sector_t end = lba + len;
3542 
3543 	while (lba < end) {
3544 		unsigned long index = lba_to_map_index(lba);
3545 
3546 		if (index < map_size)
3547 			set_bit(index, sip->map_storep);
3548 
3549 		lba = map_index_to_lba(index + 1);
3550 	}
3551 }
3552 
3553 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3554 			 unsigned int len)
3555 {
3556 	sector_t end = lba + len;
3557 	u8 *fsp = sip->storep;
3558 
3559 	while (lba < end) {
3560 		unsigned long index = lba_to_map_index(lba);
3561 
3562 		if (lba == map_index_to_lba(index) &&
3563 		    lba + sdebug_unmap_granularity <= end &&
3564 		    index < map_size) {
3565 			clear_bit(index, sip->map_storep);
3566 			if (sdebug_lbprz) {  /* LBPRZ=1: zeroes; LBPRZ=2: 0xff bytes */
3567 				memset(fsp + lba * sdebug_sector_size,
3568 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3569 				       sdebug_sector_size *
3570 				       sdebug_unmap_granularity);
3571 			}
3572 			if (sip->dif_storep) {
3573 				memset(sip->dif_storep + lba, 0xff,
3574 				       sizeof(*sip->dif_storep) *
3575 				       sdebug_unmap_granularity);
3576 			}
3577 		}
3578 		lba = map_index_to_lba(index + 1);
3579 	}
3580 }
3581 
3582 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3583 {
3584 	bool check_prot;
3585 	u32 num;
3586 	u32 ei_lba;
3587 	int ret;
3588 	u64 lba;
3589 	struct sdeb_store_info *sip = devip2sip(devip, true);
3590 	u8 *cmd = scp->cmnd;
3591 
3592 	switch (cmd[0]) {
3593 	case WRITE_16:
3594 		ei_lba = 0;
3595 		lba = get_unaligned_be64(cmd + 2);
3596 		num = get_unaligned_be32(cmd + 10);
3597 		check_prot = true;
3598 		break;
3599 	case WRITE_10:
3600 		ei_lba = 0;
3601 		lba = get_unaligned_be32(cmd + 2);
3602 		num = get_unaligned_be16(cmd + 7);
3603 		check_prot = true;
3604 		break;
3605 	case WRITE_6:
3606 		ei_lba = 0;
3607 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3608 		      (u32)(cmd[1] & 0x1f) << 16;
3609 		num = (0 == cmd[4]) ? 256 : cmd[4];
3610 		check_prot = true;
3611 		break;
3612 	case WRITE_12:
3613 		ei_lba = 0;
3614 		lba = get_unaligned_be32(cmd + 2);
3615 		num = get_unaligned_be32(cmd + 6);
3616 		check_prot = true;
3617 		break;
3618 	case 0x53:	/* XDWRITEREAD(10) */
3619 		ei_lba = 0;
3620 		lba = get_unaligned_be32(cmd + 2);
3621 		num = get_unaligned_be16(cmd + 7);
3622 		check_prot = false;
3623 		break;
3624 	default:	/* assume WRITE(32) */
3625 		lba = get_unaligned_be64(cmd + 12);
3626 		ei_lba = get_unaligned_be32(cmd + 20);
3627 		num = get_unaligned_be32(cmd + 28);
3628 		check_prot = false;
3629 		break;
3630 	}
3631 	if (unlikely(have_dif_prot && check_prot)) {
3632 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3633 		    (cmd[1] & 0xe0)) {
3634 			mk_sense_invalid_opcode(scp);
3635 			return check_condition_result;
3636 		}
3637 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3638 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3639 		    (cmd[1] & 0xe0) == 0)
3640 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3641 				    "to DIF device\n");
3642 	}
3643 
3644 	sdeb_write_lock(sip);
3645 	ret = check_device_access_params(scp, lba, num, true);
3646 	if (ret) {
3647 		sdeb_write_unlock(sip);
3648 		return ret;
3649 	}
3650 
3651 	/* DIX + T10 DIF */
3652 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3653 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3654 		case 1: /* Guard tag error */
3655 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3656 				sdeb_write_unlock(sip);
3657 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3658 				return illegal_condition_result;
3659 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3660 				sdeb_write_unlock(sip);
3661 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3662 				return check_condition_result;
3663 			}
3664 			break;
3665 		case 3: /* Reference tag error */
3666 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3667 				sdeb_write_unlock(sip);
3668 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3669 				return illegal_condition_result;
3670 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3671 				sdeb_write_unlock(sip);
3672 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3673 				return check_condition_result;
3674 			}
3675 			break;
3676 		}
3677 	}
3678 
3679 	ret = do_device_access(sip, scp, 0, lba, num, true);
3680 	if (unlikely(scsi_debug_lbp()))
3681 		map_region(sip, lba, num);
3682 	/* If ZBC zone then bump its write pointer */
3683 	if (sdebug_dev_is_zoned(devip))
3684 		zbc_inc_wp(devip, lba, num);
3685 	sdeb_write_unlock(sip);
3686 	if (unlikely(-1 == ret))
3687 		return DID_ERROR << 16;
3688 	else if (unlikely(sdebug_verbose &&
3689 			  (ret < (num * sdebug_sector_size))))
3690 		sdev_printk(KERN_INFO, scp->device,
3691 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3692 			    my_name, num * sdebug_sector_size, ret);
3693 
3694 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3695 		     atomic_read(&sdeb_inject_pending))) {
3696 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3697 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3698 			atomic_set(&sdeb_inject_pending, 0);
3699 			return check_condition_result;
3700 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3701 			/* Logical block guard check failed */
3702 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3703 			atomic_set(&sdeb_inject_pending, 0);
3704 			return illegal_condition_result;
3705 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3706 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3707 			atomic_set(&sdeb_inject_pending, 0);
3708 			return illegal_condition_result;
3709 		}
3710 	}
3711 	return 0;
3712 }
3713 
3714 /*
3715  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3716  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3717  */
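/*
 * Sketch of the data-out buffer resp_write_scat() parses (simplified
 * from SBC): lbdof logical blocks of header space carry a 32 byte
 * parameter list header followed by num_lrd LBA range descriptors of
 * lrd_size (32) bytes each -- a be64 LBA at offset 0, a be32 NUM at
 * offset 8 and, for the 32 byte cdb variant, a be32 expected initial
 * reference tag at offset 12. The write payload for the listed ranges
 * follows at byte offset lbdof * sdebug_sector_size (sg_off below).
 */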
3718 static int resp_write_scat(struct scsi_cmnd *scp,
3719 			   struct sdebug_dev_info *devip)
3720 {
3721 	u8 *cmd = scp->cmnd;
3722 	u8 *lrdp = NULL;
3723 	u8 *up;
3724 	struct sdeb_store_info *sip = devip2sip(devip, true);
3725 	u8 wrprotect;
3726 	u16 lbdof, num_lrd, k;
3727 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3728 	u32 lb_size = sdebug_sector_size;
3729 	u32 ei_lba;
3730 	u64 lba;
3731 	int ret, res;
3732 	bool is_16;
3733 	static const u32 lrd_size = 32; /* + parameter list header size */
3734 
3735 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3736 		is_16 = false;
3737 		wrprotect = (cmd[10] >> 5) & 0x7;
3738 		lbdof = get_unaligned_be16(cmd + 12);
3739 		num_lrd = get_unaligned_be16(cmd + 16);
3740 		bt_len = get_unaligned_be32(cmd + 28);
3741 	} else {        /* that leaves WRITE SCATTERED(16) */
3742 		is_16 = true;
3743 		wrprotect = (cmd[2] >> 5) & 0x7;
3744 		lbdof = get_unaligned_be16(cmd + 4);
3745 		num_lrd = get_unaligned_be16(cmd + 8);
3746 		bt_len = get_unaligned_be32(cmd + 10);
3747 		if (unlikely(have_dif_prot)) {
3748 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3749 			    wrprotect) {
3750 				mk_sense_invalid_opcode(scp);
3751 				return illegal_condition_result;
3752 			}
3753 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3754 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3755 			     wrprotect == 0)
3756 				sdev_printk(KERN_ERR, scp->device,
3757 					    "Unprotected WR to DIF device\n");
3758 		}
3759 	}
3760 	if ((num_lrd == 0) || (bt_len == 0))
3761 		return 0;       /* T10 says these do-nothings are not errors */
3762 	if (lbdof == 0) {
3763 		if (sdebug_verbose)
3764 			sdev_printk(KERN_INFO, scp->device,
3765 				"%s: %s: LB Data Offset field bad\n",
3766 				my_name, __func__);
3767 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3768 		return illegal_condition_result;
3769 	}
3770 	lbdof_blen = lbdof * lb_size;
3771 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3772 		if (sdebug_verbose)
3773 			sdev_printk(KERN_INFO, scp->device,
3774 				"%s: %s: LBA range descriptors don't fit\n",
3775 				my_name, __func__);
3776 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3777 		return illegal_condition_result;
3778 	}
3779 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3780 	if (lrdp == NULL)
3781 		return SCSI_MLQUEUE_HOST_BUSY;
3782 	if (sdebug_verbose)
3783 		sdev_printk(KERN_INFO, scp->device,
3784 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3785 			my_name, __func__, lbdof_blen);
3786 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3787 	if (res == -1) {
3788 		ret = DID_ERROR << 16;
3789 		goto err_out;
3790 	}
3791 
3792 	sdeb_write_lock(sip);
3793 	sg_off = lbdof_blen;
3794 	/* Spec says the Buffer Transfer Length field counts LBs in dout */
3795 	cum_lb = 0;
3796 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3797 		lba = get_unaligned_be64(up + 0);
3798 		num = get_unaligned_be32(up + 8);
3799 		if (sdebug_verbose)
3800 			sdev_printk(KERN_INFO, scp->device,
3801 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3802 				my_name, __func__, k, lba, num, sg_off);
3803 		if (num == 0)
3804 			continue;
3805 		ret = check_device_access_params(scp, lba, num, true);
3806 		if (ret)
3807 			goto err_out_unlock;
3808 		num_by = num * lb_size;
3809 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3810 
3811 		if ((cum_lb + num) > bt_len) {
3812 			if (sdebug_verbose)
3813 				sdev_printk(KERN_INFO, scp->device,
3814 				    "%s: %s: sum of blocks > data provided\n",
3815 				    my_name, __func__);
3816 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3817 					0);
3818 			ret = illegal_condition_result;
3819 			goto err_out_unlock;
3820 		}
3821 
3822 		/* DIX + T10 DIF */
3823 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3824 			int prot_ret = prot_verify_write(scp, lba, num,
3825 							 ei_lba);
3826 
3827 			if (prot_ret) {
3828 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3829 						prot_ret);
3830 				ret = illegal_condition_result;
3831 				goto err_out_unlock;
3832 			}
3833 		}
3834 
3835 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3836 		/* If ZBC zone then bump its write pointer */
3837 		if (sdebug_dev_is_zoned(devip))
3838 			zbc_inc_wp(devip, lba, num);
3839 		if (unlikely(scsi_debug_lbp()))
3840 			map_region(sip, lba, num);
3841 		if (unlikely(-1 == ret)) {
3842 			ret = DID_ERROR << 16;
3843 			goto err_out_unlock;
3844 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3845 			sdev_printk(KERN_INFO, scp->device,
3846 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3847 			    my_name, num_by, ret);
3848 
3849 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3850 			     atomic_read(&sdeb_inject_pending))) {
3851 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3852 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3853 				atomic_set(&sdeb_inject_pending, 0);
3854 				ret = check_condition_result;
3855 				goto err_out_unlock;
3856 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3857 				/* Logical block guard check failed */
3858 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3859 				atomic_set(&sdeb_inject_pending, 0);
3860 				ret = illegal_condition_result;
3861 				goto err_out_unlock;
3862 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3863 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3864 				atomic_set(&sdeb_inject_pending, 0);
3865 				ret = illegal_condition_result;
3866 				goto err_out_unlock;
3867 			}
3868 		}
3869 		sg_off += num_by;
3870 		cum_lb += num;
3871 	}
3872 	ret = 0;
3873 err_out_unlock:
3874 	sdeb_write_unlock(sip);
3875 err_out:
3876 	kfree(lrdp);
3877 	return ret;
3878 }
3879 
3880 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3881 			   u32 ei_lba, bool unmap, bool ndob)
3882 {
3883 	struct scsi_device *sdp = scp->device;
3884 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3885 	unsigned long long i;
3886 	u64 block, lbaa;
3887 	u32 lb_size = sdebug_sector_size;
3888 	int ret;
3889 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3890 						scp->device->hostdata, true);
3891 	u8 *fs1p;
3892 	u8 *fsp;
3893 
3894 	sdeb_write_lock(sip);
3895 
3896 	ret = check_device_access_params(scp, lba, num, true);
3897 	if (ret) {
3898 		sdeb_write_unlock(sip);
3899 		return ret;
3900 	}
3901 
3902 	if (unmap && scsi_debug_lbp()) {
3903 		unmap_region(sip, lba, num);
3904 		goto out;
3905 	}
3906 	lbaa = lba;
3907 	block = do_div(lbaa, sdebug_store_sectors);
3908 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3909 	fsp = sip->storep;
3910 	fs1p = fsp + (block * lb_size);
3911 	if (ndob) {
3912 		memset(fs1p, 0, lb_size);
3913 		ret = 0;
3914 	} else
3915 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3916 
3917 	if (-1 == ret) {
3918 		sdeb_write_unlock(sip);
3919 		return DID_ERROR << 16;
3920 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3921 		sdev_printk(KERN_INFO, scp->device,
3922 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3923 			    my_name, "write same", lb_size, ret);
3924 
3925 	/* Copy first sector to remaining blocks */
3926 	for (i = 1 ; i < num ; i++) {
3927 		lbaa = lba + i;
3928 		block = do_div(lbaa, sdebug_store_sectors);
3929 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3930 	}
3931 	if (scsi_debug_lbp())
3932 		map_region(sip, lba, num);
3933 	/* If ZBC zone then bump its write pointer */
3934 	if (sdebug_dev_is_zoned(devip))
3935 		zbc_inc_wp(devip, lba, num);
3936 out:
3937 	sdeb_write_unlock(sip);
3938 
3939 	return 0;
3940 }
3941 
3942 static int resp_write_same_10(struct scsi_cmnd *scp,
3943 			      struct sdebug_dev_info *devip)
3944 {
3945 	u8 *cmd = scp->cmnd;
3946 	u32 lba;
3947 	u16 num;
3948 	u32 ei_lba = 0;
3949 	bool unmap = false;
3950 
3951 	if (cmd[1] & 0x8) {
3952 		if (sdebug_lbpws10 == 0) {
3953 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3954 			return check_condition_result;
3955 		} else
3956 			unmap = true;
3957 	}
3958 	lba = get_unaligned_be32(cmd + 2);
3959 	num = get_unaligned_be16(cmd + 7);
3960 	if (num > sdebug_write_same_length) {
3961 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3962 		return check_condition_result;
3963 	}
3964 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3965 }
3966 
3967 static int resp_write_same_16(struct scsi_cmnd *scp,
3968 			      struct sdebug_dev_info *devip)
3969 {
3970 	u8 *cmd = scp->cmnd;
3971 	u64 lba;
3972 	u32 num;
3973 	u32 ei_lba = 0;
3974 	bool unmap = false;
3975 	bool ndob = false;
3976 
3977 	if (cmd[1] & 0x8) {	/* UNMAP */
3978 		if (sdebug_lbpws == 0) {
3979 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3980 			return check_condition_result;
3981 		} else
3982 			unmap = true;
3983 	}
3984 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3985 		ndob = true;
3986 	lba = get_unaligned_be64(cmd + 2);
3987 	num = get_unaligned_be32(cmd + 10);
3988 	if (num > sdebug_write_same_length) {
3989 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3990 		return check_condition_result;
3991 	}
3992 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3993 }
3994 
3995 /* Note the mode field is in the same position as the (lower) service action
3996  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3997  * reporting each mode of this command separately; that is left for later. */
3998 static int resp_write_buffer(struct scsi_cmnd *scp,
3999 			     struct sdebug_dev_info *devip)
4000 {
4001 	u8 *cmd = scp->cmnd;
4002 	struct scsi_device *sdp = scp->device;
4003 	struct sdebug_dev_info *dp;
4004 	u8 mode;
4005 
4006 	mode = cmd[1] & 0x1f;
4007 	switch (mode) {
4008 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4009 		/* set UAs on this device only */
4010 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4011 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4012 		break;
4013 	case 0x5:	/* download MC, save and ACT */
4014 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4015 		break;
4016 	case 0x6:	/* download MC with offsets and ACT */
4017 		/* set UAs on most devices (LUs) in this target */
4018 		list_for_each_entry(dp,
4019 				    &devip->sdbg_host->dev_info_list,
4020 				    dev_list)
4021 			if (dp->target == sdp->id) {
4022 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4023 				if (devip != dp)
4024 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4025 						dp->uas_bm);
4026 			}
4027 		break;
4028 	case 0x7:	/* download MC with offsets, save, and ACT */
4029 		/* set UA on all devices (LUs) in this target */
4030 		list_for_each_entry(dp,
4031 				    &devip->sdbg_host->dev_info_list,
4032 				    dev_list)
4033 			if (dp->target == sdp->id)
4034 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4035 					dp->uas_bm);
4036 		break;
4037 	default:
4038 		/* do nothing for this command for other mode values */
4039 		break;
4040 	}
4041 	return 0;
4042 }
4043 
4044 static int resp_comp_write(struct scsi_cmnd *scp,
4045 			   struct sdebug_dev_info *devip)
4046 {
4047 	u8 *cmd = scp->cmnd;
4048 	u8 *arr;
4049 	struct sdeb_store_info *sip = devip2sip(devip, true);
4050 	u64 lba;
4051 	u32 dnum;
4052 	u32 lb_size = sdebug_sector_size;
4053 	u8 num;
4054 	int ret;
4055 	int retval = 0;
4056 
4057 	lba = get_unaligned_be64(cmd + 2);
4058 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4059 	if (0 == num)
4060 		return 0;	/* degenerate case, not an error */
4061 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4062 	    (cmd[1] & 0xe0)) {
4063 		mk_sense_invalid_opcode(scp);
4064 		return check_condition_result;
4065 	}
4066 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4067 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4068 	    (cmd[1] & 0xe0) == 0)
4069 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4070 			    "to DIF device\n");
4071 	ret = check_device_access_params(scp, lba, num, false);
4072 	if (ret)
4073 		return ret;
4074 	dnum = 2 * num;
4075 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4076 	if (NULL == arr) {
4077 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4078 				INSUFF_RES_ASCQ);
4079 		return check_condition_result;
4080 	}
4081 
4082 	sdeb_write_lock(sip);
4083 
4084 	ret = do_dout_fetch(scp, dnum, arr);
4085 	if (ret == -1) {
4086 		retval = DID_ERROR << 16;
4087 		goto cleanup;
4088 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4089 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4090 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4091 			    dnum * lb_size, ret);
4092 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4093 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4094 		retval = check_condition_result;
4095 		goto cleanup;
4096 	}
4097 	if (scsi_debug_lbp())
4098 		map_region(sip, lba, num);
4099 cleanup:
4100 	sdeb_write_unlock(sip);
4101 	kfree(arr);
4102 	return retval;
4103 }
4104 
4105 struct unmap_block_desc {
4106 	__be64	lba;
4107 	__be32	blocks;
4108 	__be32	__reserved;
4109 };
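
/*
 * Sketch of the UNMAP parameter list resp_unmap() expects (simplified
 * from SBC): an 8 byte header -- be16 data length (payload_len - 2) at
 * byte 0 and be16 block descriptor data length (descriptors * 16) at
 * byte 2 -- followed by the 16 byte descriptors above. The BUG_ON()
 * checks below enforce exactly that encoding.
 */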
4110 
4111 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4112 {
4113 	unsigned char *buf;
4114 	struct unmap_block_desc *desc;
4115 	struct sdeb_store_info *sip = devip2sip(devip, true);
4116 	unsigned int i, payload_len, descriptors;
4117 	int ret;
4118 
4119 	if (!scsi_debug_lbp())
4120 		return 0;	/* fib and say it's done */
4121 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4122 	BUG_ON(scsi_bufflen(scp) != payload_len);
4123 
4124 	descriptors = (payload_len - 8) / 16;
4125 	if (descriptors > sdebug_unmap_max_desc) {
4126 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4127 		return check_condition_result;
4128 	}
4129 
4130 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4131 	if (!buf) {
4132 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4133 				INSUFF_RES_ASCQ);
4134 		return check_condition_result;
4135 	}
4136 
4137 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4138 
4139 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4140 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4141 
4142 	desc = (void *)&buf[8];
4143 
4144 	sdeb_write_lock(sip);
4145 
4146 	for (i = 0 ; i < descriptors ; i++) {
4147 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4148 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4149 
4150 		ret = check_device_access_params(scp, lba, num, true);
4151 		if (ret)
4152 			goto out;
4153 
4154 		unmap_region(sip, lba, num);
4155 	}
4156 
4157 	ret = 0;
4158 
4159 out:
4160 	sdeb_write_unlock(sip);
4161 	kfree(buf);
4162 
4163 	return ret;
4164 }
4165 
4166 #define SDEBUG_GET_LBA_STATUS_LEN 32
4167 
4168 static int resp_get_lba_status(struct scsi_cmnd *scp,
4169 			       struct sdebug_dev_info *devip)
4170 {
4171 	u8 *cmd = scp->cmnd;
4172 	u64 lba;
4173 	u32 alloc_len, mapped, num;
4174 	int ret;
4175 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4176 
4177 	lba = get_unaligned_be64(cmd + 2);
4178 	alloc_len = get_unaligned_be32(cmd + 10);
4179 
4180 	if (alloc_len < 24)
4181 		return 0;
4182 
4183 	ret = check_device_access_params(scp, lba, 1, false);
4184 	if (ret)
4185 		return ret;
4186 
4187 	if (scsi_debug_lbp()) {
4188 		struct sdeb_store_info *sip = devip2sip(devip, true);
4189 
4190 		mapped = map_state(sip, lba, &num);
4191 	} else {
4192 		mapped = 1;
4193 		/* following just in case virtual_gb changed */
4194 		sdebug_capacity = get_sdebug_capacity();
4195 		if (sdebug_capacity - lba <= 0xffffffff)
4196 			num = sdebug_capacity - lba;
4197 		else
4198 			num = 0xffffffff;
4199 	}
4200 
4201 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4202 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4203 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4204 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4205 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4206 
4207 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4208 }
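
/*
 * Shape of the single-descriptor response built above: the be32 at
 * byte 0 is the parameter data length (20, i.e. the bytes following
 * the length field), bytes 8..15 hold the starting LBA, bytes 16..19
 * the extent length in blocks and byte 20 the provisioning status
 * (0 mapped, 1 deallocated). Only the one run of same-state blocks
 * found by map_state() is ever reported.
 */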
4209 
4210 static int resp_sync_cache(struct scsi_cmnd *scp,
4211 			   struct sdebug_dev_info *devip)
4212 {
4213 	int res = 0;
4214 	u64 lba;
4215 	u32 num_blocks;
4216 	u8 *cmd = scp->cmnd;
4217 
4218 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4219 		lba = get_unaligned_be32(cmd + 2);
4220 		num_blocks = get_unaligned_be16(cmd + 7);
4221 	} else {				/* SYNCHRONIZE_CACHE(16) */
4222 		lba = get_unaligned_be64(cmd + 2);
4223 		num_blocks = get_unaligned_be32(cmd + 10);
4224 	}
4225 	if (lba + num_blocks > sdebug_capacity) {
4226 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4227 		return check_condition_result;
4228 	}
4229 	if (!write_since_sync || (cmd[1] & 0x2))
4230 		res = SDEG_RES_IMMED_MASK;
4231 	else		/* delay if write_since_sync and IMMED clear */
4232 		write_since_sync = false;
4233 	return res;
4234 }
4235 
4236 /*
4237  * Assuming lba + num_blocks is not out of range, this function returns
4238  * CONDITION MET if the specified blocks will fit (or already sit) in the
4239  * cache, and GOOD status otherwise. We model a disk with a big cache, so
4240  * we always yield CONDITION MET, and as a side effect try to pull the
4241  * backing range of main memory into the cache(s) of the CPU(s).
4242  */
4243 static int resp_pre_fetch(struct scsi_cmnd *scp,
4244 			  struct sdebug_dev_info *devip)
4245 {
4246 	int res = 0;
4247 	u64 lba;
4248 	u64 block, rest = 0;
4249 	u32 nblks;
4250 	u8 *cmd = scp->cmnd;
4251 	struct sdeb_store_info *sip = devip2sip(devip, true);
4252 	u8 *fsp = sip->storep;
4253 
4254 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4255 		lba = get_unaligned_be32(cmd + 2);
4256 		nblks = get_unaligned_be16(cmd + 7);
4257 	} else {			/* PRE-FETCH(16) */
4258 		lba = get_unaligned_be64(cmd + 2);
4259 		nblks = get_unaligned_be32(cmd + 10);
4260 	}
4261 	if (lba + nblks > sdebug_capacity) {
4262 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4263 		return check_condition_result;
4264 	}
4265 	if (!fsp)
4266 		goto fini;
4267 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4268 	block = do_div(lba, sdebug_store_sectors);
4269 	if (block + nblks > sdebug_store_sectors)
4270 		rest = block + nblks - sdebug_store_sectors;
4271 
4272 	/* Try to bring the PRE-FETCH range into CPU's cache */
4273 	sdeb_read_lock(sip);
4274 	prefetch_range(fsp + (sdebug_sector_size * block),
4275 		       (nblks - rest) * sdebug_sector_size);
4276 	if (rest)
4277 		prefetch_range(fsp, rest * sdebug_sector_size);
4278 	sdeb_read_unlock(sip);
4279 fini:
4280 	if (cmd[1] & 0x2)
4281 		res = SDEG_RES_IMMED_MASK;
4282 	return res | condition_met_result;
4283 }
4284 
4285 #define RL_BUCKET_ELEMS 8
4286 
4287 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4288  * (W-LUN), the normal Linux scanning logic does not associate it with a
4289  * device (e.g. /dev/sg7). The following magic will make that association:
4290  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4291  * where <n> is a host number. If there are multiple targets in a host then
4292  * the above will associate a W-LUN to each target. To only get a W-LUN
4293  * for target 2, then use "echo '- 2 49409' > scan" .
4294  */
4295 static int resp_report_luns(struct scsi_cmnd *scp,
4296 			    struct sdebug_dev_info *devip)
4297 {
4298 	unsigned char *cmd = scp->cmnd;
4299 	unsigned int alloc_len;
4300 	unsigned char select_report;
4301 	u64 lun;
4302 	struct scsi_lun *lun_p;
4303 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4304 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4305 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4306 	unsigned int tlun_cnt;	/* total LUN count */
4307 	unsigned int rlen;	/* response length (in bytes) */
4308 	int k, j, n, res;
4309 	unsigned int off_rsp = 0;
4310 	const int sz_lun = sizeof(struct scsi_lun);
4311 
4312 	clear_luns_changed_on_target(devip);
4313 
4314 	select_report = cmd[2];
4315 	alloc_len = get_unaligned_be32(cmd + 6);
4316 
4317 	if (alloc_len < 4) {
4318 		pr_err("alloc len too small %d\n", alloc_len);
4319 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4320 		return check_condition_result;
4321 	}
4322 
4323 	switch (select_report) {
4324 	case 0:		/* all LUNs apart from W-LUNs */
4325 		lun_cnt = sdebug_max_luns;
4326 		wlun_cnt = 0;
4327 		break;
4328 	case 1:		/* only W-LUNs */
4329 		lun_cnt = 0;
4330 		wlun_cnt = 1;
4331 		break;
4332 	case 2:		/* all LUNs */
4333 		lun_cnt = sdebug_max_luns;
4334 		wlun_cnt = 1;
4335 		break;
4336 	case 0x10:	/* only administrative LUs */
4337 	case 0x11:	/* see SPC-5 */
4338 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4339 	default:
4340 		pr_debug("select report invalid %d\n", select_report);
4341 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4342 		return check_condition_result;
4343 	}
4344 
4345 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4346 		--lun_cnt;
4347 
4348 	tlun_cnt = lun_cnt + wlun_cnt;
4349 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4350 	scsi_set_resid(scp, scsi_bufflen(scp));
4351 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4352 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4353 
4354 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
4355 	lun = sdebug_no_lun_0 ? 1 : 0;
4356 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4357 		memset(arr, 0, sizeof(arr));
4358 		lun_p = (struct scsi_lun *)&arr[0];
4359 		if (k == 0) {
4360 			put_unaligned_be32(rlen, &arr[0]);
4361 			++lun_p;
4362 			j = 1;
4363 		}
4364 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4365 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4366 				break;
4367 			int_to_scsilun(lun++, lun_p);
4368 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4369 				lun_p->scsi_lun[0] |= 0x40;
4370 		}
4371 		if (j < RL_BUCKET_ELEMS)
4372 			break;
4373 		n = j * sz_lun;
4374 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4375 		if (res)
4376 			return res;
4377 		off_rsp += n;
4378 	}
4379 	if (wlun_cnt) {
4380 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4381 		++j;
4382 	}
4383 	if (j > 0)
4384 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4385 	return res;
4386 }
4387 
4388 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4389 {
4390 	bool is_bytchk3 = false;
4391 	u8 bytchk;
4392 	int ret, j;
4393 	u32 vnum, a_num, off;
4394 	const u32 lb_size = sdebug_sector_size;
4395 	u64 lba;
4396 	u8 *arr;
4397 	u8 *cmd = scp->cmnd;
4398 	struct sdeb_store_info *sip = devip2sip(devip, true);
4399 
4400 	bytchk = (cmd[1] >> 1) & 0x3;
4401 	if (bytchk == 0) {
4402 		return 0;	/* always claim internal verify okay */
4403 	} else if (bytchk == 2) {
4404 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4405 		return check_condition_result;
4406 	} else if (bytchk == 3) {
4407 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4408 	}
4409 	switch (cmd[0]) {
4410 	case VERIFY_16:
4411 		lba = get_unaligned_be64(cmd + 2);
4412 		vnum = get_unaligned_be32(cmd + 10);
4413 		break;
4414 	case VERIFY:		/* is VERIFY(10) */
4415 		lba = get_unaligned_be32(cmd + 2);
4416 		vnum = get_unaligned_be16(cmd + 7);
4417 		break;
4418 	default:
4419 		mk_sense_invalid_opcode(scp);
4420 		return check_condition_result;
4421 	}
4422 	if (vnum == 0)
4423 		return 0;	/* not an error */
4424 	a_num = is_bytchk3 ? 1 : vnum;
4425 	/* Treat following check like one for read (i.e. no write) access */
4426 	ret = check_device_access_params(scp, lba, a_num, false);
4427 	if (ret)
4428 		return ret;
4429 
4430 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4431 	if (!arr) {
4432 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4433 				INSUFF_RES_ASCQ);
4434 		return check_condition_result;
4435 	}
4436 	/* Not changing store, so only need read access */
4437 	sdeb_read_lock(sip);
4438 
4439 	ret = do_dout_fetch(scp, a_num, arr);
4440 	if (ret == -1) {
4441 		ret = DID_ERROR << 16;
4442 		goto cleanup;
4443 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4444 		sdev_printk(KERN_INFO, scp->device,
4445 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4446 			    my_name, __func__, a_num * lb_size, ret);
4447 	}
4448 	if (is_bytchk3) {
4449 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4450 			memcpy(arr + off, arr, lb_size);
4451 	}
4452 	ret = 0;
4453 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4454 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4455 		ret = check_condition_result;
4456 		goto cleanup;
4457 	}
4458 cleanup:
4459 	sdeb_read_unlock(sip);
4460 	kfree(arr);
4461 	return ret;
4462 }
4463 
4464 #define RZONES_DESC_HD 64
4465 
4466 /* Report zones depending on start LBA and reporting options */
4467 static int resp_report_zones(struct scsi_cmnd *scp,
4468 			     struct sdebug_dev_info *devip)
4469 {
4470 	unsigned int rep_max_zones, nrz = 0;
4471 	int ret = 0;
4472 	u32 alloc_len, rep_opts, rep_len;
4473 	bool partial;
4474 	u64 lba, zs_lba;
4475 	u8 *arr = NULL, *desc;
4476 	u8 *cmd = scp->cmnd;
4477 	struct sdeb_zone_state *zsp = NULL;
4478 	struct sdeb_store_info *sip = devip2sip(devip, false);
4479 
4480 	if (!sdebug_dev_is_zoned(devip)) {
4481 		mk_sense_invalid_opcode(scp);
4482 		return check_condition_result;
4483 	}
4484 	zs_lba = get_unaligned_be64(cmd + 2);
4485 	alloc_len = get_unaligned_be32(cmd + 10);
4486 	if (alloc_len == 0)
4487 		return 0;	/* not an error */
4488 	rep_opts = cmd[14] & 0x3f;
4489 	partial = cmd[14] & 0x80;
4490 
4491 	if (zs_lba >= sdebug_capacity) {
4492 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4493 		return check_condition_result;
4494 	}
4495 
4496 	rep_max_zones = alloc_len > 64 ? (alloc_len - 64) >> ilog2(RZONES_DESC_HD) : 0;
4497 
4498 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4499 	if (!arr) {
4500 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4501 				INSUFF_RES_ASCQ);
4502 		return check_condition_result;
4503 	}
4504 
4505 	sdeb_read_lock(sip);
4506 
4507 	desc = arr + 64;
4508 	for (lba = zs_lba; lba < sdebug_capacity;
4509 	     lba = zsp->z_start + zsp->z_size) {
4510 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4511 			break;
4512 		zsp = zbc_zone(devip, lba);
4513 		switch (rep_opts) {
4514 		case 0x00:
4515 			/* All zones */
4516 			break;
4517 		case 0x01:
4518 			/* Empty zones */
4519 			if (zsp->z_cond != ZC1_EMPTY)
4520 				continue;
4521 			break;
4522 		case 0x02:
4523 			/* Implicit open zones */
4524 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4525 				continue;
4526 			break;
4527 		case 0x03:
4528 			/* Explicit open zones */
4529 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4530 				continue;
4531 			break;
4532 		case 0x04:
4533 			/* Closed zones */
4534 			if (zsp->z_cond != ZC4_CLOSED)
4535 				continue;
4536 			break;
4537 		case 0x05:
4538 			/* Full zones */
4539 			if (zsp->z_cond != ZC5_FULL)
4540 				continue;
4541 			break;
4542 		case 0x06:
4543 		case 0x07:
4544 		case 0x10:
4545 			/*
4546 			 * Read-only, offline and reset-WP-recommended
4547 			 * zones are not emulated: no zones to report.
4548 			 */
4549 			continue;
4550 		case 0x11:
4551 			/* non-seq-resource set */
4552 			if (!zsp->z_non_seq_resource)
4553 				continue;
4554 			break;
4555 		case 0x3e:
4556 			/* All zones except gap zones. */
4557 			if (zbc_zone_is_gap(zsp))
4558 				continue;
4559 			break;
4560 		case 0x3f:
4561 			/* Not write pointer (conventional) zones */
4562 			if (zbc_zone_is_seq(zsp))
4563 				continue;
4564 			break;
4565 		default:
4566 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4567 					INVALID_FIELD_IN_CDB, 0);
4568 			ret = check_condition_result;
4569 			goto fini;
4570 		}
4571 
4572 		if (nrz < rep_max_zones) {
4573 			/* Fill zone descriptor */
4574 			desc[0] = zsp->z_type;
4575 			desc[1] = zsp->z_cond << 4;
4576 			if (zsp->z_non_seq_resource)
4577 				desc[1] |= 1 << 1;
4578 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4579 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4580 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4581 			desc += 64;
4582 		}
4583 
4584 		if (partial && nrz >= rep_max_zones)
4585 			break;
4586 
4587 		nrz++;
4588 	}
4589 
4590 	/* Report header */
4591 	/* Zone list length. */
4592 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4593 	/* Maximum LBA */
4594 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4595 	/* Zone starting LBA granularity. */
4596 	if (devip->zcap < devip->zsize)
4597 		put_unaligned_be64(devip->zsize, arr + 16);
4598 
4599 	rep_len = (unsigned long)desc - (unsigned long)arr;
4600 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4601 
4602 fini:
4603 	sdeb_read_unlock(sip);
4604 	kfree(arr);
4605 	return ret;
4606 }
4607 
4608 /* Logic transplanted from tcmu-runner, file_zbc.c */
4609 static void zbc_open_all(struct sdebug_dev_info *devip)
4610 {
4611 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4612 	unsigned int i;
4613 
4614 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4615 		if (zsp->z_cond == ZC4_CLOSED)
4616 			zbc_open_zone(devip, &devip->zstate[i], true);
4617 	}
4618 }
4619 
4620 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4621 {
4622 	int res = 0;
4623 	u64 z_id;
4624 	enum sdebug_z_cond zc;
4625 	u8 *cmd = scp->cmnd;
4626 	struct sdeb_zone_state *zsp;
4627 	bool all = cmd[14] & 0x01;
4628 	struct sdeb_store_info *sip = devip2sip(devip, false);
4629 
4630 	if (!sdebug_dev_is_zoned(devip)) {
4631 		mk_sense_invalid_opcode(scp);
4632 		return check_condition_result;
4633 	}
4634 
4635 	sdeb_write_lock(sip);
4636 
4637 	if (all) {
4638 		/* Check if all closed zones can be open */
4639 		if (devip->max_open &&
4640 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4641 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4642 					INSUFF_ZONE_ASCQ);
4643 			res = check_condition_result;
4644 			goto fini;
4645 		}
4646 		/* Open all closed zones */
4647 		zbc_open_all(devip);
4648 		goto fini;
4649 	}
4650 
4651 	/* Open the specified zone */
4652 	z_id = get_unaligned_be64(cmd + 2);
4653 	if (z_id >= sdebug_capacity) {
4654 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4655 		res = check_condition_result;
4656 		goto fini;
4657 	}
4658 
4659 	zsp = zbc_zone(devip, z_id);
4660 	if (z_id != zsp->z_start) {
4661 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4662 		res = check_condition_result;
4663 		goto fini;
4664 	}
4665 	if (zbc_zone_is_conv(zsp)) {
4666 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4667 		res = check_condition_result;
4668 		goto fini;
4669 	}
4670 
4671 	zc = zsp->z_cond;
4672 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4673 		goto fini;
4674 
4675 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4676 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4677 				INSUFF_ZONE_ASCQ);
4678 		res = check_condition_result;
4679 		goto fini;
4680 	}
4681 
4682 	zbc_open_zone(devip, zsp, true);
4683 fini:
4684 	sdeb_write_unlock(sip);
4685 	return res;
4686 }
4687 
4688 static void zbc_close_all(struct sdebug_dev_info *devip)
4689 {
4690 	unsigned int i;
4691 
4692 	for (i = 0; i < devip->nr_zones; i++)
4693 		zbc_close_zone(devip, &devip->zstate[i]);
4694 }
4695 
4696 static int resp_close_zone(struct scsi_cmnd *scp,
4697 			   struct sdebug_dev_info *devip)
4698 {
4699 	int res = 0;
4700 	u64 z_id;
4701 	u8 *cmd = scp->cmnd;
4702 	struct sdeb_zone_state *zsp;
4703 	bool all = cmd[14] & 0x01;
4704 	struct sdeb_store_info *sip = devip2sip(devip, false);
4705 
4706 	if (!sdebug_dev_is_zoned(devip)) {
4707 		mk_sense_invalid_opcode(scp);
4708 		return check_condition_result;
4709 	}
4710 
4711 	sdeb_write_lock(sip);
4712 
4713 	if (all) {
4714 		zbc_close_all(devip);
4715 		goto fini;
4716 	}
4717 
4718 	/* Close specified zone */
4719 	z_id = get_unaligned_be64(cmd + 2);
4720 	if (z_id >= sdebug_capacity) {
4721 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4722 		res = check_condition_result;
4723 		goto fini;
4724 	}
4725 
4726 	zsp = zbc_zone(devip, z_id);
4727 	if (z_id != zsp->z_start) {
4728 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4729 		res = check_condition_result;
4730 		goto fini;
4731 	}
4732 	if (zbc_zone_is_conv(zsp)) {
4733 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4734 		res = check_condition_result;
4735 		goto fini;
4736 	}
4737 
4738 	zbc_close_zone(devip, zsp);
4739 fini:
4740 	sdeb_write_unlock(sip);
4741 	return res;
4742 }
4743 
4744 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4745 			    struct sdeb_zone_state *zsp, bool empty)
4746 {
4747 	enum sdebug_z_cond zc = zsp->z_cond;
4748 
4749 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4750 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4751 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4752 			zbc_close_zone(devip, zsp);
4753 		if (zsp->z_cond == ZC4_CLOSED)
4754 			devip->nr_closed--;
4755 		zsp->z_wp = zsp->z_start + zsp->z_size;
4756 		zsp->z_cond = ZC5_FULL;
4757 	}
4758 }
4759 
4760 static void zbc_finish_all(struct sdebug_dev_info *devip)
4761 {
4762 	unsigned int i;
4763 
4764 	for (i = 0; i < devip->nr_zones; i++)
4765 		zbc_finish_zone(devip, &devip->zstate[i], false);
4766 }
4767 
4768 static int resp_finish_zone(struct scsi_cmnd *scp,
4769 			    struct sdebug_dev_info *devip)
4770 {
4771 	struct sdeb_zone_state *zsp;
4772 	int res = 0;
4773 	u64 z_id;
4774 	u8 *cmd = scp->cmnd;
4775 	bool all = cmd[14] & 0x01;
4776 	struct sdeb_store_info *sip = devip2sip(devip, false);
4777 
4778 	if (!sdebug_dev_is_zoned(devip)) {
4779 		mk_sense_invalid_opcode(scp);
4780 		return check_condition_result;
4781 	}
4782 
4783 	sdeb_write_lock(sip);
4784 
4785 	if (all) {
4786 		zbc_finish_all(devip);
4787 		goto fini;
4788 	}
4789 
4790 	/* Finish the specified zone */
4791 	z_id = get_unaligned_be64(cmd + 2);
4792 	if (z_id >= sdebug_capacity) {
4793 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4794 		res = check_condition_result;
4795 		goto fini;
4796 	}
4797 
4798 	zsp = zbc_zone(devip, z_id);
4799 	if (z_id != zsp->z_start) {
4800 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4801 		res = check_condition_result;
4802 		goto fini;
4803 	}
4804 	if (zbc_zone_is_conv(zsp)) {
4805 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4806 		res = check_condition_result;
4807 		goto fini;
4808 	}
4809 
4810 	zbc_finish_zone(devip, zsp, true);
4811 fini:
4812 	sdeb_write_unlock(sip);
4813 	return res;
4814 }
4815 
4816 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4817 			 struct sdeb_zone_state *zsp)
4818 {
4819 	enum sdebug_z_cond zc;
4820 	struct sdeb_store_info *sip = devip2sip(devip, false);
4821 
4822 	if (!zbc_zone_is_seq(zsp))
4823 		return;
4824 
4825 	zc = zsp->z_cond;
4826 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4827 		zbc_close_zone(devip, zsp);
4828 
4829 	if (zsp->z_cond == ZC4_CLOSED)
4830 		devip->nr_closed--;
4831 
4832 	if (zsp->z_wp > zsp->z_start)
4833 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4834 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4835 
4836 	zsp->z_non_seq_resource = false;
4837 	zsp->z_wp = zsp->z_start;
4838 	zsp->z_cond = ZC1_EMPTY;
4839 }
4840 
4841 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4842 {
4843 	unsigned int i;
4844 
4845 	for (i = 0; i < devip->nr_zones; i++)
4846 		zbc_rwp_zone(devip, &devip->zstate[i]);
4847 }
4848 
4849 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4850 {
4851 	struct sdeb_zone_state *zsp;
4852 	int res = 0;
4853 	u64 z_id;
4854 	u8 *cmd = scp->cmnd;
4855 	bool all = cmd[14] & 0x01;
4856 	struct sdeb_store_info *sip = devip2sip(devip, false);
4857 
4858 	if (!sdebug_dev_is_zoned(devip)) {
4859 		mk_sense_invalid_opcode(scp);
4860 		return check_condition_result;
4861 	}
4862 
4863 	sdeb_write_lock(sip);
4864 
4865 	if (all) {
4866 		zbc_rwp_all(devip);
4867 		goto fini;
4868 	}
4869 
4870 	z_id = get_unaligned_be64(cmd + 2);
4871 	if (z_id >= sdebug_capacity) {
4872 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4873 		res = check_condition_result;
4874 		goto fini;
4875 	}
4876 
4877 	zsp = zbc_zone(devip, z_id);
4878 	if (z_id != zsp->z_start) {
4879 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4880 		res = check_condition_result;
4881 		goto fini;
4882 	}
4883 	if (zbc_zone_is_conv(zsp)) {
4884 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4885 		res = check_condition_result;
4886 		goto fini;
4887 	}
4888 
4889 	zbc_rwp_zone(devip, zsp);
4890 fini:
4891 	sdeb_write_unlock(sip);
4892 	return res;
4893 }
4894 
4895 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4896 {
4897 	u16 hwq;
4898 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4899 
4900 	hwq = blk_mq_unique_tag_to_hwq(tag);
4901 
4902 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4903 	if (WARN_ON_ONCE(hwq >= submit_queues))
4904 		hwq = 0;
4905 
4906 	return sdebug_q_arr + hwq;
4907 }
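
/*
 * blk_mq_unique_tag() packs the hardware queue index into the upper
 * 16 bits of the tag and the per-queue tag into the lower 16, so
 * blk_mq_unique_tag_to_hwq() above is effectively "tag >> 16". The
 * WARN_ON_ONCE() catches a hwq beyond the submit_queues this driver
 * configured.
 */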
4908 
4909 static u32 get_tag(struct scsi_cmnd *cmnd)
4910 {
4911 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4912 }
4913 
4914 /* Queued (deferred) command completions converge here. */
4915 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4916 {
4917 	bool aborted = sd_dp->aborted;
4918 	int qc_idx;
4919 	int retiring = 0;
4920 	unsigned long iflags;
4921 	struct sdebug_queue *sqp;
4922 	struct sdebug_queued_cmd *sqcp;
4923 	struct scsi_cmnd *scp;
4924 	struct sdebug_dev_info *devip;
4925 
4926 	if (unlikely(aborted))
4927 		sd_dp->aborted = false;
4928 	qc_idx = sd_dp->qc_idx;
4929 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4930 	if (sdebug_statistics) {
4931 		atomic_inc(&sdebug_completions);
4932 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4933 			atomic_inc(&sdebug_miss_cpus);
4934 	}
4935 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4936 		pr_err("wild qc_idx=%d\n", qc_idx);
4937 		return;
4938 	}
4939 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4940 	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
4941 	sqcp = &sqp->qc_arr[qc_idx];
4942 	scp = sqcp->a_cmnd;
4943 	if (unlikely(scp == NULL)) {
4944 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4945 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4946 		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4947 		return;
4948 	}
4949 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4950 	if (likely(devip))
4951 		atomic_dec(&devip->num_in_q);
4952 	else
4953 		pr_err("devip=NULL\n");
4954 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4955 		retiring = 1;
4956 
4957 	sqcp->a_cmnd = NULL;
4958 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4959 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4960 		pr_err("Unexpected completion\n");
4961 		return;
4962 	}
4963 
4964 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4965 		int k, retval;
4966 
4967 		retval = atomic_read(&retired_max_queue);
4968 		if (qc_idx >= retval) {
4969 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4970 			pr_err("index %d too large\n", retval);
4971 			return;
4972 		}
4973 		k = find_last_bit(sqp->in_use_bm, retval);
4974 		if ((k < sdebug_max_queue) || (k == retval))
4975 			atomic_set(&retired_max_queue, 0);
4976 		else
4977 			atomic_set(&retired_max_queue, k + 1);
4978 	}
4979 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4980 	if (unlikely(aborted)) {
4981 		if (sdebug_verbose)
4982 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4983 		return;
4984 	}
4985 	scsi_done(scp); /* callback to mid level */
4986 }
4987 
4988 /* Called when the high-resolution timer of a deferred command fires. */
4989 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4990 {
4991 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4992 						  hrt);
4993 	sdebug_q_cmd_complete(sd_dp);
4994 	return HRTIMER_NORESTART;
4995 }
4996 
4997 /* Called when the work queue runs a deferred command's work item. */
4998 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4999 {
5000 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5001 						  ew.work);
5002 	sdebug_q_cmd_complete(sd_dp);
5003 }
5004 
5005 static bool got_shared_uuid;
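/* State for uuid_ctl=2: all logical units share the same generated UUID */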
5006 static uuid_t shared_uuid;
5007 
5008 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
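/*
 * Lay out the emulated zones of a ZBC device: conventional zones first,
 * then sequential zones, with a gap zone after each sequential zone when
 * the zone capacity is smaller than the zone size.
 */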
5009 {
5010 	struct sdeb_zone_state *zsp;
5011 	sector_t capacity = get_sdebug_capacity();
5012 	sector_t conv_capacity;
5013 	sector_t zstart = 0;
5014 	unsigned int i;
5015 
5016 	/*
5017 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5018 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5019 	 * use the specified zone size, checking that at least 2 zones can be
5020 	 * created for the device.
5021 	 */
5022 	if (!sdeb_zbc_zone_size_mb) {
5023 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5024 			>> ilog2(sdebug_sector_size);
5025 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5026 			devip->zsize >>= 1;
5027 		if (devip->zsize < 2) {
5028 			pr_err("Device capacity too small\n");
5029 			return -EINVAL;
5030 		}
5031 	} else {
5032 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5033 			pr_err("Zone size is not a power of 2\n");
5034 			return -EINVAL;
5035 		}
5036 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5037 			>> ilog2(sdebug_sector_size);
5038 		if (devip->zsize >= capacity) {
5039 			pr_err("Zone size too large for device capacity\n");
5040 			return -EINVAL;
5041 		}
5042 	}
5043 
5044 	devip->zsize_shift = ilog2(devip->zsize);
5045 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5046 
5047 	if (sdeb_zbc_zone_cap_mb == 0) {
5048 		devip->zcap = devip->zsize;
5049 	} else {
5050 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5051 			      ilog2(sdebug_sector_size);
5052 		if (devip->zcap > devip->zsize) {
5053 			pr_err("Zone capacity too large\n");
5054 			return -EINVAL;
5055 		}
5056 	}
5057 
5058 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5059 	if (conv_capacity >= capacity) {
5060 		pr_err("Number of conventional zones too large\n");
5061 		return -EINVAL;
5062 	}
5063 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5064 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5065 			      devip->zsize_shift;
5066 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5067 
5068 	/* Add gap zones if zone capacity is smaller than the zone size */
5069 	if (devip->zcap < devip->zsize)
5070 		devip->nr_zones += devip->nr_seq_zones;
5071 
5072 	if (devip->zmodel == BLK_ZONED_HM) {
5073 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5074 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5075 			devip->max_open = (devip->nr_zones - 1) / 2;
5076 		else
5077 			devip->max_open = sdeb_zbc_max_open;
5078 	}
5079 
5080 	devip->zstate = kcalloc(devip->nr_zones,
5081 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5082 	if (!devip->zstate)
5083 		return -ENOMEM;
5084 
5085 	for (i = 0; i < devip->nr_zones; i++) {
5086 		zsp = &devip->zstate[i];
5087 
5088 		zsp->z_start = zstart;
5089 
5090 		if (i < devip->nr_conv_zones) {
5091 			zsp->z_type = ZBC_ZTYPE_CNV;
5092 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5093 			zsp->z_wp = (sector_t)-1;
5094 			zsp->z_size =
5095 				min_t(u64, devip->zsize, capacity - zstart);
5096 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5097 			if (devip->zmodel == BLK_ZONED_HM)
5098 				zsp->z_type = ZBC_ZTYPE_SWR;
5099 			else
5100 				zsp->z_type = ZBC_ZTYPE_SWP;
5101 			zsp->z_cond = ZC1_EMPTY;
5102 			zsp->z_wp = zsp->z_start;
5103 			zsp->z_size =
5104 				min_t(u64, devip->zcap, capacity - zstart);
5105 		} else {
5106 			zsp->z_type = ZBC_ZTYPE_GAP;
5107 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5108 			zsp->z_wp = (sector_t)-1;
5109 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5110 					    capacity - zstart);
5111 		}
5112 
5113 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5114 		zstart += zsp->z_size;
5115 	}
5116 
5117 	return 0;
5118 }
5119 
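/* Allocate a new device, apply the uuid_ctl and ZBC module parameters, and
 * add it to the owning host's device list. Returns NULL on failure. */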
5120 static struct sdebug_dev_info *sdebug_device_create(
5121 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5122 {
5123 	struct sdebug_dev_info *devip;
5124 
5125 	devip = kzalloc(sizeof(*devip), flags);
5126 	if (devip) {
5127 		if (sdebug_uuid_ctl == 1)
5128 			uuid_gen(&devip->lu_name);
5129 		else if (sdebug_uuid_ctl == 2) {
5130 			if (got_shared_uuid)
5131 				devip->lu_name = shared_uuid;
5132 			else {
5133 				uuid_gen(&shared_uuid);
5134 				got_shared_uuid = true;
5135 				devip->lu_name = shared_uuid;
5136 			}
5137 		}
5138 		devip->sdbg_host = sdbg_host;
5139 		if (sdeb_zbc_in_use) {
5140 			devip->zmodel = sdeb_zbc_model;
5141 			if (sdebug_device_create_zones(devip)) {
5142 				kfree(devip);
5143 				return NULL;
5144 			}
5145 		} else {
5146 			devip->zmodel = BLK_ZONED_NONE;
5147 		}
5149 		devip->create_ts = ktime_get_boottime();
5150 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5151 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5152 	}
5153 	return devip;
5154 }
5155 
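/* Find the device matching sdev's channel/target/lun; failing that, reuse
 * an unused slot or create a new device. */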
5156 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5157 {
5158 	struct sdebug_host_info *sdbg_host;
5159 	struct sdebug_dev_info *open_devip = NULL;
5160 	struct sdebug_dev_info *devip;
5161 
5162 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5163 	if (!sdbg_host) {
5164 		pr_err("Host info NULL\n");
5165 		return NULL;
5166 	}
5167 
5168 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5169 		if (devip->used && devip->channel == sdev->channel &&
5170 		    devip->target == sdev->id && devip->lun == sdev->lun)
5171 			return devip;
5172 		if (!devip->used && !open_devip)
5173 			open_devip = devip;
5177 	}
5178 	if (!open_devip) { /* try and make a new one */
5179 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5180 		if (!open_devip) {
5181 			pr_err("out of memory at line %d\n", __LINE__);
5182 			return NULL;
5183 		}
5184 	}
5185 
5186 	open_devip->channel = sdev->channel;
5187 	open_devip->target = sdev->id;
5188 	open_devip->lun = sdev->lun;
5189 	open_devip->sdbg_host = sdbg_host;
5190 	atomic_set(&open_devip->num_in_q, 0);
5191 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5192 	open_devip->used = true;
5193 	return open_devip;
5194 }
5195 
5196 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5197 {
5198 	if (sdebug_verbose)
5199 		pr_info("slave_alloc <%u %u %u %llu>\n",
5200 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5201 	return 0;
5202 }
5203 
5204 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5205 {
5206 	struct sdebug_dev_info *devip =
5207 			(struct sdebug_dev_info *)sdp->hostdata;
5208 
5209 	if (sdebug_verbose)
5210 		pr_info("slave_configure <%u %u %u %llu>\n",
5211 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5212 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5213 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5214 	if (devip == NULL) {
5215 		devip = find_build_dev_info(sdp);
5216 		if (devip == NULL)
5217 			return 1;  /* no resources, will be marked offline */
5218 	}
5219 	sdp->hostdata = devip;
5220 	if (sdebug_no_uld)
5221 		sdp->no_uld_attach = 1;
5222 	config_cdb_len(sdp);
5223 	return 0;
5224 }
5225 
5226 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5227 {
5228 	struct sdebug_dev_info *devip =
5229 		(struct sdebug_dev_info *)sdp->hostdata;
5230 
5231 	if (sdebug_verbose)
5232 		pr_info("slave_destroy <%u %u %u %llu>\n",
5233 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5234 	if (devip) {
5235 		/* make this slot available for re-use */
5236 		devip->used = false;
5237 		sdp->hostdata = NULL;
5238 	}
5239 }
5240 
5241 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5242 			   enum sdeb_defer_type defer_t)
5243 {
5244 	if (!sd_dp)
5245 		return;
5246 	if (defer_t == SDEB_DEFER_HRT)
5247 		hrtimer_cancel(&sd_dp->hrt);
5248 	else if (defer_t == SDEB_DEFER_WQ)
5249 		cancel_work_sync(&sd_dp->ew.work);
5250 }
5251 
5252 /* If @cmnd is found, delete its timer or work queue and return true;
5253    else return false. */
5254 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5255 {
5256 	unsigned long iflags;
5257 	int j, k, qmax, r_qmax;
5258 	enum sdeb_defer_type l_defer_t;
5259 	struct sdebug_queue *sqp;
5260 	struct sdebug_queued_cmd *sqcp;
5261 	struct sdebug_dev_info *devip;
5262 	struct sdebug_defer *sd_dp;
5263 
5264 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5265 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5266 		qmax = sdebug_max_queue;
5267 		r_qmax = atomic_read(&retired_max_queue);
5268 		if (r_qmax > qmax)
5269 			qmax = r_qmax;
5270 		for (k = 0; k < qmax; ++k) {
5271 			if (test_bit(k, sqp->in_use_bm)) {
5272 				sqcp = &sqp->qc_arr[k];
5273 				if (cmnd != sqcp->a_cmnd)
5274 					continue;
5275 				/* found */
5276 				devip = (struct sdebug_dev_info *)
5277 						cmnd->device->hostdata;
5278 				if (devip)
5279 					atomic_dec(&devip->num_in_q);
5280 				sqcp->a_cmnd = NULL;
5281 				sd_dp = sqcp->sd_dp;
5282 				if (sd_dp) {
5283 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5284 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5285 				} else
5286 					l_defer_t = SDEB_DEFER_NONE;
5287 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5288 				stop_qc_helper(sd_dp, l_defer_t);
5289 				clear_bit(k, sqp->in_use_bm);
5290 				return true;
5291 			}
5292 		}
5293 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5294 	}
5295 	return false;
5296 }
5297 
5298 /* Deletes (stops) timers or work queues of all queued commands */
5299 static void stop_all_queued(void)
5300 {
5301 	unsigned long iflags;
5302 	int j, k;
5303 	enum sdeb_defer_type l_defer_t;
5304 	struct sdebug_queue *sqp;
5305 	struct sdebug_queued_cmd *sqcp;
5306 	struct sdebug_dev_info *devip;
5307 	struct sdebug_defer *sd_dp;
5308 
5309 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5310 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5311 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5312 			if (test_bit(k, sqp->in_use_bm)) {
5313 				sqcp = &sqp->qc_arr[k];
5314 				if (sqcp->a_cmnd == NULL)
5315 					continue;
5316 				devip = (struct sdebug_dev_info *)
5317 					sqcp->a_cmnd->device->hostdata;
5318 				if (devip)
5319 					atomic_dec(&devip->num_in_q);
5320 				sqcp->a_cmnd = NULL;
5321 				sd_dp = sqcp->sd_dp;
5322 				if (sd_dp) {
5323 					l_defer_t = READ_ONCE(sd_dp->defer_t);
5324 					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
5325 				} else
5326 					l_defer_t = SDEB_DEFER_NONE;
5327 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5328 				stop_qc_helper(sd_dp, l_defer_t);
5329 				clear_bit(k, sqp->in_use_bm);
5330 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5331 			}
5332 		}
5333 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5334 	}
5335 }
5336 
5337 /* Free queued command memory on heap */
5338 static void free_all_queued(void)
5339 {
5340 	int j, k;
5341 	struct sdebug_queue *sqp;
5342 	struct sdebug_queued_cmd *sqcp;
5343 
5344 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5345 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5346 			sqcp = &sqp->qc_arr[k];
5347 			kfree(sqcp->sd_dp);
5348 			sqcp->sd_dp = NULL;
5349 		}
5350 	}
5351 }
5352 
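/* SCSI error handler: cancel the command's deferred completion, if any, and
 * always report SUCCESS back to the mid-layer. */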
5353 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5354 {
5355 	bool ok;
5356 
5357 	++num_aborts;
5358 	if (SCpnt) {
5359 		ok = stop_queued_cmnd(SCpnt);
5360 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5361 			sdev_printk(KERN_INFO, SCpnt->device,
5362 				    "%s: command%s found\n", __func__,
5363 				    ok ? "" : " not");
5364 	}
5365 	return SUCCESS;
5366 }
5367 
5368 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5369 {
5370 	++num_dev_resets;
5371 	if (SCpnt && SCpnt->device) {
5372 		struct scsi_device *sdp = SCpnt->device;
5373 		struct sdebug_dev_info *devip =
5374 				(struct sdebug_dev_info *)sdp->hostdata;
5375 
5376 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5377 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5378 		if (devip)
5379 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5380 	}
5381 	return SUCCESS;
5382 }
5383 
5384 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5385 {
5386 	struct sdebug_host_info *sdbg_host;
5387 	struct sdebug_dev_info *devip;
5388 	struct scsi_device *sdp;
5389 	struct Scsi_Host *hp;
5390 	int k = 0;
5391 
5392 	++num_target_resets;
5393 	if (!SCpnt)
5394 		goto lie;
5395 	sdp = SCpnt->device;
5396 	if (!sdp)
5397 		goto lie;
5398 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5399 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5400 	hp = sdp->host;
5401 	if (!hp)
5402 		goto lie;
5403 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5404 	if (sdbg_host) {
5405 		list_for_each_entry(devip,
5406 				    &sdbg_host->dev_info_list,
5407 				    dev_list)
5408 			if (devip->target == sdp->id) {
5409 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5410 				++k;
5411 			}
5412 	}
5413 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5414 		sdev_printk(KERN_INFO, sdp,
5415 			    "%s: %d device(s) found in target\n", __func__, k);
5416 lie:
5417 	return SUCCESS;
5418 }
5419 
5420 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5421 {
5422 	struct sdebug_host_info *sdbg_host;
5423 	struct sdebug_dev_info *devip;
5424 	struct scsi_device *sdp;
5425 	struct Scsi_Host *hp;
5426 	int k = 0;
5427 
5428 	++num_bus_resets;
5429 	if (!(SCpnt && SCpnt->device))
5430 		goto lie;
5431 	sdp = SCpnt->device;
5432 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5433 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5434 	hp = sdp->host;
5435 	if (hp) {
5436 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5437 		if (sdbg_host) {
5438 			list_for_each_entry(devip,
5439 					    &sdbg_host->dev_info_list,
5440 					    dev_list) {
5441 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5442 				++k;
5443 			}
5444 		}
5445 	}
5446 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5447 		sdev_printk(KERN_INFO, sdp,
5448 			    "%s: %d device(s) found in host\n", __func__, k);
5449 lie:
5450 	return SUCCESS;
5451 }
5452 
5453 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5454 {
5455 	struct sdebug_host_info *sdbg_host;
5456 	struct sdebug_dev_info *devip;
5457 	int k = 0;
5458 
5459 	++num_host_resets;
5460 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5461 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5462 	spin_lock(&sdebug_host_list_lock);
5463 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5464 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5465 				    dev_list) {
5466 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5467 			++k;
5468 		}
5469 	}
5470 	spin_unlock(&sdebug_host_list_lock);
5471 	stop_all_queued();
5472 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5473 		sdev_printk(KERN_INFO, SCpnt->device,
5474 			    "%s: %d device(s) found\n", __func__, k);
5475 	return SUCCESS;
5476 }
5477 
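/* Write an MBR-style partition table for sdebug_num_parts partitions into
 * the first sector of the ram store. */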
5478 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5479 {
5480 	struct msdos_partition *pp;
5481 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5482 	int sectors_per_part, num_sectors, k;
5483 	int heads_by_sects, start_sec, end_sec;
5484 
5485 	/* assume partition table already zeroed */
5486 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5487 		return;
5488 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5489 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5490 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5491 	}
5492 	num_sectors = (int)get_sdebug_capacity();
5493 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5494 			   / sdebug_num_parts;
5495 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5496 	starts[0] = sdebug_sectors_per;
5497 	max_part_secs = sectors_per_part;
5498 	for (k = 1; k < sdebug_num_parts; ++k) {
5499 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5500 			    * heads_by_sects;
5501 		if (starts[k] - starts[k - 1] < max_part_secs)
5502 			max_part_secs = starts[k] - starts[k - 1];
5503 	}
5504 	starts[sdebug_num_parts] = num_sectors;
5505 	starts[sdebug_num_parts + 1] = 0;
5506 
5507 	ramp[510] = 0x55;	/* magic partition markings */
5508 	ramp[511] = 0xAA;
5509 	pp = (struct msdos_partition *)(ramp + 0x1be);
5510 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5511 		start_sec = starts[k];
5512 		end_sec = starts[k] + max_part_secs - 1;
5513 		pp->boot_ind = 0;
5514 
5515 		pp->cyl = start_sec / heads_by_sects;
5516 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5517 			   / sdebug_sectors_per;
5518 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5519 
5520 		pp->end_cyl = end_sec / heads_by_sects;
5521 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5522 			       / sdebug_sectors_per;
5523 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5524 
5525 		pp->start_sect = cpu_to_le32(start_sec);
5526 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5527 		pp->sys_ind = 0x83;	/* plain Linux partition */
5528 	}
5529 }
5530 
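/* Block or unblock all submission queues; while blocked, schedule_resp()
 * returns SCSI_MLQUEUE_HOST_BUSY for new commands. */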
5531 static void block_unblock_all_queues(bool block)
5532 {
5533 	int j;
5534 	struct sdebug_queue *sqp;
5535 
5536 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5537 		atomic_set(&sqp->blocked, (int)block);
5538 }
5539 
5540 /* Adjust (by rounding down) the sdebug_cmnd_count so that abs(every_nth)-1
5541  * commands will be processed normally before the next trigger occurs.
5542  */
5543 static void tweak_cmnd_count(void)
5544 {
5545 	int count, modulo;
5546 
5547 	modulo = abs(sdebug_every_nth);
5548 	if (modulo < 2)
5549 		return;
5550 	block_unblock_all_queues(true);
5551 	count = atomic_read(&sdebug_cmnd_count);
5552 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5553 	block_unblock_all_queues(false);
5554 }
5555 
5556 static void clear_queue_stats(void)
5557 {
5558 	atomic_set(&sdebug_cmnd_count, 0);
5559 	atomic_set(&sdebug_completions, 0);
5560 	atomic_set(&sdebug_miss_cpus, 0);
5561 	atomic_set(&sdebug_a_tsf, 0);
5562 }
5563 
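/* True when the running command count is a multiple of abs(every_nth) */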
5564 static bool inject_on_this_cmd(void)
5565 {
5566 	if (sdebug_every_nth == 0)
5567 		return false;
5568 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5569 }
5570 
5571 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5572 
5573 /* Complete the processing of the thread that queued a SCSI command to this
5574  * driver. It either completes the command by calling scsi_done() or
5575  * schedules a hrtimer or work queue, then returns 0. Returns
5576  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5577  */
5578 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5579 			 int scsi_result,
5580 			 int (*pfp)(struct scsi_cmnd *,
5581 				    struct sdebug_dev_info *),
5582 			 int delta_jiff, int ndelay)
5583 {
5584 	bool new_sd_dp;
5585 	bool inject = false;
5586 	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
5587 	int k, num_in_q, qdepth;
5588 	unsigned long iflags;
5589 	u64 ns_from_boot = 0;
5590 	struct sdebug_queue *sqp;
5591 	struct sdebug_queued_cmd *sqcp;
5592 	struct scsi_device *sdp;
5593 	struct sdebug_defer *sd_dp;
5594 
5595 	if (unlikely(devip == NULL)) {
5596 		if (scsi_result == 0)
5597 			scsi_result = DID_NO_CONNECT << 16;
5598 		goto respond_in_thread;
5599 	}
5600 	sdp = cmnd->device;
5601 
5602 	if (delta_jiff == 0)
5603 		goto respond_in_thread;
5604 
5605 	sqp = get_queue(cmnd);
5606 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5607 	if (unlikely(atomic_read(&sqp->blocked))) {
5608 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5609 		return SCSI_MLQUEUE_HOST_BUSY;
5610 	}
5611 	num_in_q = atomic_read(&devip->num_in_q);
5612 	qdepth = cmnd->device->queue_depth;
5613 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5614 		if (scsi_result) {
5615 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5616 			goto respond_in_thread;
5617 		} else
5618 			scsi_result = device_qfull_result;
5619 	} else if (unlikely(sdebug_every_nth &&
5620 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5621 			    (scsi_result == 0))) {
5622 		if ((num_in_q == (qdepth - 1)) &&
5623 		    (atomic_inc_return(&sdebug_a_tsf) >=
5624 		     abs(sdebug_every_nth))) {
5625 			atomic_set(&sdebug_a_tsf, 0);
5626 			inject = true;
5627 			scsi_result = device_qfull_result;
5628 		}
5629 	}
5630 
5631 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5632 	if (unlikely(k >= sdebug_max_queue)) {
5633 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5634 		if (scsi_result)
5635 			goto respond_in_thread;
5636 		scsi_result = device_qfull_result;
5637 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5638 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5639 				    __func__, sdebug_max_queue);
5640 		goto respond_in_thread;
5641 	}
5642 	set_bit(k, sqp->in_use_bm);
5643 	atomic_inc(&devip->num_in_q);
5644 	sqcp = &sqp->qc_arr[k];
5645 	sqcp->a_cmnd = cmnd;
5646 	cmnd->host_scribble = (unsigned char *)sqcp;
5647 	sd_dp = sqcp->sd_dp;
5648 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5649 
5650 	if (!sd_dp) {
5651 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5652 		if (!sd_dp) {
5653 			atomic_dec(&devip->num_in_q);
5654 			clear_bit(k, sqp->in_use_bm);
5655 			return SCSI_MLQUEUE_HOST_BUSY;
5656 		}
5657 		new_sd_dp = true;
5658 	} else {
5659 		new_sd_dp = false;
5660 	}
5661 
5662 	/* Set the hostwide tag */
5663 	if (sdebug_host_max_queue)
5664 		sd_dp->hc_idx = get_tag(cmnd);
5665 
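	/* For polled requests, note the submission time so the completion
	 * timestamp (cmpl_ts) can be computed relative to it. */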
5666 	if (polled)
5667 		ns_from_boot = ktime_get_boottime_ns();
5668 
5669 	/* one of the resp_*() response functions is called here */
5670 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5671 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5672 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5673 		delta_jiff = ndelay = 0;
5674 	}
5675 	if (cmnd->result == 0 && scsi_result != 0)
5676 		cmnd->result = scsi_result;
5677 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5678 		if (atomic_read(&sdeb_inject_pending)) {
5679 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5680 			atomic_set(&sdeb_inject_pending, 0);
5681 			cmnd->result = check_condition_result;
5682 		}
5683 	}
5684 
5685 	if (unlikely(sdebug_verbose && cmnd->result))
5686 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5687 			    __func__, cmnd->result);
5688 
5689 	if (delta_jiff > 0 || ndelay > 0) {
5690 		ktime_t kt;
5691 
5692 		if (delta_jiff > 0) {
5693 			u64 ns = jiffies_to_nsecs(delta_jiff);
5694 
5695 			if (sdebug_random && ns < U32_MAX) {
5696 				ns = prandom_u32_max((u32)ns);
5697 			} else if (sdebug_random) {
5698 				ns >>= 12;	/* scale to 4 usec precision */
5699 				if (ns < U32_MAX)	/* over 4 hours max */
5700 					ns = prandom_u32_max((u32)ns);
5701 				ns <<= 12;
5702 			}
5703 			kt = ns_to_ktime(ns);
5704 		} else {	/* ndelay has a 4.2 second max */
5705 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5706 					     (u32)ndelay;
5707 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5708 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5709 
5710 				if (kt <= d) {	/* elapsed duration >= kt */
5711 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5712 					sqcp->a_cmnd = NULL;
5713 					atomic_dec(&devip->num_in_q);
5714 					clear_bit(k, sqp->in_use_bm);
5715 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5716 					if (new_sd_dp)
5717 						kfree(sd_dp);
5718 					/* call scsi_done() from this thread */
5719 					scsi_done(cmnd);
5720 					return 0;
5721 				}
5722 				/* otherwise reduce kt by elapsed time */
5723 				kt -= d;
5724 			}
5725 		}
5726 		if (polled) {
5727 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5728 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5729 			if (!sd_dp->init_poll) {
5730 				sd_dp->init_poll = true;
5731 				sqcp->sd_dp = sd_dp;
5732 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5733 				sd_dp->qc_idx = k;
5734 			}
5735 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5736 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5737 		} else {
5738 			if (!sd_dp->init_hrt) {
5739 				sd_dp->init_hrt = true;
5740 				sqcp->sd_dp = sd_dp;
5741 				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5742 					     HRTIMER_MODE_REL_PINNED);
5743 				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5744 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5745 				sd_dp->qc_idx = k;
5746 			}
5747 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5748 			/* schedule the invocation of scsi_done() for a later time */
5749 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5750 		}
5751 		if (sdebug_statistics)
5752 			sd_dp->issuing_cpu = raw_smp_processor_id();
5753 	} else {	/* jdelay < 0, use work queue */
5754 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5755 			     atomic_read(&sdeb_inject_pending)))
5756 			sd_dp->aborted = true;
5757 		if (polled) {
5758 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5759 			spin_lock_irqsave(&sqp->qc_lock, iflags);
5760 			if (!sd_dp->init_poll) {
5761 				sd_dp->init_poll = true;
5762 				sqcp->sd_dp = sd_dp;
5763 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5764 				sd_dp->qc_idx = k;
5765 			}
5766 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5767 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5768 		} else {
5769 			if (!sd_dp->init_wq) {
5770 				sd_dp->init_wq = true;
5771 				sqcp->sd_dp = sd_dp;
5772 				sd_dp->sqa_idx = sqp - sdebug_q_arr;
5773 				sd_dp->qc_idx = k;
5774 				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5775 			}
5776 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5777 			schedule_work(&sd_dp->ew.work);
5778 		}
5779 		if (sdebug_statistics)
5780 			sd_dp->issuing_cpu = raw_smp_processor_id();
5781 		if (unlikely(sd_dp->aborted)) {
5782 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5783 				    scsi_cmd_to_rq(cmnd)->tag);
5784 			blk_abort_request(scsi_cmd_to_rq(cmnd));
5785 			atomic_set(&sdeb_inject_pending, 0);
5786 			sd_dp->aborted = false;
5787 		}
5788 	}
5789 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5790 		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5791 			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5792 	return 0;
5793 
5794 respond_in_thread:	/* call back to mid-layer using invocation thread */
5795 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5796 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5797 	if (cmnd->result == 0 && scsi_result != 0)
5798 		cmnd->result = scsi_result;
5799 	scsi_done(cmnd);
5800 	return 0;
5801 }
5802 
5803 /* Note: The following macros create attribute files in the
5804    /sys/module/scsi_debug/parameters directory. Unfortunately this
5805    driver is unaware of changes made via those files and cannot trigger
5806    the auxiliary actions it performs when the corresponding attribute in
5807    the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5808  */
5809 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5810 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5811 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5812 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5813 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5814 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5815 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5816 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5817 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5818 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5819 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5820 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5821 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5822 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5823 module_param_string(inq_product, sdebug_inq_product_id,
5824 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5825 module_param_string(inq_rev, sdebug_inq_product_rev,
5826 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5827 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5828 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5829 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5830 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5831 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5832 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5833 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5834 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5835 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5836 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5837 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5838 		   S_IRUGO | S_IWUSR);
5839 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5840 		   S_IRUGO | S_IWUSR);
5841 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5842 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5843 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5844 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5845 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5846 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5847 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5848 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5849 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5850 module_param_named(per_host_store, sdebug_per_host_store, bool,
5851 		   S_IRUGO | S_IWUSR);
5852 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5853 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5854 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5855 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5856 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5857 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5858 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5859 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5860 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5861 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5862 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5863 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5864 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5865 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5866 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5867 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5868 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5869 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5870 		   S_IRUGO | S_IWUSR);
5871 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5872 module_param_named(write_same_length, sdebug_write_same_length, int,
5873 		   S_IRUGO | S_IWUSR);
5874 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5875 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5876 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5877 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5878 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5879 
5880 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5881 MODULE_DESCRIPTION("SCSI debug adapter driver");
5882 MODULE_LICENSE("GPL");
5883 MODULE_VERSION(SDEBUG_VERSION);
5884 
5885 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5886 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5887 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5888 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5889 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5890 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5891 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5892 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5893 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5894 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5895 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5896 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5897 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5898 MODULE_PARM_DESC(host_max_queue,
5899 		 "host max # of queued cmds (0 to max(def); if non-zero, max_queue is fixed to the same value)");
5900 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5901 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5902 		 SDEBUG_VERSION "\")");
5903 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5904 MODULE_PARM_DESC(lbprz,
5905 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5906 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5907 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5908 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5909 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5910 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5911 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5912 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5913 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5914 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5915 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5916 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5917 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5918 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5919 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5920 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5921 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5922 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5923 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5924 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5925 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5926 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5927 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5928 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5929 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5930 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5931 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5932 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5933 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5934 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5935 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5936 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5937 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5938 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5939 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5940 MODULE_PARM_DESC(uuid_ctl,
5941 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5942 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5943 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5944 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5945 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5946 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5947 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5948 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5949 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5950 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
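
/*
 * Illustrative example only (the sizes are arbitrary, but all parameter
 * names are the module parameters declared above): these combine to create,
 * for instance, a host-managed ZBC disk of 256 MiB with 16 MiB zones, four
 * of them conventional:
 *
 *   modprobe scsi_debug dev_size_mb=256 zbc=managed zone_size_mb=16 zone_nr_conv=4
 */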
5951 
5952 #define SDEBUG_INFO_LEN 256
5953 static char sdebug_info[SDEBUG_INFO_LEN];
5954 
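/* Build the version/configuration banner reported through the host's
 * info() callback. */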
5955 static const char *scsi_debug_info(struct Scsi_Host *shp)
5956 {
5957 	int k;
5958 
5959 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5960 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5961 	if (k >= (SDEBUG_INFO_LEN - 1))
5962 		return sdebug_info;
5963 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5964 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5965 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5966 		  "statistics", (int)sdebug_statistics);
5967 	return sdebug_info;
5968 }
5969 
5970 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5971 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5972 				 int length)
5973 {
5974 	char arr[16];
5975 	int opts;
5976 	int min_len = length > 15 ? 15 : length;
5977 
5978 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5979 		return -EACCES;
5980 	memcpy(arr, buffer, min_len);
5981 	arr[min_len] = '\0';
5982 	if (1 != sscanf(arr, "%d", &opts))
5983 		return -EINVAL;
5984 	sdebug_opts = opts;
5985 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5986 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5987 	if (sdebug_every_nth != 0)
5988 		tweak_cmnd_count();
5989 	return length;
5990 }
5991 
5992 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5993  * same for each scsi_debug host (if more than one). Some of the counters
5994  * output are not atomic so they might be inaccurate on a busy system. */
5995 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5996 {
5997 	int f, j, l;
5998 	struct sdebug_queue *sqp;
5999 	struct sdebug_host_info *sdhp;
6000 
6001 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
6002 		   SDEBUG_VERSION, sdebug_version_date);
6003 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
6004 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
6005 		   sdebug_opts, sdebug_every_nth);
6006 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
6007 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
6008 		   sdebug_sector_size, "bytes");
6009 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
6010 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
6011 		   num_aborts);
6012 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
6013 		   num_dev_resets, num_target_resets, num_bus_resets,
6014 		   num_host_resets);
6015 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
6016 		   dix_reads, dix_writes, dif_errors);
6017 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
6018 		   sdebug_statistics);
6019 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
6020 		   atomic_read(&sdebug_cmnd_count),
6021 		   atomic_read(&sdebug_completions),
6022 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
6023 		   atomic_read(&sdebug_a_tsf),
6024 		   atomic_read(&sdeb_mq_poll_count));
6025 
6026 	seq_printf(m, "submit_queues=%d\n", submit_queues);
6027 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
6028 		seq_printf(m, "  queue %d:\n", j);
6029 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
6030 		if (f != sdebug_max_queue) {
6031 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
6032 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
6033 				   "first,last bits", f, l);
6034 		}
6035 	}
6036 
6037 	seq_printf(m, "this host_no=%d\n", host->host_no);
6038 	if (!xa_empty(per_store_ap)) {
6039 		bool niu;
6040 		int idx;
6041 		unsigned long l_idx;
6042 		struct sdeb_store_info *sip;
6043 
6044 		seq_puts(m, "\nhost list:\n");
6045 		j = 0;
6046 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6047 			idx = sdhp->si_idx;
6048 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6049 				   sdhp->shost->host_no, idx);
6050 			++j;
6051 		}
6052 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6053 			   sdeb_most_recent_idx);
6054 		j = 0;
6055 		xa_for_each(per_store_ap, l_idx, sip) {
6056 			niu = xa_get_mark(per_store_ap, l_idx,
6057 					  SDEB_XA_NOT_IN_USE);
6058 			idx = (int)l_idx;
6059 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6060 				   (niu ? "  not_in_use" : ""));
6061 			++j;
6062 		}
6063 	}
6064 	return 0;
6065 }
6066 
6067 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6068 {
6069 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6070 }
6071 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6072  * of delay is jiffies.
6073  */
6074 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6075 			   size_t count)
6076 {
6077 	int jdelay, res;
6078 
6079 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6080 		res = count;
6081 		if (sdebug_jdelay != jdelay) {
6082 			int j, k;
6083 			struct sdebug_queue *sqp;
6084 
6085 			block_unblock_all_queues(true);
6086 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6087 			     ++j, ++sqp) {
6088 				k = find_first_bit(sqp->in_use_bm,
6089 						   sdebug_max_queue);
6090 				if (k != sdebug_max_queue) {
6091 					res = -EBUSY;   /* queued commands */
6092 					break;
6093 				}
6094 			}
6095 			if (res > 0) {
6096 				sdebug_jdelay = jdelay;
6097 				sdebug_ndelay = 0;
6098 			}
6099 			block_unblock_all_queues(false);
6100 		}
6101 		return res;
6102 	}
6103 	return -EINVAL;
6104 }
6105 static DRIVER_ATTR_RW(delay);
6106 
6107 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6108 {
6109 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6110 }
6111 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6112 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6113 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6114 			    size_t count)
6115 {
6116 	int ndelay, res;
6117 
6118 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6119 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6120 		res = count;
6121 		if (sdebug_ndelay != ndelay) {
6122 			int j, k;
6123 			struct sdebug_queue *sqp;
6124 
6125 			block_unblock_all_queues(true);
6126 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6127 			     ++j, ++sqp) {
6128 				k = find_first_bit(sqp->in_use_bm,
6129 						   sdebug_max_queue);
6130 				if (k != sdebug_max_queue) {
6131 					res = -EBUSY;   /* queued commands */
6132 					break;
6133 				}
6134 			}
6135 			if (res > 0) {
6136 				sdebug_ndelay = ndelay;
6137 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6138 							: DEF_JDELAY;
6139 			}
6140 			block_unblock_all_queues(false);
6141 		}
6142 		return res;
6143 	}
6144 	return -EINVAL;
6145 }
6146 static DRIVER_ATTR_RW(ndelay);
6147 
6148 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6149 {
6150 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6151 }
6152 
6153 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6154 			  size_t count)
6155 {
6156 	int opts;
6157 	char work[20];
6158 
6159 	if (sscanf(buf, "%10s", work) == 1) {
6160 		if (strncasecmp(work, "0x", 2) == 0) {
6161 			if (kstrtoint(work + 2, 16, &opts) == 0)
6162 				goto opts_done;
6163 		} else {
6164 			if (kstrtoint(work, 10, &opts) == 0)
6165 				goto opts_done;
6166 		}
6167 	}
6168 	return -EINVAL;
6169 opts_done:
6170 	sdebug_opts = opts;
6171 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6172 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6173 	tweak_cmnd_count();
6174 	return count;
6175 }
6176 static DRIVER_ATTR_RW(opts);
6177 
6178 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6179 {
6180 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6181 }
6182 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6183 			   size_t count)
6184 {
6185 	int n;
6186 
6187 	/* Cannot change from or to TYPE_ZBC with sysfs */
6188 	if (sdebug_ptype == TYPE_ZBC)
6189 		return -EINVAL;
6190 
6191 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6192 		if (n == TYPE_ZBC)
6193 			return -EINVAL;
6194 		sdebug_ptype = n;
6195 		return count;
6196 	}
6197 	return -EINVAL;
6198 }
6199 static DRIVER_ATTR_RW(ptype);
6200 
6201 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6202 {
6203 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6204 }
6205 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6206 			    size_t count)
6207 {
6208 	int n;
6209 
6210 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6211 		sdebug_dsense = n;
6212 		return count;
6213 	}
6214 	return -EINVAL;
6215 }
6216 static DRIVER_ATTR_RW(dsense);
6217 
6218 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6219 {
6220 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6221 }
6222 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6223 			     size_t count)
6224 {
6225 	int n, idx;
6226 
6227 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6228 		bool want_store = (n == 0);
6229 		struct sdebug_host_info *sdhp;
6230 
6231 		n = (n > 0);
6232 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6233 		if (sdebug_fake_rw == n)
6234 			return count;	/* not transitioning so do nothing */
6235 
6236 		if (want_store) {	/* 1 --> 0 transition, set up store */
6237 			if (sdeb_first_idx < 0) {
6238 				idx = sdebug_add_store();
6239 				if (idx < 0)
6240 					return idx;
6241 			} else {
6242 				idx = sdeb_first_idx;
6243 				xa_clear_mark(per_store_ap, idx,
6244 					      SDEB_XA_NOT_IN_USE);
6245 			}
6246 			/* make all hosts use same store */
6247 			list_for_each_entry(sdhp, &sdebug_host_list,
6248 					    host_list) {
6249 				if (sdhp->si_idx != idx) {
6250 					xa_set_mark(per_store_ap, sdhp->si_idx,
6251 						    SDEB_XA_NOT_IN_USE);
6252 					sdhp->si_idx = idx;
6253 				}
6254 			}
6255 			sdeb_most_recent_idx = idx;
6256 		} else {	/* 0 --> 1 transition is trigger for shrink */
6257 			sdebug_erase_all_stores(true /* apart from first */);
6258 		}
6259 		sdebug_fake_rw = n;
6260 		return count;
6261 	}
6262 	return -EINVAL;
6263 }
6264 static DRIVER_ATTR_RW(fake_rw);
6265 
6266 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6267 {
6268 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6269 }
6270 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6271 			      size_t count)
6272 {
6273 	int n;
6274 
6275 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6276 		sdebug_no_lun_0 = n;
6277 		return count;
6278 	}
6279 	return -EINVAL;
6280 }
6281 static DRIVER_ATTR_RW(no_lun_0);
6282 
6283 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6284 {
6285 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6286 }
6287 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6288 			      size_t count)
6289 {
6290 	int n;
6291 
6292 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6293 		sdebug_num_tgts = n;
6294 		sdebug_max_tgts_luns();
6295 		return count;
6296 	}
6297 	return -EINVAL;
6298 }
6299 static DRIVER_ATTR_RW(num_tgts);
6300 
6301 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6302 {
6303 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6304 }
6305 static DRIVER_ATTR_RO(dev_size_mb);
6306 
6307 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6308 {
6309 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6310 }
6311 
6312 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6313 				    size_t count)
6314 {
6315 	bool v;
6316 
6317 	if (kstrtobool(buf, &v))
6318 		return -EINVAL;
6319 
6320 	sdebug_per_host_store = v;
6321 	return count;
6322 }
6323 static DRIVER_ATTR_RW(per_host_store);
6324 
6325 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6326 {
6327 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6328 }
6329 static DRIVER_ATTR_RO(num_parts);
6330 
6331 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6332 {
6333 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6334 }
6335 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6336 			       size_t count)
6337 {
6338 	int nth;
6339 	char work[20];
6340 
6341 	if (sscanf(buf, "%10s", work) == 1) {
6342 		if (strncasecmp(work, "0x", 2) == 0) {
6343 			if (kstrtoint(work + 2, 16, &nth) == 0)
6344 				goto every_nth_done;
6345 		} else {
6346 			if (kstrtoint(work, 10, &nth) == 0)
6347 				goto every_nth_done;
6348 		}
6349 	}
6350 	return -EINVAL;
6351 
6352 every_nth_done:
6353 	sdebug_every_nth = nth;
6354 	if (nth && !sdebug_statistics) {
6355 		pr_info("every_nth needs statistics=1, set it\n");
6356 		sdebug_statistics = true;
6357 	}
6358 	tweak_cmnd_count();
6359 	return count;
6360 }
6361 static DRIVER_ATTR_RW(every_nth);
6362 
6363 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6364 {
6365 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6366 }
6367 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6368 				size_t count)
6369 {
6370 	int n;
6371 	bool changed;
6372 
6373 	if (kstrtoint(buf, 0, &n))
6374 		return -EINVAL;
6375 	if (n >= 0) {
6376 		if (n > (int)SAM_LUN_AM_FLAT) {
6377 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6378 			return -EINVAL;
6379 		}
6380 		changed = ((int)sdebug_lun_am != n);
6381 		sdebug_lun_am = n;
6382 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6383 			struct sdebug_host_info *sdhp;
6384 			struct sdebug_dev_info *dp;
6385 
6386 			spin_lock(&sdebug_host_list_lock);
6387 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6388 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6389 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6390 				}
6391 			}
6392 			spin_unlock(&sdebug_host_list_lock);
6393 		}
6394 		return count;
6395 	}
6396 	return -EINVAL;
6397 }
6398 static DRIVER_ATTR_RW(lun_format);
6399 
6400 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6401 {
6402 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6403 }
6404 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6405 			      size_t count)
6406 {
6407 	int n;
6408 	bool changed;
6409 
6410 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6411 		if (n > 256) {
6412 			pr_warn("max_luns can be no more than 256\n");
6413 			return -EINVAL;
6414 		}
6415 		changed = (sdebug_max_luns != n);
6416 		sdebug_max_luns = n;
6417 		sdebug_max_tgts_luns();
6418 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6419 			struct sdebug_host_info *sdhp;
6420 			struct sdebug_dev_info *dp;
6421 
6422 			spin_lock(&sdebug_host_list_lock);
6423 			list_for_each_entry(sdhp, &sdebug_host_list,
6424 					    host_list) {
6425 				list_for_each_entry(dp, &sdhp->dev_info_list,
6426 						    dev_list) {
6427 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6428 						dp->uas_bm);
6429 				}
6430 			}
6431 			spin_unlock(&sdebug_host_list_lock);
6432 		}
6433 		return count;
6434 	}
6435 	return -EINVAL;
6436 }
6437 static DRIVER_ATTR_RW(max_luns);
6438 
6439 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6440 {
6441 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6442 }
6443 /* N.B. max_queue can be changed while there are queued commands. In-flight
6444  * commands beyond the new max_queue will still be completed. */
6445 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6446 			       size_t count)
6447 {
6448 	int j, n, k, a;
6449 	struct sdebug_queue *sqp;
6450 
6451 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6452 	    (n <= SDEBUG_CANQUEUE) &&
6453 	    (sdebug_host_max_queue == 0)) {
6454 		block_unblock_all_queues(true);
6455 		k = 0;
6456 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6457 		     ++j, ++sqp) {
6458 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6459 			if (a > k)
6460 				k = a;
6461 		}
6462 		sdebug_max_queue = n;
6463 		if (k == SDEBUG_CANQUEUE)
6464 			atomic_set(&retired_max_queue, 0);
6465 		else if (k >= n)
6466 			atomic_set(&retired_max_queue, k + 1);
6467 		else
6468 			atomic_set(&retired_max_queue, 0);
6469 		block_unblock_all_queues(false);
6470 		return count;
6471 	}
6472 	return -EINVAL;
6473 }
6474 static DRIVER_ATTR_RW(max_queue);
6475 
6476 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6477 {
6478 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6479 }
6480 
6481 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6482 {
6483 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6484 }
6485 
6486 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6487 {
6488 	bool v;
6489 
6490 	if (kstrtobool(buf, &v))
6491 		return -EINVAL;
6492 
6493 	sdebug_no_rwlock = v;
6494 	return count;
6495 }
6496 static DRIVER_ATTR_RW(no_rwlock);
6497 
6498 /*
6499  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6500  * in range [0, sdebug_host_max_queue), we can't change it.
6501  */
6502 static DRIVER_ATTR_RO(host_max_queue);
6503 
6504 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6505 {
6506 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6507 }
6508 static DRIVER_ATTR_RO(no_uld);
6509 
6510 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6511 {
6512 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6513 }
6514 static DRIVER_ATTR_RO(scsi_level);
6515 
6516 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6517 {
6518 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6519 }
6520 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6521 				size_t count)
6522 {
6523 	int n;
6524 	bool changed;
6525 
6526 	/* Ignore capacity change for ZBC drives for now */
6527 	if (sdeb_zbc_in_use)
6528 		return -ENOTSUPP;
6529 
6530 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6531 		changed = (sdebug_virtual_gb != n);
6532 		sdebug_virtual_gb = n;
6533 		sdebug_capacity = get_sdebug_capacity();
6534 		if (changed) {
6535 			struct sdebug_host_info *sdhp;
6536 			struct sdebug_dev_info *dp;
6537 
6538 			spin_lock(&sdebug_host_list_lock);
6539 			list_for_each_entry(sdhp, &sdebug_host_list,
6540 					    host_list) {
6541 				list_for_each_entry(dp, &sdhp->dev_info_list,
6542 						    dev_list) {
6543 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6544 						dp->uas_bm);
6545 				}
6546 			}
6547 			spin_unlock(&sdebug_host_list_lock);
6548 		}
6549 		return count;
6550 	}
6551 	return -EINVAL;
6552 }
6553 static DRIVER_ATTR_RW(virtual_gb);
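/*
 * Usage sketch (illustrative): "echo 4 > .../scsi_debug/virtual_gb"
 * advertises a 4 GiB capacity while the backing store stays dev_size_mb;
 * LBAs beyond the store wrap around onto it. On a change, every device
 * gets a CAPACITY_CHANGED unit attention (for ZBC models this store
 * returns -ENOTSUPP instead).
 */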
6554 
6555 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6556 {
6557 	/* the absolute number of currently active hosts is shown */
6558 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6559 }
6560 
6561 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6562 			      size_t count)
6563 {
6564 	bool found;
6565 	unsigned long idx;
6566 	struct sdeb_store_info *sip;
6567 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6568 	int delta_hosts;
6569 
6570 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6571 		return -EINVAL;
6572 	if (delta_hosts > 0) {
6573 		do {
6574 			found = false;
6575 			if (want_phs) {
6576 				xa_for_each_marked(per_store_ap, idx, sip,
6577 						   SDEB_XA_NOT_IN_USE) {
6578 					sdeb_most_recent_idx = (int)idx;
6579 					found = true;
6580 					break;
6581 				}
6582 				if (found)	/* re-use case */
6583 					sdebug_add_host_helper((int)idx);
6584 				else
6585 					sdebug_do_add_host(true);
6586 			} else {
6587 				sdebug_do_add_host(false);
6588 			}
6589 		} while (--delta_hosts);
6590 	} else if (delta_hosts < 0) {
6591 		do {
6592 			sdebug_do_remove_host(false);
6593 		} while (++delta_hosts);
6594 	}
6595 	return count;
6596 }
6597 static DRIVER_ATTR_RW(add_host);
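/*
 * Usage sketch (illustrative):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host    # add 2 hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host   # remove 1 host
 *
 * With per-host stores active, added hosts first re-use any store marked
 * SDEB_XA_NOT_IN_USE before allocating a fresh one.
 */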
6598 
6599 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6600 {
6601 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6602 }
6603 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6604 				    size_t count)
6605 {
6606 	int n;
6607 
6608 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6609 		sdebug_vpd_use_hostno = n;
6610 		return count;
6611 	}
6612 	return -EINVAL;
6613 }
6614 static DRIVER_ATTR_RW(vpd_use_hostno);
6615 
6616 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6617 {
6618 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6619 }
6620 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6621 				size_t count)
6622 {
6623 	int n;
6624 
6625 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6626 		if (n > 0)
6627 			sdebug_statistics = true;
6628 		else {
6629 			clear_queue_stats();
6630 			sdebug_statistics = false;
6631 		}
6632 		return count;
6633 	}
6634 	return -EINVAL;
6635 }
6636 static DRIVER_ATTR_RW(statistics);
6637 
6638 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6639 {
6640 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6641 }
6642 static DRIVER_ATTR_RO(sector_size);
6643 
6644 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6645 {
6646 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6647 }
6648 static DRIVER_ATTR_RO(submit_queues);
6649 
6650 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6651 {
6652 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6653 }
6654 static DRIVER_ATTR_RO(dix);
6655 
6656 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6657 {
6658 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6659 }
6660 static DRIVER_ATTR_RO(dif);
6661 
6662 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6663 {
6664 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6665 }
6666 static DRIVER_ATTR_RO(guard);
6667 
6668 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6669 {
6670 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6671 }
6672 static DRIVER_ATTR_RO(ato);
6673 
6674 static ssize_t map_show(struct device_driver *ddp, char *buf)
6675 {
6676 	ssize_t count = 0;
6677 
6678 	if (!scsi_debug_lbp())
6679 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6680 				 sdebug_store_sectors);
6681 
6682 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6683 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6684 
6685 		if (sip)
6686 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6687 					  (int)map_size, sip->map_storep);
6688 	}
6689 	buf[count++] = '\n';
6690 	buf[count] = '\0';
6691 
6692 	return count;
6693 }
6694 static DRIVER_ATTR_RO(map);
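/*
 * Example output (illustrative): with LBP enabled and one partition,
 * "cat .../scsi_debug/map" might print "0-1", i.e. only the two
 * provisioning blocks covering the partition table are mapped.
 */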
6695 
6696 static ssize_t random_show(struct device_driver *ddp, char *buf)
6697 {
6698 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6699 }
6700 
6701 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6702 			    size_t count)
6703 {
6704 	bool v;
6705 
6706 	if (kstrtobool(buf, &v))
6707 		return -EINVAL;
6708 
6709 	sdebug_random = v;
6710 	return count;
6711 }
6712 static DRIVER_ATTR_RW(random);
6713 
6714 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6715 {
6716 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6717 }
6718 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6719 			       size_t count)
6720 {
6721 	int n;
6722 
6723 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6724 		sdebug_removable = (n > 0);
6725 		return count;
6726 	}
6727 	return -EINVAL;
6728 }
6729 static DRIVER_ATTR_RW(removable);
6730 
6731 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6732 {
6733 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6734 }
6735 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6736 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6737 			       size_t count)
6738 {
6739 	int n;
6740 
6741 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6742 		sdebug_host_lock = (n > 0);
6743 		return count;
6744 	}
6745 	return -EINVAL;
6746 }
6747 static DRIVER_ATTR_RW(host_lock);
6748 
6749 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6750 {
6751 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6752 }
6753 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6754 			    size_t count)
6755 {
6756 	int n;
6757 
6758 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6759 		sdebug_strict = (n > 0);
6760 		return count;
6761 	}
6762 	return -EINVAL;
6763 }
6764 static DRIVER_ATTR_RW(strict);
6765 
6766 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6767 {
6768 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6769 }
6770 static DRIVER_ATTR_RO(uuid_ctl);
6771 
6772 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6773 {
6774 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6775 }
6776 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6777 			     size_t count)
6778 {
6779 	int ret, n;
6780 
6781 	ret = kstrtoint(buf, 0, &n);
6782 	if (ret)
6783 		return ret;
6784 	sdebug_cdb_len = n;
6785 	all_config_cdb_len();
6786 	return count;
6787 }
6788 static DRIVER_ATTR_RW(cdb_len);
6789 
6790 static const char * const zbc_model_strs_a[] = {
6791 	[BLK_ZONED_NONE] = "none",
6792 	[BLK_ZONED_HA]   = "host-aware",
6793 	[BLK_ZONED_HM]   = "host-managed",
6794 };
6795 
6796 static const char * const zbc_model_strs_b[] = {
6797 	[BLK_ZONED_NONE] = "no",
6798 	[BLK_ZONED_HA]   = "aware",
6799 	[BLK_ZONED_HM]   = "managed",
6800 };
6801 
6802 static const char * const zbc_model_strs_c[] = {
6803 	[BLK_ZONED_NONE] = "0",
6804 	[BLK_ZONED_HA]   = "1",
6805 	[BLK_ZONED_HM]   = "2",
6806 };
6807 
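/*
 * Accepts any spelling from the three tables above; e.g. (illustrative)
 * "host-managed", "managed" and "2" all map to BLK_ZONED_HM.
 */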
6808 static int sdeb_zbc_model_str(const char *cp)
6809 {
6810 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6811 
6812 	if (res < 0) {
6813 		res = sysfs_match_string(zbc_model_strs_b, cp);
6814 		if (res < 0) {
6815 			res = sysfs_match_string(zbc_model_strs_c, cp);
6816 			if (res < 0)
6817 				return -EINVAL;
6818 		}
6819 	}
6820 	return res;
6821 }
6822 
6823 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6824 {
6825 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6826 			 zbc_model_strs_a[sdeb_zbc_model]);
6827 }
6828 static DRIVER_ATTR_RO(zbc);
6829 
6830 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6831 {
6832 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6833 }
6834 static DRIVER_ATTR_RO(tur_ms_to_ready);
6835 
6836 /* Note: the following array creates attribute files in the
6837  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6838  * files (over those found in the /sys/module/scsi_debug/parameters
6839  * directory) is that auxiliary actions can be triggered when an
6840  * attribute is changed. For example, see add_host_store() above.
6841  */
6842 
6843 static struct attribute *sdebug_drv_attrs[] = {
6844 	&driver_attr_delay.attr,
6845 	&driver_attr_opts.attr,
6846 	&driver_attr_ptype.attr,
6847 	&driver_attr_dsense.attr,
6848 	&driver_attr_fake_rw.attr,
6849 	&driver_attr_host_max_queue.attr,
6850 	&driver_attr_no_lun_0.attr,
6851 	&driver_attr_num_tgts.attr,
6852 	&driver_attr_dev_size_mb.attr,
6853 	&driver_attr_num_parts.attr,
6854 	&driver_attr_every_nth.attr,
6855 	&driver_attr_lun_format.attr,
6856 	&driver_attr_max_luns.attr,
6857 	&driver_attr_max_queue.attr,
6858 	&driver_attr_no_rwlock.attr,
6859 	&driver_attr_no_uld.attr,
6860 	&driver_attr_scsi_level.attr,
6861 	&driver_attr_virtual_gb.attr,
6862 	&driver_attr_add_host.attr,
6863 	&driver_attr_per_host_store.attr,
6864 	&driver_attr_vpd_use_hostno.attr,
6865 	&driver_attr_sector_size.attr,
6866 	&driver_attr_statistics.attr,
6867 	&driver_attr_submit_queues.attr,
6868 	&driver_attr_dix.attr,
6869 	&driver_attr_dif.attr,
6870 	&driver_attr_guard.attr,
6871 	&driver_attr_ato.attr,
6872 	&driver_attr_map.attr,
6873 	&driver_attr_random.attr,
6874 	&driver_attr_removable.attr,
6875 	&driver_attr_host_lock.attr,
6876 	&driver_attr_ndelay.attr,
6877 	&driver_attr_strict.attr,
6878 	&driver_attr_uuid_ctl.attr,
6879 	&driver_attr_cdb_len.attr,
6880 	&driver_attr_tur_ms_to_ready.attr,
6881 	&driver_attr_zbc.attr,
6882 	NULL,
6883 };
6884 ATTRIBUTE_GROUPS(sdebug_drv);
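/*
 * E.g. (illustrative) "cat /sys/bus/pseudo/drivers/scsi_debug/every_nth"
 * and "cat /sys/module/scsi_debug/parameters/every_nth" read the same
 * value, but only a write to the former can trigger side effects.
 */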
6885 
6886 static struct device *pseudo_primary;
6887 
6888 static int __init scsi_debug_init(void)
6889 {
6890 	bool want_store = (sdebug_fake_rw == 0);
6891 	unsigned long sz;
6892 	int k, ret, hosts_to_add;
6893 	int idx = -1;
6894 
6895 	ramdisk_lck_a[0] = &atomic_rw;
6896 	ramdisk_lck_a[1] = &atomic_rw2;
6897 	atomic_set(&retired_max_queue, 0);
6898 
6899 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6900 		pr_warn("ndelay must be less than 1 second, ignored\n");
6901 		sdebug_ndelay = 0;
6902 	} else if (sdebug_ndelay > 0)
6903 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6904 
6905 	switch (sdebug_sector_size) {
6906 	case  512:
6907 	case 1024:
6908 	case 2048:
6909 	case 4096:
6910 		break;
6911 	default:
6912 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6913 		return -EINVAL;
6914 	}
6915 
6916 	switch (sdebug_dif) {
6917 	case T10_PI_TYPE0_PROTECTION:
6918 		break;
6919 	case T10_PI_TYPE1_PROTECTION:
6920 	case T10_PI_TYPE2_PROTECTION:
6921 	case T10_PI_TYPE3_PROTECTION:
6922 		have_dif_prot = true;
6923 		break;
6924 
6925 	default:
6926 		pr_err("dif must be 0, 1, 2 or 3\n");
6927 		return -EINVAL;
6928 	}
6929 
6930 	if (sdebug_num_tgts < 0) {
6931 		pr_err("num_tgts must be >= 0\n");
6932 		return -EINVAL;
6933 	}
6934 
6935 	if (sdebug_guard > 1) {
6936 		pr_err("guard must be 0 or 1\n");
6937 		return -EINVAL;
6938 	}
6939 
6940 	if (sdebug_ato > 1) {
6941 		pr_err("ato must be 0 or 1\n");
6942 		return -EINVAL;
6943 	}
6944 
6945 	if (sdebug_physblk_exp > 15) {
6946 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6947 		return -EINVAL;
6948 	}
6949 
6950 	sdebug_lun_am = sdebug_lun_am_i;
6951 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6952 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6953 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6954 	}
6955 
6956 	if (sdebug_max_luns > 256) {
6957 		if (sdebug_max_luns > 16384) {
6958 			pr_warn("max_luns can be no more than 16384, using default\n");
6959 			sdebug_max_luns = DEF_MAX_LUNS;
6960 		}
6961 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6962 	}
6963 
6964 	if (sdebug_lowest_aligned > 0x3fff) {
6965 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6966 		return -EINVAL;
6967 	}
6968 
6969 	if (submit_queues < 1) {
6970 		pr_err("submit_queues must be 1 or more\n");
6971 		return -EINVAL;
6972 	}
6973 
6974 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6975 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6976 		return -EINVAL;
6977 	}
6978 
6979 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6980 	    (sdebug_host_max_queue < 0)) {
6981 		pr_err("host_max_queue must be in range [0, %d]\n",
6982 		       SDEBUG_CANQUEUE);
6983 		return -EINVAL;
6984 	}
6985 
6986 	if (sdebug_host_max_queue &&
6987 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6988 		sdebug_max_queue = sdebug_host_max_queue;
6989 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6990 			sdebug_max_queue);
6991 	}
6992 
6993 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6994 			       GFP_KERNEL);
6995 	if (sdebug_q_arr == NULL)
6996 		return -ENOMEM;
6997 	for (k = 0; k < submit_queues; ++k)
6998 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6999 
7000 	/*
7001 	 * check for host managed zoned block device specified with
7002 	 * ptype=0x14 or zbc=XXX.
7003 	 */
7004 	if (sdebug_ptype == TYPE_ZBC) {
7005 		sdeb_zbc_model = BLK_ZONED_HM;
7006 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
7007 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
7008 		if (k < 0) {
7009 			ret = k;
7010 			goto free_q_arr;
7011 		}
7012 		sdeb_zbc_model = k;
7013 		switch (sdeb_zbc_model) {
7014 		case BLK_ZONED_NONE:
7015 		case BLK_ZONED_HA:
7016 			sdebug_ptype = TYPE_DISK;
7017 			break;
7018 		case BLK_ZONED_HM:
7019 			sdebug_ptype = TYPE_ZBC;
7020 			break;
7021 		default:
7022 			pr_err("Invalid ZBC model\n");
7023 			ret = -EINVAL;
7024 			goto free_q_arr;
7025 		}
7026 	}
7027 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
7028 		sdeb_zbc_in_use = true;
7029 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7030 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
7031 	}
7032 
7033 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
7034 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
7035 	if (sdebug_dev_size_mb < 1)
7036 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
7037 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7038 	sdebug_store_sectors = sz / sdebug_sector_size;
7039 	sdebug_capacity = get_sdebug_capacity();
7040 
7041 	/* play around with geometry, don't waste too much on track 0 */
7042 	sdebug_heads = 8;
7043 	sdebug_sectors_per = 32;
7044 	if (sdebug_dev_size_mb >= 256)
7045 		sdebug_heads = 64;
7046 	else if (sdebug_dev_size_mb >= 16)
7047 		sdebug_heads = 32;
7048 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7049 			       (sdebug_sectors_per * sdebug_heads);
7050 	if (sdebug_cylinders_per >= 1024) {
7051 		/* other LLDs do this; implies >= 1GB ram disk ... */
7052 		sdebug_heads = 255;
7053 		sdebug_sectors_per = 63;
7054 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7055 			       (sdebug_sectors_per * sdebug_heads);
7056 	}
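	/*
	 * Worked example (illustrative): dev_size_mb=16 with sector_size=512
	 * gives 32768 sectors; heads=32 and sectors_per=32 then yield
	 * sdebug_cylinders_per = 32768 / (32 * 32) = 32.
	 */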
7057 	if (scsi_debug_lbp()) {
7058 		sdebug_unmap_max_blocks =
7059 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7060 
7061 		sdebug_unmap_max_desc =
7062 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7063 
7064 		sdebug_unmap_granularity =
7065 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7066 
7067 		if (sdebug_unmap_alignment &&
7068 		    sdebug_unmap_granularity <=
7069 		    sdebug_unmap_alignment) {
7070 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7071 			ret = -EINVAL;
7072 			goto free_q_arr;
7073 		}
7074 	}
7075 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7076 	if (want_store) {
7077 		idx = sdebug_add_store();
7078 		if (idx < 0) {
7079 			ret = idx;
7080 			goto free_q_arr;
7081 		}
7082 	}
7083 
7084 	pseudo_primary = root_device_register("pseudo_0");
7085 	if (IS_ERR(pseudo_primary)) {
7086 		pr_warn("root_device_register() error\n");
7087 		ret = PTR_ERR(pseudo_primary);
7088 		goto free_vm;
7089 	}
7090 	ret = bus_register(&pseudo_lld_bus);
7091 	if (ret < 0) {
7092 		pr_warn("bus_register error: %d\n", ret);
7093 		goto dev_unreg;
7094 	}
7095 	ret = driver_register(&sdebug_driverfs_driver);
7096 	if (ret < 0) {
7097 		pr_warn("driver_register error: %d\n", ret);
7098 		goto bus_unreg;
7099 	}
7100 
7101 	hosts_to_add = sdebug_add_host;
7102 	sdebug_add_host = 0;
7103 
7104 	for (k = 0; k < hosts_to_add; k++) {
7105 		if (want_store && k == 0) {
7106 			ret = sdebug_add_host_helper(idx);
7107 			if (ret < 0) {
7108 				pr_err("add_host_helper k=%d, error=%d\n",
7109 				       k, -ret);
7110 				break;
7111 			}
7112 		} else {
7113 			ret = sdebug_do_add_host(want_store &&
7114 						 sdebug_per_host_store);
7115 			if (ret < 0) {
7116 				pr_err("add_host k=%d error=%d\n", k, -ret);
7117 				break;
7118 			}
7119 		}
7120 	}
7121 	if (sdebug_verbose)
7122 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7123 
7124 	return 0;
7125 
7126 bus_unreg:
7127 	bus_unregister(&pseudo_lld_bus);
7128 dev_unreg:
7129 	root_device_unregister(pseudo_primary);
7130 free_vm:
7131 	sdebug_erase_store(idx, NULL);
7132 free_q_arr:
7133 	kfree(sdebug_q_arr);
7134 	return ret;
7135 }
7136 
7137 static void __exit scsi_debug_exit(void)
7138 {
7139 	int k = sdebug_num_hosts;
7140 
7141 	stop_all_queued();
7142 	for (; k; k--)
7143 		sdebug_do_remove_host(true);
7144 	free_all_queued();
7145 	driver_unregister(&sdebug_driverfs_driver);
7146 	bus_unregister(&pseudo_lld_bus);
7147 	root_device_unregister(pseudo_primary);
7148 
7149 	sdebug_erase_all_stores(false);
7150 	xa_destroy(per_store_ap);
7151 	kfree(sdebug_q_arr);
7152 }
7153 
7154 device_initcall(scsi_debug_init);
7155 module_exit(scsi_debug_exit);
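/*
 * Typical load (illustrative):
 *
 *   modprobe scsi_debug dev_size_mb=64 num_tgts=1 max_luns=2
 *
 * builds one pseudo host with one target exposing two LUNs that share a
 * single 64 MiB ramdisk store.
 */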
7156 
7157 static void sdebug_release_adapter(struct device *dev)
7158 {
7159 	struct sdebug_host_info *sdbg_host;
7160 
7161 	sdbg_host = to_sdebug_host(dev);
7162 	kfree(sdbg_host);
7163 }
7164 
7165 /* idx must be valid; if sip is NULL then it will be looked up using idx */
7166 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7167 {
7168 	if (idx < 0)
7169 		return;
7170 	if (!sip) {
7171 		if (xa_empty(per_store_ap))
7172 			return;
7173 		sip = xa_load(per_store_ap, idx);
7174 		if (!sip)
7175 			return;
7176 	}
7177 	vfree(sip->map_storep);
7178 	vfree(sip->dif_storep);
7179 	vfree(sip->storep);
7180 	xa_erase(per_store_ap, idx);
7181 	kfree(sip);
7182 }
7183 
7184 /* Assume apart_from_first==false only in the shutdown case. */
7185 static void sdebug_erase_all_stores(bool apart_from_first)
7186 {
7187 	unsigned long idx;
7188 	struct sdeb_store_info *sip = NULL;
7189 
7190 	xa_for_each(per_store_ap, idx, sip) {
7191 		if (apart_from_first)
7192 			apart_from_first = false;
7193 		else
7194 			sdebug_erase_store(idx, sip);
7195 	}
7196 	if (apart_from_first)
7197 		sdeb_most_recent_idx = sdeb_first_idx;
7198 }
7199 
7200 /*
7201  * Returns the new store xarray element index if >= 0, else a negated errno.
7202  * Limit the number of stores to 65536.
7203  */
7204 static int sdebug_add_store(void)
7205 {
7206 	int res;
7207 	u32 n_idx;
7208 	unsigned long iflags;
7209 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7210 	struct sdeb_store_info *sip = NULL;
7211 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7212 
7213 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7214 	if (!sip)
7215 		return -ENOMEM;
7216 
7217 	xa_lock_irqsave(per_store_ap, iflags);
7218 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7219 	if (unlikely(res < 0)) {
7220 		xa_unlock_irqrestore(per_store_ap, iflags);
7221 		kfree(sip);
7222 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7223 		return res;
7224 	}
7225 	sdeb_most_recent_idx = n_idx;
7226 	if (sdeb_first_idx < 0)
7227 		sdeb_first_idx = n_idx;
7228 	xa_unlock_irqrestore(per_store_ap, iflags);
7229 
7230 	res = -ENOMEM;
7231 	sip->storep = vzalloc(sz);
7232 	if (!sip->storep) {
7233 		pr_err("user data oom\n");
7234 		goto err;
7235 	}
7236 	if (sdebug_num_parts > 0)
7237 		sdebug_build_parts(sip->storep, sz);
7238 
7239 	/* DIF/DIX: what T10 calls Protection Information (PI) */
7240 	if (sdebug_dix) {
7241 		int dif_size;
7242 
7243 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7244 		sip->dif_storep = vmalloc(dif_size);
7245 
7246 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7247 			sip->dif_storep);
7248 
7249 		if (!sip->dif_storep) {
7250 			pr_err("DIX oom\n");
7251 			goto err;
7252 		}
7253 		memset(sip->dif_storep, 0xff, dif_size);
7254 	}
7255 	/* Logical Block Provisioning */
7256 	if (scsi_debug_lbp()) {
7257 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7258 		sip->map_storep = vmalloc(array_size(sizeof(long),
7259 						     BITS_TO_LONGS(map_size)));
7260 
7261 		pr_info("%lu provisioning blocks\n", map_size);
7262 
7263 		if (!sip->map_storep) {
7264 			pr_err("LBP map oom\n");
7265 			goto err;
7266 		}
7267 
7268 		bitmap_zero(sip->map_storep, map_size);
7269 
7270 		/* Map first 1KB for partition table */
7271 		if (sdebug_num_parts)
7272 			map_region(sip, 0, 2);
7273 	}
7274 
7275 	rwlock_init(&sip->macc_lck);
7276 	return (int)n_idx;
7277 err:
7278 	sdebug_erase_store((int)n_idx, sip);
7279 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7280 	return res;
7281 }
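/*
 * Sizing sketch (illustrative): dev_size_mb=8 with sector_size=512 gives
 * 16384 store sectors, so storep is an 8 MiB vzalloc() and, if dix is
 * set, dif_storep adds 16384 * sizeof(struct t10_pi_tuple) = 128 KiB.
 */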
7282 
7283 static int sdebug_add_host_helper(int per_host_idx)
7284 {
7285 	int k, devs_per_host, idx;
7286 	int error = -ENOMEM;
7287 	struct sdebug_host_info *sdbg_host;
7288 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7289 
7290 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7291 	if (!sdbg_host)
7292 		return -ENOMEM;
7293 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7294 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7295 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7296 	sdbg_host->si_idx = idx;
7297 
7298 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7299 
7300 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7301 	for (k = 0; k < devs_per_host; k++) {
7302 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7303 		if (!sdbg_devinfo)
7304 			goto clean;
7305 	}
7306 
7307 	spin_lock(&sdebug_host_list_lock);
7308 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7309 	spin_unlock(&sdebug_host_list_lock);
7310 
7311 	sdbg_host->dev.bus = &pseudo_lld_bus;
7312 	sdbg_host->dev.parent = pseudo_primary;
7313 	sdbg_host->dev.release = &sdebug_release_adapter;
7314 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7315 
7316 	error = device_register(&sdbg_host->dev);
7317 	if (error)
7318 		goto clean;
7319 
7320 	++sdebug_num_hosts;
7321 	return 0;
7322 
7323 clean:
7324 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7325 				 dev_list) {
7326 		list_del(&sdbg_devinfo->dev_list);
7327 		kfree(sdbg_devinfo->zstate);
7328 		kfree(sdbg_devinfo);
7329 	}
7330 	if (sdbg_host->dev.release)
7331 		put_device(&sdbg_host->dev);
7332 	else
7333 		kfree(sdbg_host);
7334 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7335 	return error;
7336 }
7337 
7338 static int sdebug_do_add_host(bool mk_new_store)
7339 {
7340 	int ph_idx = sdeb_most_recent_idx;
7341 
7342 	if (mk_new_store) {
7343 		ph_idx = sdebug_add_store();
7344 		if (ph_idx < 0)
7345 			return ph_idx;
7346 	}
7347 	return sdebug_add_host_helper(ph_idx);
7348 }
7349 
7350 static void sdebug_do_remove_host(bool the_end)
7351 {
7352 	int idx = -1;
7353 	struct sdebug_host_info *sdbg_host = NULL;
7354 	struct sdebug_host_info *sdbg_host2;
7355 
7356 	spin_lock(&sdebug_host_list_lock);
7357 	if (!list_empty(&sdebug_host_list)) {
7358 		sdbg_host = list_entry(sdebug_host_list.prev,
7359 				       struct sdebug_host_info, host_list);
7360 		idx = sdbg_host->si_idx;
7361 	}
7362 	if (!the_end && idx >= 0) {
7363 		bool unique = true;
7364 
7365 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7366 			if (sdbg_host2 == sdbg_host)
7367 				continue;
7368 			if (idx == sdbg_host2->si_idx) {
7369 				unique = false;
7370 				break;
7371 			}
7372 		}
7373 		if (unique) {
7374 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7375 			if (idx == sdeb_most_recent_idx)
7376 				--sdeb_most_recent_idx;
7377 		}
7378 	}
7379 	if (sdbg_host)
7380 		list_del(&sdbg_host->host_list);
7381 	spin_unlock(&sdebug_host_list_lock);
7382 
7383 	if (!sdbg_host)
7384 		return;
7385 
7386 	device_unregister(&sdbg_host->dev);
7387 	--sdebug_num_hosts;
7388 }
7389 
7390 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7391 {
7392 	int num_in_q = 0;
7393 	struct sdebug_dev_info *devip;
7394 
7395 	block_unblock_all_queues(true);
7396 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7397 	if (NULL == devip) {
7398 		block_unblock_all_queues(false);
7399 		return -ENODEV;
7400 	}
7401 	num_in_q = atomic_read(&devip->num_in_q);
7402 
7403 	if (qdepth > SDEBUG_CANQUEUE) {
7404 		qdepth = SDEBUG_CANQUEUE;
7405 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7406 			qdepth, SDEBUG_CANQUEUE);
7407 	}
7408 	if (qdepth < 1)
7409 		qdepth = 1;
7410 	if (qdepth != sdev->queue_depth)
7411 		scsi_change_queue_depth(sdev, qdepth);
7412 
7413 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7414 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7415 			    __func__, qdepth, num_in_q);
7416 	}
7417 	block_unblock_all_queues(false);
7418 	return sdev->queue_depth;
7419 }
7420 
7421 static bool fake_timeout(struct scsi_cmnd *scp)
7422 {
7423 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7424 		if (sdebug_every_nth < -1)
7425 			sdebug_every_nth = -1;
7426 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7427 			return true; /* ignore command causing timeout */
7428 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7429 			 scsi_medium_access_command(scp))
7430 			return true; /* time out reads and writes */
7431 	}
7432 	return false;
7433 }
7434 
7435 /* Response to a TUR or media access command when the device is stopped */
7436 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7437 {
7438 	int stopped_state;
7439 	u64 diff_ns = 0;
7440 	ktime_t now_ts = ktime_get_boottime();
7441 	struct scsi_device *sdp = scp->device;
7442 
7443 	stopped_state = atomic_read(&devip->stopped);
7444 	if (stopped_state == 2) {
7445 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7446 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7447 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7448 				/* tur_ms_to_ready timer extinguished */
7449 				atomic_set(&devip->stopped, 0);
7450 				return 0;
7451 			}
7452 		}
7453 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7454 		if (sdebug_verbose)
7455 			sdev_printk(KERN_INFO, sdp,
7456 				    "%s: Not ready: in process of becoming ready\n", my_name);
7457 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7458 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7459 
7460 			if (diff_ns <= tur_nanosecs_to_ready)
7461 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7462 			else
7463 				diff_ns = tur_nanosecs_to_ready;
7464 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7465 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7466 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7467 						   diff_ns);
7468 			return check_condition_result;
7469 		}
7470 	}
7471 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7472 	if (sdebug_verbose)
7473 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7474 			    my_name);
7475 	return check_condition_result;
7476 }
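/*
 * Worked example (illustrative): with tur_ms_to_ready=2000, a TEST UNIT
 * READY issued 500 ms after device creation gets a NOT READY sense with
 * the remaining 2000 - 500 = 1500 ms placed in the sense information
 * field, per the spc6 20-061r2 behaviour noted above.
 */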
7477 
7478 static void sdebug_map_queues(struct Scsi_Host *shost)
7479 {
7480 	int i, qoff;
7481 
7482 	if (shost->nr_hw_queues == 1)
7483 		return;
7484 
7485 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7486 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7487 
7488 		map->nr_queues  = 0;
7489 
7490 		if (i == HCTX_TYPE_DEFAULT)
7491 			map->nr_queues = submit_queues - poll_queues;
7492 		else if (i == HCTX_TYPE_POLL)
7493 			map->nr_queues = poll_queues;
7494 
7495 		if (!map->nr_queues) {
7496 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7497 			continue;
7498 		}
7499 
7500 		map->queue_offset = qoff;
7501 		blk_mq_map_queues(map);
7502 
7503 		qoff += map->nr_queues;
7504 	}
7505 }
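/*
 * Mapping sketch (illustrative): submit_queues=4 with poll_queues=1 maps
 * HCTX_TYPE_DEFAULT to 3 hw queues at offset 0 and HCTX_TYPE_POLL to 1
 * hw queue at offset 3; HCTX_TYPE_READ keeps nr_queues == 0.
 */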
7506 
7507 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7508 {
7509 	bool first;
7510 	bool retiring = false;
7511 	int num_entries = 0;
7512 	unsigned int qc_idx = 0;
7513 	unsigned long iflags;
7514 	ktime_t kt_from_boot = ktime_get_boottime();
7515 	struct sdebug_queue *sqp;
7516 	struct sdebug_queued_cmd *sqcp;
7517 	struct scsi_cmnd *scp;
7518 	struct sdebug_dev_info *devip;
7519 	struct sdebug_defer *sd_dp;
7520 
7521 	sqp = sdebug_q_arr + queue_num;
7522 
7523 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7524 
7525 	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7526 	if (qc_idx >= sdebug_max_queue)
7527 		goto unlock;
7528 
7529 	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
7530 		if (first) {
7531 			first = false;
7532 			if (!test_bit(qc_idx, sqp->in_use_bm))
7533 				continue;
7534 		} else {
7535 			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7536 		}
7537 		if (qc_idx >= sdebug_max_queue)
7538 			break;
7539 
7540 		sqcp = &sqp->qc_arr[qc_idx];
7541 		sd_dp = sqcp->sd_dp;
7542 		if (unlikely(!sd_dp))
7543 			continue;
7544 		scp = sqcp->a_cmnd;
7545 		if (unlikely(scp == NULL)) {
7546 			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7547 			       queue_num, qc_idx, __func__);
7548 			break;
7549 		}
7550 		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
7551 			if (kt_from_boot < sd_dp->cmpl_ts)
7552 				continue;
7553 
7554 		} else		/* ignoring non-REQ_POLLED requests */
7555 			continue;
7556 		devip = (struct sdebug_dev_info *)scp->device->hostdata;
7557 		if (likely(devip))
7558 			atomic_dec(&devip->num_in_q);
7559 		else
7560 			pr_err("devip=NULL from %s\n", __func__);
7561 		if (unlikely(atomic_read(&retired_max_queue) > 0))
7562 			retiring = true;
7563 
7564 		sqcp->a_cmnd = NULL;
7565 		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7566 			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7567 				sqp, queue_num, qc_idx, __func__);
7568 			break;
7569 		}
7570 		if (unlikely(retiring)) {	/* user has reduced max_queue */
7571 			int k, retval;
7572 
7573 			retval = atomic_read(&retired_max_queue);
7574 			if (qc_idx >= retval) {
7575 				pr_err("index %d too large\n", retval);
7576 				break;
7577 			}
7578 			k = find_last_bit(sqp->in_use_bm, retval);
7579 			if ((k < sdebug_max_queue) || (k == retval))
7580 				atomic_set(&retired_max_queue, 0);
7581 			else
7582 				atomic_set(&retired_max_queue, k + 1);
7583 		}
7584 		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
7585 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7586 		scsi_done(scp); /* callback to mid level */
7587 		num_entries++;
7588 		spin_lock_irqsave(&sqp->qc_lock, iflags);
7589 		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
7590 			break;
7591 	}
7592 
7593 unlock:
7594 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7595 
7596 	if (num_entries > 0)
7597 		atomic_add(num_entries, &sdeb_mq_poll_count);
7598 	return num_entries;
7599 }
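/*
 * Only commands deferred as SDEB_DEFER_POLL (i.e. issued with REQ_POLLED)
 * complete here; e.g. (illustrative) "fio --ioengine=io_uring --hipri"
 * against a scsi_debug disk with poll_queues > 0 exercises this path.
 */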
7600 
7601 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7602 				   struct scsi_cmnd *scp)
7603 {
7604 	u8 sdeb_i;
7605 	struct scsi_device *sdp = scp->device;
7606 	const struct opcode_info_t *oip;
7607 	const struct opcode_info_t *r_oip;
7608 	struct sdebug_dev_info *devip;
7609 	u8 *cmd = scp->cmnd;
7610 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7611 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7612 	int k, na;
7613 	int errsts = 0;
7614 	u64 lun_index = sdp->lun & 0x3FFF;
7615 	u32 flags;
7616 	u16 sa;
7617 	u8 opcode = cmd[0];
7618 	bool has_wlun_rl;
7619 	bool inject_now;
7620 
7621 	scsi_set_resid(scp, 0);
7622 	if (sdebug_statistics) {
7623 		atomic_inc(&sdebug_cmnd_count);
7624 		inject_now = inject_on_this_cmd();
7625 	} else {
7626 		inject_now = false;
7627 	}
7628 	if (unlikely(sdebug_verbose &&
7629 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7630 		char b[120];
7631 		int n, len, sb;
7632 
7633 		len = scp->cmd_len;
7634 		sb = (int)sizeof(b);
7635 		if (len > 32)
7636 			strcpy(b, "too long, over 32 bytes");
7637 		else {
7638 			for (k = 0, n = 0; k < len && n < sb; ++k)
7639 				n += scnprintf(b + n, sb - n, "%02x ",
7640 					       (u32)cmd[k]);
7641 		}
7642 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7643 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7644 	}
7645 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7646 		return SCSI_MLQUEUE_HOST_BUSY;
7647 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7648 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7649 		goto err_out;
7650 
7651 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7652 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7653 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7654 	if (unlikely(!devip)) {
7655 		devip = find_build_dev_info(sdp);
7656 		if (NULL == devip)
7657 			goto err_out;
7658 	}
7659 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7660 		atomic_set(&sdeb_inject_pending, 1);
7661 
7662 	na = oip->num_attached;
7663 	r_pfp = oip->pfp;
7664 	if (na) {	/* multiple commands with this opcode */
7665 		r_oip = oip;
7666 		if (FF_SA & r_oip->flags) {
7667 			if (F_SA_LOW & oip->flags)
7668 				sa = 0x1f & cmd[1];
7669 			else
7670 				sa = get_unaligned_be16(cmd + 8);
7671 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7672 				if (opcode == oip->opcode && sa == oip->sa)
7673 					break;
7674 			}
7675 		} else {   /* since no service action only check opcode */
7676 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7677 				if (opcode == oip->opcode)
7678 					break;
7679 			}
7680 		}
7681 		if (k > na) {
7682 			if (F_SA_LOW & r_oip->flags)
7683 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7684 			else if (F_SA_HIGH & r_oip->flags)
7685 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7686 			else
7687 				mk_sense_invalid_opcode(scp);
7688 			goto check_cond;
7689 		}
7690 	}	/* else (when na==0) we assume the oip is a match */
7691 	flags = oip->flags;
7692 	if (unlikely(F_INV_OP & flags)) {
7693 		mk_sense_invalid_opcode(scp);
7694 		goto check_cond;
7695 	}
7696 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7697 		if (sdebug_verbose)
7698 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7699 				    my_name, opcode, " supported for wlun");
7700 		mk_sense_invalid_opcode(scp);
7701 		goto check_cond;
7702 	}
7703 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
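		/*
		 * E.g. (illustrative): len_mask[1]=0xe0 with cmd[1]=0x10
		 * leaves rem=0x10, so the inner loop stops at j=4 and the
		 * sense data points at byte 1, bit 4 of the CDB.
		 */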
7704 		u8 rem;
7705 		int j;
7706 
7707 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7708 			rem = ~oip->len_mask[k] & cmd[k];
7709 			if (rem) {
7710 				for (j = 7; j >= 0; --j, rem <<= 1) {
7711 					if (0x80 & rem)
7712 						break;
7713 				}
7714 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7715 				goto check_cond;
7716 			}
7717 		}
7718 	}
7719 	if (unlikely(!(F_SKIP_UA & flags) &&
7720 		     find_first_bit(devip->uas_bm,
7721 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7722 		errsts = make_ua(scp, devip);
7723 		if (errsts)
7724 			goto check_cond;
7725 	}
7726 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7727 		     atomic_read(&devip->stopped))) {
7728 		errsts = resp_not_ready(scp, devip);
7729 		if (errsts)
7730 			goto fini;
7731 	}
7732 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7733 		goto fini;
7734 	if (unlikely(sdebug_every_nth)) {
7735 		if (fake_timeout(scp))
7736 			return 0;	/* ignore command: make trouble */
7737 	}
7738 	if (likely(oip->pfp))
7739 		pfp = oip->pfp;	/* calls a resp_* function */
7740 	else
7741 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7742 
7743 fini:
7744 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7745 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7746 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7747 					    sdebug_ndelay > 10000)) {
7748 		/*
7749 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise,
7750 		 * Start Stop Unit (SSU) wants at least a 1 second delay, and
7751 		 * if sdebug_jdelay > 1 it wants a long delay of that many
7752 		 * seconds. Synchronize Cache wants 1/20 of SSU's delay.
7753 		 */
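		/*
		 * Worked example (illustrative, assuming HZ=250 and
		 * USER_HZ=100): for Synchronize Cache (denom=20), jdelay=1
		 * becomes mult_frac(100, 250, 2000) = 12 jiffies, i.e.
		 * roughly 1/20 of a second.
		 */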
7754 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7755 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7756 
7757 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7758 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7759 	} else
7760 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7761 				     sdebug_ndelay);
7762 check_cond:
7763 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7764 err_out:
7765 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7766 }
7767 
7768 static struct scsi_host_template sdebug_driver_template = {
7769 	.show_info =		scsi_debug_show_info,
7770 	.write_info =		scsi_debug_write_info,
7771 	.proc_name =		sdebug_proc_name,
7772 	.name =			"SCSI DEBUG",
7773 	.info =			scsi_debug_info,
7774 	.slave_alloc =		scsi_debug_slave_alloc,
7775 	.slave_configure =	scsi_debug_slave_configure,
7776 	.slave_destroy =	scsi_debug_slave_destroy,
7777 	.ioctl =		scsi_debug_ioctl,
7778 	.queuecommand =		scsi_debug_queuecommand,
7779 	.change_queue_depth =	sdebug_change_qdepth,
7780 	.map_queues =		sdebug_map_queues,
7781 	.mq_poll =		sdebug_blk_mq_poll,
7782 	.eh_abort_handler =	scsi_debug_abort,
7783 	.eh_device_reset_handler = scsi_debug_device_reset,
7784 	.eh_target_reset_handler = scsi_debug_target_reset,
7785 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7786 	.eh_host_reset_handler = scsi_debug_host_reset,
7787 	.can_queue =		SDEBUG_CANQUEUE,
7788 	.this_id =		7,
7789 	.sg_tablesize =		SG_MAX_SEGMENTS,
7790 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7791 	.max_sectors =		-1U,
7792 	.max_segment_size =	-1U,
7793 	.module =		THIS_MODULE,
7794 	.track_queue_depth =	1,
7795 };
7796 
7797 static int sdebug_driver_probe(struct device *dev)
7798 {
7799 	int error = 0;
7800 	struct sdebug_host_info *sdbg_host;
7801 	struct Scsi_Host *hpnt;
7802 	int hprot;
7803 
7804 	sdbg_host = to_sdebug_host(dev);
7805 
7806 	sdebug_driver_template.can_queue = sdebug_max_queue;
7807 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7808 	if (!sdebug_clustering)
7809 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7810 
7811 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7812 	if (NULL == hpnt) {
7813 		pr_err("scsi_host_alloc failed\n");
7814 		error = -ENODEV;
7815 		return error;
7816 	}
7817 	if (submit_queues > nr_cpu_ids) {
7818 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7819 			my_name, submit_queues, nr_cpu_ids);
7820 		submit_queues = nr_cpu_ids;
7821 	}
7822 	/*
7823 	 * Decide whether to tell the SCSI subsystem that we want mq. The
7824 	 * following should give the same answer for each host.
7825 	 */
7826 	hpnt->nr_hw_queues = submit_queues;
7827 	if (sdebug_host_max_queue)
7828 		hpnt->host_tagset = 1;
7829 
7830 	/* poll queues are only possible when nr_hw_queues > 1 */
7831 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7832 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7833 			 my_name, poll_queues, hpnt->nr_hw_queues);
7834 		poll_queues = 0;
7835 	}
7836 
7837 	/*
7838 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7839 	 * left over for non-polled I/O.
7840 	 * If that condition is not met, trim poll_queues to 1 (for simplicity).
7841 	 */
7842 	if (poll_queues >= submit_queues) {
7843 		if (submit_queues < 3)
7844 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7845 		else
7846 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7847 				my_name, submit_queues - 1);
7848 		poll_queues = 1;
7849 	}
7850 	if (poll_queues)
7851 		hpnt->nr_maps = 3;
7852 
7853 	sdbg_host->shost = hpnt;
7854 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7855 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7856 		hpnt->max_id = sdebug_num_tgts + 1;
7857 	else
7858 		hpnt->max_id = sdebug_num_tgts;
7859 	/* = sdebug_max_luns; */
7860 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7861 
7862 	hprot = 0;
7863 
7864 	switch (sdebug_dif) {
7865 
7866 	case T10_PI_TYPE1_PROTECTION:
7867 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7868 		if (sdebug_dix)
7869 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7870 		break;
7871 
7872 	case T10_PI_TYPE2_PROTECTION:
7873 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7874 		if (sdebug_dix)
7875 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7876 		break;
7877 
7878 	case T10_PI_TYPE3_PROTECTION:
7879 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7880 		if (sdebug_dix)
7881 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7882 		break;
7883 
7884 	default:
7885 		if (sdebug_dix)
7886 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7887 		break;
7888 	}
7889 
7890 	scsi_host_set_prot(hpnt, hprot);
7891 
7892 	if (have_dif_prot || sdebug_dix)
7893 		pr_info("host protection%s%s%s%s%s%s%s\n",
7894 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7895 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7896 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7897 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7898 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7899 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7900 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7901 
7902 	if (sdebug_guard == 1)
7903 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7904 	else
7905 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7906 
7907 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7908 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7909 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7910 		sdebug_statistics = true;
7911 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7912 	if (error) {
7913 		pr_err("scsi_add_host failed\n");
7914 		error = -ENODEV;
7915 		scsi_host_put(hpnt);
7916 	} else {
7917 		scsi_scan_host(hpnt);
7918 	}
7919 
7920 	return error;
7921 }
7922 
7923 static void sdebug_driver_remove(struct device *dev)
7924 {
7925 	struct sdebug_host_info *sdbg_host;
7926 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7927 
7928 	sdbg_host = to_sdebug_host(dev);
7929 
7930 	scsi_remove_host(sdbg_host->shost);
7931 
7932 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7933 				 dev_list) {
7934 		list_del(&sdbg_devinfo->dev_list);
7935 		kfree(sdbg_devinfo->zstate);
7936 		kfree(sdbg_devinfo);
7937 	}
7938 
7939 	scsi_host_put(sdbg_host->shost);
7940 }
7941 
7942 static int pseudo_lld_bus_match(struct device *dev,
7943 				struct device_driver *dev_driver)
7944 {
7945 	return 1;
7946 }
7947 
7948 static struct bus_type pseudo_lld_bus = {
7949 	.name = "pseudo",
7950 	.match = pseudo_lld_bus_match,
7951 	.probe = sdebug_driver_probe,
7952 	.remove = sdebug_driver_remove,
7953 	.drv_groups = sdebug_drv_groups,
7954 };
7955