xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 57f7225a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103 
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106 
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST   1
109 #define DEF_NUM_TGTS   1
110 #define DEF_MAX_LUNS   1
111 /* With these defaults, this driver will make 1 host with 1 target
112  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113  */
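/*
 * Editor's illustration (module parameter names taken from this driver's
 * documentation; the exact invocations are assumptions):
 *
 *	modprobe scsi_debug				# 1 host, 1 target, 1 LUN
 *	modprobe scsi_debug num_tgts=2 max_luns=4	# 1 host, 8 pseudo disks
 */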
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT   0
118 #define DEF_DEV_SIZE_MB   8
119 #define DEF_ZBC_DEV_SIZE_MB   128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE   0
124 #define DEF_EVERY_NTH   0
125 #define DEF_FAKE_RW	0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0   0
135 #define DEF_NUM_PARTS   0
136 #define DEF_OPTS   0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE   TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB   0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158 
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB	128
161 #define DEF_ZBC_MAX_OPEN_ZONES	8
162 #define DEF_ZBC_NR_CONV_ZONES	1
163 
164 #define SDEBUG_LUN_0_VAL 0
165 
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE		1
168 #define SDEBUG_OPT_MEDIUM_ERR		2
169 #define SDEBUG_OPT_TIMEOUT		4
170 #define SDEBUG_OPT_RECOVERED_ERR	8
171 #define SDEBUG_OPT_TRANSPORT_ERR	16
172 #define SDEBUG_OPT_DIF_ERR		32
173 #define SDEBUG_OPT_DIX_ERR		64
174 #define SDEBUG_OPT_MAC_TIMEOUT		128
175 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
176 #define SDEBUG_OPT_Q_NOISE		0x200
177 #define SDEBUG_OPT_ALL_TSF		0x400	/* ignore */
178 #define SDEBUG_OPT_RARE_TSF		0x800
179 #define SDEBUG_OPT_N_WCE		0x1000
180 #define SDEBUG_OPT_RESET_NOISE		0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
182 #define SDEBUG_OPT_HOST_BUSY		0x8000
183 #define SDEBUG_OPT_CMD_ABORT		0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 			      SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 				  SDEBUG_OPT_TRANSPORT_ERR | \
188 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 				  SDEBUG_OPT_SHORT_TRANSFER | \
190 				  SDEBUG_OPT_HOST_BUSY | \
191 				  SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
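/*
 * Editor's sketch (assumed usage): sdebug_opts is the OR of the mask values
 * above and is set via the "opts" module/sysfs parameter, e.g.:
 *
 *	modprobe scsi_debug opts=0x3	# SDEBUG_OPT_NOISE | SDEBUG_OPT_MEDIUM_ERR
 *	echo 0 > /sys/bus/pseudo/drivers/scsi_debug/opts	# clear at run time
 */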
194 
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196  * priority order. In the subset implemented here lower numbers have higher
197  * priority. The UA numbers should be a sequence starting from 0 with
198  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1	/* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6	/* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
208 
209 /* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
210  * is simulated at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
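/*
 * Editor's illustration (hedged; /dev/sdX and the direct-I/O flag are
 * assumptions): with SDEBUG_OPT_MEDIUM_ERR set and the default 512 byte
 * sectors, a direct read of the magic sector should fail with
 * UNRECOVERED_READ_ERR for OPT_MEDIUM_ERR_NUM consecutive sectors:
 *
 *	dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1 iflag=direct
 */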
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
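/*
 * Editor's example of the sysfs knob mentioned above (the h:c:t:l address
 * is hypothetical):
 *
 *	echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */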
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is BITS_PER_LONG bits */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
224 
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN			1	/* Data-in command (e.g. READ) */
227 #define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
233 #define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP		0x200	/* invalid opcode (not supported) */
236 #define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
238 #define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */
240 
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
246 
247 #define SDEBUG_MAX_PARTS 4
248 
249 #define SDEBUG_MAX_CMD_LEN 32
250 
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252 
253 static struct kmem_cache *queued_cmd_cache;
254 
255 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
256 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
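/*
 * Editor's sketch of the intended round trip (helper names assumed from
 * later in this file):
 *
 *	sqcp = sdebug_alloc_queued_cmd(scmd);
 *	ASSIGN_QUEUED_CMD(scmd, sqcp);	// stash pointer in host_scribble
 *	...
 *	sqcp = TO_QUEUED_CMD(scmd);	// recover it at completion time
 *	sdebug_free_queued_cmd(sqcp);
 */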
257 
258 /* Zone types (zbcr05 table 25) */
259 enum sdebug_z_type {
260 	ZBC_ZTYPE_CNV	= 0x1,
261 	ZBC_ZTYPE_SWR	= 0x2,
262 	ZBC_ZTYPE_SWP	= 0x3,
263 	/* ZBC_ZTYPE_SOBR = 0x4, */
264 	ZBC_ZTYPE_GAP	= 0x5,
265 };
266 
267 /* enumeration names taken from table 26, zbcr05 */
268 enum sdebug_z_cond {
269 	ZBC_NOT_WRITE_POINTER	= 0x0,
270 	ZC1_EMPTY		= 0x1,
271 	ZC2_IMPLICIT_OPEN	= 0x2,
272 	ZC3_EXPLICIT_OPEN	= 0x3,
273 	ZC4_CLOSED		= 0x4,
274 	ZC6_READ_ONLY		= 0xd,
275 	ZC5_FULL		= 0xe,
276 	ZC7_OFFLINE		= 0xf,
277 };
278 
279 struct sdeb_zone_state {	/* ZBC: per zone state */
280 	enum sdebug_z_type z_type;
281 	enum sdebug_z_cond z_cond;
282 	bool z_non_seq_resource;
283 	unsigned int z_size;
284 	sector_t z_start;
285 	sector_t z_wp;
286 };
287 
288 struct sdebug_dev_info {
289 	struct list_head dev_list;
290 	unsigned int channel;
291 	unsigned int target;
292 	u64 lun;
293 	uuid_t lu_name;
294 	struct sdebug_host_info *sdbg_host;
295 	unsigned long uas_bm[1];
296 	atomic_t stopped;	/* 1: by SSU, 2: device start */
297 	bool used;
298 
299 	/* For ZBC devices */
300 	enum blk_zoned_model zmodel;
301 	unsigned int zcap;
302 	unsigned int zsize;
303 	unsigned int zsize_shift;
304 	unsigned int nr_zones;
305 	unsigned int nr_conv_zones;
306 	unsigned int nr_seq_zones;
307 	unsigned int nr_imp_open;
308 	unsigned int nr_exp_open;
309 	unsigned int nr_closed;
310 	unsigned int max_open;
311 	ktime_t create_ts;	/* time since bootup that this device was created */
312 	struct sdeb_zone_state *zstate;
313 };
314 
315 struct sdebug_host_info {
316 	struct list_head host_list;
317 	int si_idx;	/* sdeb_store_info (per host) xarray index */
318 	struct Scsi_Host *shost;
319 	struct device dev;
320 	struct list_head dev_info_list;
321 };
322 
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325 	rwlock_t macc_lck;	/* for atomic media access on this store */
326 	u8 *storep;		/* user data storage (ram) */
327 	struct t10_pi_tuple *dif_storep; /* protection info */
328 	void *map_storep;	/* provisioning map */
329 };
330 
331 #define dev_to_sdebug_host(d)	\
332 	container_of(d, struct sdebug_host_info, dev)
333 
334 #define shost_to_sdebug_host(shost)	\
335 	dev_to_sdebug_host(shost->dma_dev)
336 
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338 		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
339 
340 struct sdebug_defer {
341 	struct hrtimer hrt;
342 	struct execute_work ew;
343 	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
344 	int sqa_idx;	/* index of sdebug_queue array */
345 	int hc_idx;	/* hostwide tag index */
346 	int issuing_cpu;
347 	bool aborted;	/* true when blk_abort_request() already called */
348 	enum sdeb_defer_type defer_t;
349 };
350 
351 struct sdebug_queued_cmd {
352 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
353 	 * instance indicates this slot is in use.
354 	 */
355 	struct sdebug_defer sd_dp;
356 	struct scsi_cmnd *scmd;
357 };
358 
359 struct sdebug_scsi_cmd {
360 	spinlock_t   lock;
361 };
362 
363 struct sdebug_queue {
364 	struct sdebug_queued_cmd *qc_arr[SDEBUG_CANQUEUE];
365 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
366 	spinlock_t qc_lock;
367 };
368 
369 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
370 static atomic_t sdebug_completions;  /* count of deferred completions */
371 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
372 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
373 static atomic_t sdeb_inject_pending;
374 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
375 
376 struct opcode_info_t {
377 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
378 				/* for terminating element */
379 	u8 opcode;		/* if num_attached > 0, preferred */
380 	u16 sa;			/* service action */
381 	u32 flags;		/* OR-ed set of SDEB_F_* */
382 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
383 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
384 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
385 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
386 };
387 
388 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
389 enum sdeb_opcode_index {
390 	SDEB_I_INVALID_OPCODE =	0,
391 	SDEB_I_INQUIRY = 1,
392 	SDEB_I_REPORT_LUNS = 2,
393 	SDEB_I_REQUEST_SENSE = 3,
394 	SDEB_I_TEST_UNIT_READY = 4,
395 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
396 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
397 	SDEB_I_LOG_SENSE = 7,
398 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
399 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
400 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
401 	SDEB_I_START_STOP = 11,
402 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
403 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
404 	SDEB_I_MAINT_IN = 14,
405 	SDEB_I_MAINT_OUT = 15,
406 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
407 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
408 	SDEB_I_RESERVE = 18,		/* 6, 10 */
409 	SDEB_I_RELEASE = 19,		/* 6, 10 */
410 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
411 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
412 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
413 	SDEB_I_SEND_DIAG = 23,
414 	SDEB_I_UNMAP = 24,
415 	SDEB_I_WRITE_BUFFER = 25,
416 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
417 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
418 	SDEB_I_COMP_WRITE = 28,
419 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
420 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
421 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
422 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
423 };
424 
425 
426 static const unsigned char opcode_ind_arr[256] = {
427 /* 0x0; 0x0->0x1f: 6 byte cdbs */
428 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
429 	    0, 0, 0, 0,
430 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
431 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
432 	    SDEB_I_RELEASE,
433 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
434 	    SDEB_I_ALLOW_REMOVAL, 0,
435 /* 0x20; 0x20->0x3f: 10 byte cdbs */
436 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
437 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
438 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
439 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
440 /* 0x40; 0x40->0x5f: 10 byte cdbs */
441 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
442 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
443 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
444 	    SDEB_I_RELEASE,
445 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
446 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
447 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
448 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
449 	0, SDEB_I_VARIABLE_LEN,
450 /* 0x80; 0x80->0x9f: 16 byte cdbs */
451 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
452 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
453 	0, 0, 0, SDEB_I_VERIFY,
454 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
455 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
456 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
457 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
458 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
459 	     SDEB_I_MAINT_OUT, 0, 0, 0,
460 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
461 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
462 	0, 0, 0, 0, 0, 0, 0, 0,
463 	0, 0, 0, 0, 0, 0, 0, 0,
464 /* 0xc0; 0xc0->0xff: vendor specific */
465 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
466 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
467 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
468 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
469 };
470 
471 /*
472  * The following "response" functions return the SCSI mid-level's 4 byte
473  * tuple-in-an-int. To handle commands with an IMMED bit, for faster
474  * command completion, they can OR their return value with
475  * SDEG_RES_IMMED_MASK .
476  */
477 #define SDEG_RES_IMMED_MASK 0x40000000
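/*
 * Editor's sketch (assumed pattern): a response function that honours an
 * IMMED bit can request early completion by OR-ing in the mask:
 *
 *	if (immed)
 *		return res | SDEG_RES_IMMED_MASK;  // complete before the delay
 */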
478 
479 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
505 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
506 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
507 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
508 
509 static int sdebug_do_add_host(bool mk_new_store);
510 static int sdebug_add_host_helper(int per_host_idx);
511 static void sdebug_do_remove_host(bool the_end);
512 static int sdebug_add_store(void);
513 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
514 static void sdebug_erase_all_stores(bool apart_from_first);
515 
516 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
517 
518 /*
519  * The following are overflow arrays for cdbs that "hit" the same index in
520  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
521  * should be placed in opcode_info_arr[], the others should be placed here.
522  */
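/*
 * Editor's sketch of the dispatch walk implied above (variable names are
 * illustrative only):
 *
 *	oip = &opcode_info_arr[opcode_ind_arr[cdb[0]]];
 *	if ((oip->flags & FF_SA) && oip->sa != sa) {
 *		for (k = 0; k < oip->num_attached; ++k)
 *			if (oip->arrp[k].sa == sa)
 *				break;			// found in overflow array
 *		if (k < oip->num_attached)
 *			oip = &oip->arrp[k];
 *	}
 */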
523 static const struct opcode_info_t msense_iarr[] = {
524 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
525 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 };
527 
528 static const struct opcode_info_t mselect_iarr[] = {
529 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
530 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 };
532 
533 static const struct opcode_info_t read_iarr[] = {
534 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
535 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
536 	     0, 0, 0, 0} },
537 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
538 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
539 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
540 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
541 	     0xc7, 0, 0, 0, 0} },
542 };
543 
544 static const struct opcode_info_t write_iarr[] = {
545 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
546 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
547 		   0, 0, 0, 0, 0, 0} },
548 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
549 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
550 		   0, 0, 0} },
551 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
552 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
553 		   0xbf, 0xc7, 0, 0, 0, 0} },
554 };
555 
556 static const struct opcode_info_t verify_iarr[] = {
557 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
558 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
559 		   0, 0, 0, 0, 0, 0} },
560 };
561 
562 static const struct opcode_info_t sa_in_16_iarr[] = {
563 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
564 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
565 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
566 };
567 
568 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
569 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
570 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
571 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
572 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
573 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
574 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
575 };
576 
577 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
578 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
579 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
580 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
581 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
582 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
583 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
584 };
585 
586 static const struct opcode_info_t write_same_iarr[] = {
587 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
588 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
589 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
590 };
591 
592 static const struct opcode_info_t reserve_iarr[] = {
593 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
594 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
595 };
596 
597 static const struct opcode_info_t release_iarr[] = {
598 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
599 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
600 };
601 
602 static const struct opcode_info_t sync_cache_iarr[] = {
603 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
604 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
605 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
606 };
607 
608 static const struct opcode_info_t pre_fetch_iarr[] = {
609 	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
610 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
611 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
612 };
613 
614 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
615 	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
616 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
617 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
618 	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
619 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
620 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
621 	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
622 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
623 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
624 };
625 
626 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
627 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
628 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
629 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
630 };
631 
632 
633 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
634  * plus the terminating elements for logic that scans this table such as
635  * REPORT SUPPORTED OPERATION CODES. */
636 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
637 /* 0 */
638 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
639 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
640 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
641 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
642 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
643 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
644 	     0, 0} },					/* REPORT LUNS */
645 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
646 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
647 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
648 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
649 /* 5 */
650 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
651 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
652 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
653 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
654 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
655 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
656 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
657 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
658 	     0, 0, 0} },
659 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
660 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
661 	     0, 0} },
662 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
663 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
664 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
665 /* 10 */
666 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
667 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
668 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
669 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
670 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
671 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
673 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
674 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
676 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
677 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
678 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
679 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
680 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
681 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
682 				0xff, 0, 0xc7, 0, 0, 0, 0} },
683 /* 15 */
684 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
685 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
686 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
687 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
688 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
689 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
690 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
691 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
692 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
693 	     0xff, 0xff} },
694 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
695 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
696 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
697 	     0} },
698 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
699 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
700 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
701 	     0} },
702 /* 20 */
703 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
704 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
705 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
706 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
707 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
708 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
709 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
710 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
711 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
712 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
713 /* 25 */
714 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
715 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
716 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
717 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
718 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
719 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
720 		 0, 0, 0, 0, 0} },
721 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
722 	    resp_sync_cache, sync_cache_iarr,
723 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
724 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
725 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
726 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
727 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
728 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
729 	    resp_pre_fetch, pre_fetch_iarr,
730 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
731 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
732 
733 /* 30 */
734 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
735 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
736 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
737 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
738 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
739 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
740 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
741 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
742 /* sentinel */
743 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
744 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
745 };
746 
747 static int sdebug_num_hosts;
748 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
749 static int sdebug_ato = DEF_ATO;
750 static int sdebug_cdb_len = DEF_CDB_LEN;
751 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
752 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
753 static int sdebug_dif = DEF_DIF;
754 static int sdebug_dix = DEF_DIX;
755 static int sdebug_dsense = DEF_D_SENSE;
756 static int sdebug_every_nth = DEF_EVERY_NTH;
757 static int sdebug_fake_rw = DEF_FAKE_RW;
758 static unsigned int sdebug_guard = DEF_GUARD;
759 static int sdebug_host_max_queue;	/* per host */
760 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
761 static int sdebug_max_luns = DEF_MAX_LUNS;
762 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
763 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
764 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
765 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
766 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
767 static int sdebug_no_uld;
768 static int sdebug_num_parts = DEF_NUM_PARTS;
769 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
770 static int sdebug_opt_blks = DEF_OPT_BLKS;
771 static int sdebug_opts = DEF_OPTS;
772 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
773 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
774 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
775 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
776 static int sdebug_sector_size = DEF_SECTOR_SIZE;
777 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
778 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
779 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
780 static unsigned int sdebug_lbpu = DEF_LBPU;
781 static unsigned int sdebug_lbpws = DEF_LBPWS;
782 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
783 static unsigned int sdebug_lbprz = DEF_LBPRZ;
784 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
785 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
786 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
787 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
788 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
789 static int sdebug_uuid_ctl = DEF_UUID_CTL;
790 static bool sdebug_random = DEF_RANDOM;
791 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
792 static bool sdebug_removable = DEF_REMOVABLE;
793 static bool sdebug_clustering;
794 static bool sdebug_host_lock = DEF_HOST_LOCK;
795 static bool sdebug_strict = DEF_STRICT;
796 static bool sdebug_any_injecting_opt;
797 static bool sdebug_no_rwlock;
798 static bool sdebug_verbose;
799 static bool have_dif_prot;
800 static bool write_since_sync;
801 static bool sdebug_statistics = DEF_STATISTICS;
802 static bool sdebug_wp;
803 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
804 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
805 static char *sdeb_zbc_model_s;
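/*
 * Editor's illustration (parameter names assumed from this driver's
 * module_param list): a host-managed ZBC disk with 4 conventional zones
 * might be created with:
 *
 *	modprobe scsi_debug zbc=managed zone_nr_conv=4 zone_size_mb=128
 */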
806 
807 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
808 			  SAM_LUN_AM_FLAT = 0x1,
809 			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
810 			  SAM_LUN_AM_EXTENDED = 0x3};
811 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
812 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
813 
814 static unsigned int sdebug_store_sectors;
815 static sector_t sdebug_capacity;	/* in sectors */
816 
817 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
818    may still need them */
819 static int sdebug_heads;		/* heads per disk */
820 static int sdebug_cylinders_per;	/* cylinders per surface */
821 static int sdebug_sectors_per;		/* sectors per cylinder */
822 
823 static LIST_HEAD(sdebug_host_list);
824 static DEFINE_MUTEX(sdebug_host_list_mutex);
825 
826 static struct xarray per_store_arr;
827 static struct xarray *per_store_ap = &per_store_arr;
828 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
829 static int sdeb_most_recent_idx = -1;
830 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
831 
832 static unsigned long map_size;
833 static int num_aborts;
834 static int num_dev_resets;
835 static int num_target_resets;
836 static int num_bus_resets;
837 static int num_host_resets;
838 static int dix_writes;
839 static int dix_reads;
840 static int dif_errors;
841 
842 /* ZBC global data */
843 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
844 static int sdeb_zbc_zone_cap_mb;
845 static int sdeb_zbc_zone_size_mb;
846 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
847 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
848 
849 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
850 static int poll_queues; /* io_uring iopoll interface */
851 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
852 
853 static DEFINE_RWLOCK(atomic_rw);
854 static DEFINE_RWLOCK(atomic_rw2);
855 
856 static rwlock_t *ramdisk_lck_a[2];
857 
858 static char sdebug_proc_name[] = MY_NAME;
859 static const char *my_name = MY_NAME;
860 
861 static struct bus_type pseudo_lld_bus;
862 
863 static struct device_driver sdebug_driverfs_driver = {
864 	.name 		= sdebug_proc_name,
865 	.bus		= &pseudo_lld_bus,
866 };
867 
868 static const int check_condition_result =
869 	SAM_STAT_CHECK_CONDITION;
870 
871 static const int illegal_condition_result =
872 	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
873 
874 static const int device_qfull_result =
875 	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
876 
877 static const int condition_met_result = SAM_STAT_CONDITION_MET;
878 
879 
880 /* Only do the extra work involved in logical block provisioning if one or
881  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
882  * real reads and writes (i.e. not skipping them for speed).
883  */
884 static inline bool scsi_debug_lbp(void)
885 {
886 	return 0 == sdebug_fake_rw &&
887 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
888 }
889 
890 static void *lba2fake_store(struct sdeb_store_info *sip,
891 			    unsigned long long lba)
892 {
893 	struct sdeb_store_info *lsip = sip;
894 
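	/* do_div() leaves the quotient in lba and returns the remainder, so
	 * the assignment below makes lba the offset modulo the store size.
	 */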
895 	lba = do_div(lba, sdebug_store_sectors);
896 	if (!sip || !sip->storep) {
897 		WARN_ON_ONCE(true);
898 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
899 	}
900 	return lsip->storep + lba * sdebug_sector_size;
901 }
902 
903 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
904 				      sector_t sector)
905 {
906 	sector = sector_div(sector, sdebug_store_sectors);
907 
908 	return sip->dif_storep + sector;
909 }
910 
911 static void sdebug_max_tgts_luns(void)
912 {
913 	struct sdebug_host_info *sdbg_host;
914 	struct Scsi_Host *hpnt;
915 
916 	mutex_lock(&sdebug_host_list_mutex);
917 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
918 		hpnt = sdbg_host->shost;
919 		if ((hpnt->this_id >= 0) &&
920 		    (sdebug_num_tgts > hpnt->this_id))
921 			hpnt->max_id = sdebug_num_tgts + 1;
922 		else
923 			hpnt->max_id = sdebug_num_tgts;
924 		/* sdebug_max_luns; */
925 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
926 	}
927 	mutex_unlock(&sdebug_host_list_mutex);
928 }
929 
930 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
931 
932 /* Set in_bit to -1 to indicate no bit position of invalid field */
933 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
934 				 enum sdeb_cmd_data c_d,
935 				 int in_byte, int in_bit)
936 {
937 	unsigned char *sbuff;
938 	u8 sks[4];
939 	int sl, asc;
940 
941 	sbuff = scp->sense_buffer;
942 	if (!sbuff) {
943 		sdev_printk(KERN_ERR, scp->device,
944 			    "%s: sense_buffer is NULL\n", __func__);
945 		return;
946 	}
947 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
948 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
949 	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
950 	memset(sks, 0, sizeof(sks));
951 	sks[0] = 0x80;
952 	if (c_d)
953 		sks[0] |= 0x40;
954 	if (in_bit >= 0) {
955 		sks[0] |= 0x8;
956 		sks[0] |= 0x7 & in_bit;
957 	}
958 	put_unaligned_be16(in_byte, sks + 1);
959 	if (sdebug_dsense) {
960 		sl = sbuff[7] + 8;
961 		sbuff[7] = sl;
962 		sbuff[sl] = 0x2;
963 		sbuff[sl + 1] = 0x6;
964 		memcpy(sbuff + sl + 4, sks, 3);
965 	} else
966 		memcpy(sbuff + 15, sks, 3);
967 	if (sdebug_verbose)
968 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
969 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
970 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
971 }
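/*
 * Editor's example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4) reports
 * ILLEGAL REQUEST + INVALID FIELD IN CDB with the sense-key specific bytes
 * pointing at byte 1, bit 4 of the cdb.
 */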
972 
973 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
974 {
975 	if (!scp->sense_buffer) {
976 		sdev_printk(KERN_ERR, scp->device,
977 			    "%s: sense_buffer is NULL\n", __func__);
978 		return;
979 	}
980 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
981 
982 	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
983 
984 	if (sdebug_verbose)
985 		sdev_printk(KERN_INFO, scp->device,
986 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
987 			    my_name, key, asc, asq);
988 }
989 
990 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
991 {
992 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
993 }
994 
995 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
996 			    void __user *arg)
997 {
998 	if (sdebug_verbose) {
999 		if (0x1261 == cmd)
1000 			sdev_printk(KERN_INFO, dev,
1001 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
1002 		else if (0x5331 == cmd)
1003 			sdev_printk(KERN_INFO, dev,
1004 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1005 				    __func__);
1006 		else
1007 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1008 				    __func__, cmd);
1009 	}
1010 	return -EINVAL;
1011 	/* return -ENOTTY; // correct return but upsets fdisk */
1012 }
1013 
1014 static void config_cdb_len(struct scsi_device *sdev)
1015 {
1016 	switch (sdebug_cdb_len) {
1017 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1018 		sdev->use_10_for_rw = false;
1019 		sdev->use_16_for_rw = false;
1020 		sdev->use_10_for_ms = false;
1021 		break;
1022 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1023 		sdev->use_10_for_rw = true;
1024 		sdev->use_16_for_rw = false;
1025 		sdev->use_10_for_ms = false;
1026 		break;
1027 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1028 		sdev->use_10_for_rw = true;
1029 		sdev->use_16_for_rw = false;
1030 		sdev->use_10_for_ms = true;
1031 		break;
1032 	case 16:
1033 		sdev->use_10_for_rw = false;
1034 		sdev->use_16_for_rw = true;
1035 		sdev->use_10_for_ms = true;
1036 		break;
1037 	case 32: /* No knobs to suggest this so same as 16 for now */
1038 		sdev->use_10_for_rw = false;
1039 		sdev->use_16_for_rw = true;
1040 		sdev->use_10_for_ms = true;
1041 		break;
1042 	default:
1043 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1044 			sdebug_cdb_len);
1045 		sdev->use_10_for_rw = true;
1046 		sdev->use_16_for_rw = false;
1047 		sdev->use_10_for_ms = false;
1048 		sdebug_cdb_len = 10;
1049 		break;
1050 	}
1051 }
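/*
 * Editor's note (sysfs path assumed): sdebug_cdb_len is normally changed at
 * run time via the driver's cdb_len attribute, after which
 * all_config_cdb_len() below re-applies the hints to every attached device:
 *
 *	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 */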
1052 
1053 static void all_config_cdb_len(void)
1054 {
1055 	struct sdebug_host_info *sdbg_host;
1056 	struct Scsi_Host *shost;
1057 	struct scsi_device *sdev;
1058 
1059 	mutex_lock(&sdebug_host_list_mutex);
1060 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1061 		shost = sdbg_host->shost;
1062 		shost_for_each_device(sdev, shost) {
1063 			config_cdb_len(sdev);
1064 		}
1065 	}
1066 	mutex_unlock(&sdebug_host_list_mutex);
1067 }
1068 
1069 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1070 {
1071 	struct sdebug_host_info *sdhp = devip->sdbg_host;
1072 	struct sdebug_dev_info *dp;
1073 
1074 	list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1075 		if ((devip->sdbg_host == dp->sdbg_host) &&
1076 		    (devip->target == dp->target)) {
1077 			clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1078 		}
1079 	}
1080 }
1081 
1082 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1083 {
1084 	int k;
1085 
1086 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1087 	if (k != SDEBUG_NUM_UAS) {
1088 		const char *cp = NULL;
1089 
1090 		switch (k) {
1091 		case SDEBUG_UA_POR:
1092 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1093 					POWER_ON_RESET_ASCQ);
1094 			if (sdebug_verbose)
1095 				cp = "power on reset";
1096 			break;
1097 		case SDEBUG_UA_POOCCUR:
1098 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1099 					POWER_ON_OCCURRED_ASCQ);
1100 			if (sdebug_verbose)
1101 				cp = "power on occurred";
1102 			break;
1103 		case SDEBUG_UA_BUS_RESET:
1104 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1105 					BUS_RESET_ASCQ);
1106 			if (sdebug_verbose)
1107 				cp = "bus reset";
1108 			break;
1109 		case SDEBUG_UA_MODE_CHANGED:
1110 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1111 					MODE_CHANGED_ASCQ);
1112 			if (sdebug_verbose)
1113 				cp = "mode parameters changed";
1114 			break;
1115 		case SDEBUG_UA_CAPACITY_CHANGED:
1116 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1117 					CAPACITY_CHANGED_ASCQ);
1118 			if (sdebug_verbose)
1119 				cp = "capacity data changed";
1120 			break;
1121 		case SDEBUG_UA_MICROCODE_CHANGED:
1122 			mk_sense_buffer(scp, UNIT_ATTENTION,
1123 					TARGET_CHANGED_ASC,
1124 					MICROCODE_CHANGED_ASCQ);
1125 			if (sdebug_verbose)
1126 				cp = "microcode has been changed";
1127 			break;
1128 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1129 			mk_sense_buffer(scp, UNIT_ATTENTION,
1130 					TARGET_CHANGED_ASC,
1131 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1132 			if (sdebug_verbose)
1133 				cp = "microcode has been changed without reset";
1134 			break;
1135 		case SDEBUG_UA_LUNS_CHANGED:
1136 			/*
1137 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1138 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1139 			 * on the target, until a REPORT LUNS command is
1140 			 * received.  SPC-4 behavior is to report it only once.
1141 			 * NOTE:  sdebug_scsi_level does not use the same
1142 			 * values as struct scsi_device->scsi_level.
1143 			 */
1144 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1145 				clear_luns_changed_on_target(devip);
1146 			mk_sense_buffer(scp, UNIT_ATTENTION,
1147 					TARGET_CHANGED_ASC,
1148 					LUNS_CHANGED_ASCQ);
1149 			if (sdebug_verbose)
1150 				cp = "reported luns data has changed";
1151 			break;
1152 		default:
1153 			pr_warn("unexpected unit attention code=%d\n", k);
1154 			if (sdebug_verbose)
1155 				cp = "unknown";
1156 			break;
1157 		}
1158 		clear_bit(k, devip->uas_bm);
1159 		if (sdebug_verbose)
1160 			sdev_printk(KERN_INFO, scp->device,
1161 				   "%s reports: Unit attention: %s\n",
1162 				   my_name, cp);
1163 		return check_condition_result;
1164 	}
1165 	return 0;
1166 }
1167 
1168 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1169 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1170 				int arr_len)
1171 {
1172 	int act_len;
1173 	struct scsi_data_buffer *sdb = &scp->sdb;
1174 
1175 	if (!sdb->length)
1176 		return 0;
1177 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1178 		return DID_ERROR << 16;
1179 
1180 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1181 				      arr, arr_len);
1182 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1183 
1184 	return 0;
1185 }
1186 
1187 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1188  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1189  * calls, not required to write in ascending offset order. Assumes resid
1190  * set to scsi_bufflen() prior to any calls.
1191  */
1192 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1193 				  int arr_len, unsigned int off_dst)
1194 {
1195 	unsigned int act_len, n;
1196 	struct scsi_data_buffer *sdb = &scp->sdb;
1197 	off_t skip = off_dst;
1198 
1199 	if (sdb->length <= off_dst)
1200 		return 0;
1201 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1202 		return DID_ERROR << 16;
1203 
1204 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1205 				       arr, arr_len, skip);
1206 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1207 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1208 		 scsi_get_resid(scp));
1209 	n = scsi_bufflen(scp) - (off_dst + act_len);
1210 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1211 	return 0;
1212 }
1213 
1214 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1215  * 'arr' or -1 if error.
1216  */
1217 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1218 			       int arr_len)
1219 {
1220 	if (!scsi_bufflen(scp))
1221 		return 0;
1222 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1223 		return -1;
1224 
1225 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1226 }
1227 
1228 
1229 static char sdebug_inq_vendor_id[9] = "Linux   ";
1230 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1231 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1232 /* Use some locally assigned NAAs for SAS addresses. */
1233 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1234 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1235 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1236 
1237 /* Device identification VPD page. Returns number of bytes placed in arr */
1238 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1239 			  int target_dev_id, int dev_id_num,
1240 			  const char *dev_id_str, int dev_id_str_len,
1241 			  const uuid_t *lu_name)
1242 {
1243 	int num, port_a;
1244 	char b[32];
1245 
1246 	port_a = target_dev_id + 1;
1247 	/* T10 vendor identifier field format (faked) */
1248 	arr[0] = 0x2;	/* ASCII */
1249 	arr[1] = 0x1;
1250 	arr[2] = 0x0;
1251 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1252 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1253 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1254 	num = 8 + 16 + dev_id_str_len;
1255 	arr[3] = num;
1256 	num += 4;
1257 	if (dev_id_num >= 0) {
1258 		if (sdebug_uuid_ctl) {
1259 			/* Locally assigned UUID */
1260 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1261 			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
1262 			arr[num++] = 0x0;
1263 			arr[num++] = 0x12;
1264 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1265 			arr[num++] = 0x0;
1266 			memcpy(arr + num, lu_name, 16);
1267 			num += 16;
1268 		} else {
1269 			/* NAA-3, Logical unit identifier (binary) */
1270 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1271 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1272 			arr[num++] = 0x0;
1273 			arr[num++] = 0x8;
1274 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1275 			num += 8;
1276 		}
1277 		/* Target relative port number */
1278 		arr[num++] = 0x61;	/* proto=sas, binary */
1279 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1280 		arr[num++] = 0x0;	/* reserved */
1281 		arr[num++] = 0x4;	/* length */
1282 		arr[num++] = 0x0;	/* reserved */
1283 		arr[num++] = 0x0;	/* reserved */
1284 		arr[num++] = 0x0;
1285 		arr[num++] = 0x1;	/* relative port A */
1286 	}
1287 	/* NAA-3, Target port identifier */
1288 	arr[num++] = 0x61;	/* proto=sas, binary */
1289 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1290 	arr[num++] = 0x0;
1291 	arr[num++] = 0x8;
1292 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1293 	num += 8;
1294 	/* Target port group identifier */
1295 	arr[num++] = 0x61;	/* proto=sas, binary */
1296 	arr[num++] = 0x95;	/* piv=1, target port group id */
1297 	arr[num++] = 0x0;
1298 	arr[num++] = 0x4;
1299 	arr[num++] = 0;
1300 	arr[num++] = 0;
1301 	put_unaligned_be16(port_group_id, arr + num);
1302 	num += 2;
1303 	/* NAA-3, Target device identifier */
1304 	arr[num++] = 0x61;	/* proto=sas, binary */
1305 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1306 	arr[num++] = 0x0;
1307 	arr[num++] = 0x8;
1308 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1309 	num += 8;
1310 	/* SCSI name string: Target device identifier */
1311 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1312 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1313 	arr[num++] = 0x0;
1314 	arr[num++] = 24;
1315 	memcpy(arr + num, "naa.32222220", 12);
1316 	num += 12;
1317 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1318 	memcpy(arr + num, b, 8);
1319 	num += 8;
1320 	memset(arr + num, 0, 4);
1321 	num += 4;
1322 	return num;
1323 }
1324 
1325 static unsigned char vpd84_data[] = {
1326 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1327     0x22,0x22,0x22,0x0,0xbb,0x1,
1328     0x22,0x22,0x22,0x0,0xbb,0x2,
1329 };
1330 
1331 /*  Software interface identification VPD page */
1332 static int inquiry_vpd_84(unsigned char *arr)
1333 {
1334 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1335 	return sizeof(vpd84_data);
1336 }
1337 
1338 /* Management network addresses VPD page */
1339 static int inquiry_vpd_85(unsigned char *arr)
1340 {
1341 	int num = 0;
1342 	const char *na1 = "https://www.kernel.org/config";
1343 	const char *na2 = "http://www.kernel.org/log";
1344 	int plen, olen;
1345 
1346 	arr[num++] = 0x1;	/* lu, storage config */
1347 	arr[num++] = 0x0;	/* reserved */
1348 	arr[num++] = 0x0;
1349 	olen = strlen(na1);
1350 	plen = olen + 1;
1351 	if (plen % 4)
1352 		plen = ((plen / 4) + 1) * 4;
1353 	arr[num++] = plen;	/* length, null terminated, padded */
1354 	memcpy(arr + num, na1, olen);
1355 	memset(arr + num + olen, 0, plen - olen);
1356 	num += plen;
1357 
1358 	arr[num++] = 0x4;	/* lu, logging */
1359 	arr[num++] = 0x0;	/* reserved */
1360 	arr[num++] = 0x0;
1361 	olen = strlen(na2);
1362 	plen = olen + 1;
1363 	if (plen % 4)
1364 		plen = ((plen / 4) + 1) * 4;
1365 	arr[num++] = plen;	/* length, null terminated, padded */
1366 	memcpy(arr + num, na2, olen);
1367 	memset(arr + num + olen, 0, plen - olen);
1368 	num += plen;
1369 
1370 	return num;
1371 }
1372 
1373 /* SCSI ports VPD page */
1374 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1375 {
1376 	int num = 0;
1377 	int port_a, port_b;
1378 
1379 	port_a = target_dev_id + 1;
1380 	port_b = port_a + 1;
1381 	arr[num++] = 0x0;	/* reserved */
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x0;
1384 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1385 	memset(arr + num, 0, 6);
1386 	num += 6;
1387 	arr[num++] = 0x0;
1388 	arr[num++] = 12;	/* length tp descriptor */
1389 	/* naa-3 target port identifier (A) */
1390 	arr[num++] = 0x61;	/* proto=sas, binary */
1391 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1392 	arr[num++] = 0x0;	/* reserved */
1393 	arr[num++] = 0x8;	/* length */
1394 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1395 	num += 8;
1396 	arr[num++] = 0x0;	/* reserved */
1397 	arr[num++] = 0x0;	/* reserved */
1398 	arr[num++] = 0x0;
1399 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1400 	memset(arr + num, 0, 6);
1401 	num += 6;
1402 	arr[num++] = 0x0;
1403 	arr[num++] = 12;	/* length tp descriptor */
1404 	/* naa-3 target port identifier (B) */
1405 	arr[num++] = 0x61;	/* proto=sas, binary */
1406 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1407 	arr[num++] = 0x0;	/* reserved */
1408 	arr[num++] = 0x8;	/* length */
1409 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1410 	num += 8;
1411 
1412 	return num;
1413 }
1414 
1415 
1416 static unsigned char vpd89_data[] = {
1417 /* from 4th byte */ 0,0,0,0,
1418 'l','i','n','u','x',' ',' ',' ',
1419 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1420 '1','2','3','4',
1421 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1422 0xec,0,0,0,
1423 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1424 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1425 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1426 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1427 0x53,0x41,
1428 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1429 0x20,0x20,
1430 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1431 0x10,0x80,
1432 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1433 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1434 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1436 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1437 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1438 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1443 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1444 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1445 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1448 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1449 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1450 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1451 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1455 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1456 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1458 };
1459 
1460 /* ATA Information VPD page */
1461 static int inquiry_vpd_89(unsigned char *arr)
1462 {
1463 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1464 	return sizeof(vpd89_data);
1465 }
1466 
1467 
1468 static unsigned char vpdb0_data[] = {
1469 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1470 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1471 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1472 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1473 };
1474 
1475 /* Block limits VPD page (SBC-3) */
1476 static int inquiry_vpd_b0(unsigned char *arr)
1477 {
1478 	unsigned int gran;
1479 
1480 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1481 
1482 	/* Optimal transfer length granularity */
1483 	if (sdebug_opt_xferlen_exp != 0 &&
1484 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1485 		gran = 1 << sdebug_opt_xferlen_exp;
1486 	else
1487 		gran = 1 << sdebug_physblk_exp;
1488 	put_unaligned_be16(gran, arr + 2);
1489 
1490 	/* Maximum Transfer Length */
1491 	if (sdebug_store_sectors > 0x400)
1492 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1493 
1494 	/* Optimal Transfer Length */
1495 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1496 
1497 	if (sdebug_lbpu) {
1498 		/* Maximum Unmap LBA Count */
1499 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1500 
1501 		/* Maximum Unmap Block Descriptor Count */
1502 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1503 	}
1504 
1505 	/* Unmap Granularity Alignment */
1506 	if (sdebug_unmap_alignment) {
1507 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1508 		arr[28] |= 0x80; /* UGAVALID */
1509 	}
1510 
1511 	/* Optimal Unmap Granularity */
1512 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1513 
1514 	/* Maximum WRITE SAME Length */
1515 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1516 
1517 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1518 }
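
/*
 * Granularity example, assuming the physblk_exp and opt_xferlen_exp
 * module parameters keep their usual meaning: with physblk_exp=3 and
 * opt_xferlen_exp=0 (the default) the else branch reports
 * gran = 1 << 3 = 8 logical blocks; loading with opt_xferlen_exp=4
 * instead reports gran = 16.
 */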
1519 
1520 /* Block device characteristics VPD page (SBC-3) */
1521 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1522 {
1523 	memset(arr, 0, 0x3c);
1524 	arr[0] = 0;
1525 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1526 	arr[2] = 0;
1527 	arr[3] = 5;	/* less than 1.8" */
1528 	if (devip->zmodel == BLK_ZONED_HA)
1529 		arr[4] = 1 << 4;	/* zoned field = 01b */
1530 
1531 	return 0x3c;
1532 }
1533 
1534 /* Logical block provisioning VPD page (SBC-4) */
1535 static int inquiry_vpd_b2(unsigned char *arr)
1536 {
1537 	memset(arr, 0, 0x4);
1538 	arr[0] = 0;			/* threshold exponent */
1539 	if (sdebug_lbpu)
1540 		arr[1] = 1 << 7;
1541 	if (sdebug_lbpws)
1542 		arr[1] |= 1 << 6;
1543 	if (sdebug_lbpws10)
1544 		arr[1] |= 1 << 5;
1545 	if (sdebug_lbprz && scsi_debug_lbp())
1546 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1547 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1548 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1549 	/* threshold_percentage=0 */
1550 	return 0x4;
1551 }
1552 
1553 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1554 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1555 {
1556 	memset(arr, 0, 0x3c);
1557 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1558 	/*
1559 	 * Set Optimal number of open sequential write preferred zones and
1560 	 * Optimal number of non-sequentially written sequential write
1561 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1562 	 * fields set to zero, apart from Max. number of open swrz_s field.
1563 	 */
1564 	put_unaligned_be32(0xffffffff, &arr[4]);
1565 	put_unaligned_be32(0xffffffff, &arr[8]);
1566 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1567 		put_unaligned_be32(devip->max_open, &arr[12]);
1568 	else
1569 		put_unaligned_be32(0xffffffff, &arr[12]);
1570 	if (devip->zcap < devip->zsize) {
1571 		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
1572 		put_unaligned_be64(devip->zsize, &arr[20]);
1573 	} else {
1574 		arr[19] = 0;
1575 	}
1576 	return 0x3c;
1577 }
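
/*
 * The zcap < zsize case above arises when the configured zone capacity
 * (zone_cap_mb, assuming the usual module parameter naming) is smaller
 * than the zone size, leaving a gap zone behind every sequential zone;
 * the page then also carries a constant zone starting LBA granularity
 * equal to the zone size in bytes 20 to 27.
 */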
1578 
1579 #define SDEBUG_LONG_INQ_SZ 96
1580 #define SDEBUG_MAX_INQ_ARR_SZ 584
1581 
1582 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1583 {
1584 	unsigned char pq_pdt;
1585 	unsigned char *arr;
1586 	unsigned char *cmd = scp->cmnd;
1587 	u32 alloc_len, n;
1588 	int ret;
1589 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1590 
1591 	alloc_len = get_unaligned_be16(cmd + 3);
1592 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1593 	if (!arr)
1594 		return DID_REQUEUE << 16;
1595 	is_disk = (sdebug_ptype == TYPE_DISK);
1596 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1597 	is_disk_zbc = (is_disk || is_zbc);
1598 	have_wlun = scsi_is_wlun(scp->device->lun);
1599 	if (have_wlun)
1600 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1601 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1602 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1603 	else
1604 		pq_pdt = (sdebug_ptype & 0x1f);
1605 	arr[0] = pq_pdt;
1606 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1607 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1608 		kfree(arr);
1609 		return check_condition_result;
1610 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1611 		int lu_id_num, port_group_id, target_dev_id;
1612 		u32 len;
1613 		char lu_id_str[6];
1614 		int host_no = devip->sdbg_host->shost->host_no;
1615 
1616 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1617 		    (devip->channel & 0x7f);
1618 		if (sdebug_vpd_use_hostno == 0)
1619 			host_no = 0;
1620 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1621 			    (devip->target * 1000) + devip->lun);
1622 		target_dev_id = ((host_no + 1) * 2000) +
1623 				 (devip->target * 1000) - 3;
1624 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1625 		if (0 == cmd[2]) { /* supported vital product data pages */
1626 			arr[1] = cmd[2];	/* sanity */
1627 			n = 4;
1628 			arr[n++] = 0x0;   /* this page */
1629 			arr[n++] = 0x80;  /* unit serial number */
1630 			arr[n++] = 0x83;  /* device identification */
1631 			arr[n++] = 0x84;  /* software interface ident. */
1632 			arr[n++] = 0x85;  /* management network addresses */
1633 			arr[n++] = 0x86;  /* extended inquiry */
1634 			arr[n++] = 0x87;  /* mode page policy */
1635 			arr[n++] = 0x88;  /* SCSI ports */
1636 			if (is_disk_zbc) {	  /* SBC or ZBC */
1637 				arr[n++] = 0x89;  /* ATA information */
1638 				arr[n++] = 0xb0;  /* Block limits */
1639 				arr[n++] = 0xb1;  /* Block characteristics */
1640 				if (is_disk)
1641 					arr[n++] = 0xb2;  /* LB Provisioning */
1642 				if (is_zbc)
1643 					arr[n++] = 0xb6;  /* ZB dev. char. */
1644 			}
1645 			arr[3] = n - 4;	  /* number of supported VPD pages */
1646 		} else if (0x80 == cmd[2]) { /* unit serial number */
1647 			arr[1] = cmd[2];	/* sanity */
1648 			arr[3] = len;
1649 			memcpy(&arr[4], lu_id_str, len);
1650 		} else if (0x83 == cmd[2]) { /* device identification */
1651 			arr[1] = cmd[2];	/* sanity */
1652 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1653 						target_dev_id, lu_id_num,
1654 						lu_id_str, len,
1655 						&devip->lu_name);
1656 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1657 			arr[1] = cmd[2];	/* sanity */
1658 			arr[3] = inquiry_vpd_84(&arr[4]);
1659 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1660 			arr[1] = cmd[2];	/* sanity */
1661 			arr[3] = inquiry_vpd_85(&arr[4]);
1662 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1663 			arr[1] = cmd[2];	/* sanity */
1664 			arr[3] = 0x3c;	/* number of following entries */
1665 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1666 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1667 			else if (have_dif_prot)
1668 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1669 			else
1670 				arr[4] = 0x0;   /* no protection stuff */
1671 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1672 		} else if (0x87 == cmd[2]) { /* mode page policy */
1673 			arr[1] = cmd[2];	/* sanity */
1674 			arr[3] = 0x8;	/* number of following entries */
1675 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1676 			arr[6] = 0x80;	/* mlus, shared */
1677 			arr[8] = 0x18;	 /* protocol specific lu */
1678 			arr[10] = 0x82;	 /* mlus, per initiator port */
1679 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1680 			arr[1] = cmd[2];	/* sanity */
1681 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1682 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1683 			arr[1] = cmd[2];        /* sanity */
1684 			n = inquiry_vpd_89(&arr[4]);
1685 			put_unaligned_be16(n, arr + 2);
1686 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1687 			arr[1] = cmd[2];        /* sanity */
1688 			arr[3] = inquiry_vpd_b0(&arr[4]);
1689 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1690 			arr[1] = cmd[2];        /* sanity */
1691 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1692 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1693 			arr[1] = cmd[2];        /* sanity */
1694 			arr[3] = inquiry_vpd_b2(&arr[4]);
1695 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1696 			arr[1] = cmd[2];        /* sanity */
1697 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1698 		} else {
1699 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1700 			kfree(arr);
1701 			return check_condition_result;
1702 		}
1703 		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
1704 		ret = fill_from_dev_buffer(scp, arr,
1705 			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
1706 		kfree(arr);
1707 		return ret;
1708 	}
1709 	/* drops through here for a standard inquiry */
1710 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1711 	arr[2] = sdebug_scsi_level;
1712 	arr[3] = 2;    /* response_data_format==2 */
1713 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1714 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1715 	if (sdebug_vpd_use_hostno == 0)
1716 		arr[5] |= 0x10; /* claim: implicit TPGS */
1717 	arr[6] = 0x10; /* claim: MultiP */
1718 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1719 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1720 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1721 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1722 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1723 	/* Use Vendor Specific area to place driver date in ASCII */
1724 	memcpy(&arr[36], sdebug_version_date, 8);
1725 	/* version descriptors (2 bytes each) follow */
1726 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1727 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1728 	n = 62;
1729 	if (is_disk) {		/* SBC-4 no version claimed */
1730 		put_unaligned_be16(0x600, arr + n);
1731 		n += 2;
1732 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1733 		put_unaligned_be16(0x525, arr + n);
1734 		n += 2;
1735 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1736 		put_unaligned_be16(0x624, arr + n);
1737 		n += 2;
1738 	}
1739 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1740 	ret = fill_from_dev_buffer(scp, arr,
1741 			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
1742 	kfree(arr);
1743 	return ret;
1744 }
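
/*
 * For illustration only (not part of the driver): a minimal user space
 * sketch of driving the EVPD=1, page 0x80 branch above through the
 * SG_IO ioctl, error handling omitted and /dev/sdX a placeholder:
 *
 *	int fd = open("/dev/sdX", O_RDONLY);
 *	unsigned char cdb[6] = { 0x12, 0x01, 0x80, 0x00, 0xfc, 0x00 };
 *	unsigned char resp[252], sense[32];
 *	struct sg_io_hdr io = { .interface_id = 'S' };
 *
 *	io.cmdp = cdb;
 *	io.cmd_len = sizeof(cdb);
 *	io.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io.dxferp = resp;
 *	io.dxfer_len = sizeof(resp);
 *	io.sbp = sense;
 *	io.mx_sb_len = sizeof(sense);
 *	io.timeout = 5000;
 *	ioctl(fd, SG_IO, &io);
 *
 * sg3_utils wraps the same, e.g. "sg_inq --page=0x80 /dev/sdX".
 */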
1745 
1746 /* See resp_iec_m_pg() for how this data is manipulated */
1747 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1748 				   0, 0, 0x0, 0x0};
1749 
1750 static int resp_requests(struct scsi_cmnd *scp,
1751 			 struct sdebug_dev_info *devip)
1752 {
1753 	unsigned char *cmd = scp->cmnd;
1754 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
1755 	bool dsense = !!(cmd[1] & 1);
1756 	u32 alloc_len = cmd[4];
1757 	u32 len = 18;
1758 	int stopped_state = atomic_read(&devip->stopped);
1759 
1760 	memset(arr, 0, sizeof(arr));
1761 	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
1762 		if (dsense) {
1763 			arr[0] = 0x72;
1764 			arr[1] = NOT_READY;
1765 			arr[2] = LOGICAL_UNIT_NOT_READY;
1766 			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1767 			len = 8;
1768 		} else {
1769 			arr[0] = 0x70;
1770 			arr[2] = NOT_READY;		/* NOT_READY in sense_key */
1771 			arr[7] = 0xa;			/* 18 byte sense buffer */
1772 			arr[12] = LOGICAL_UNIT_NOT_READY;
1773 			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1774 		}
1775 	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1776 		/* Information exceptions control mode page: TEST=1, MRIE=6 */
1777 		if (dsense) {
1778 			arr[0] = 0x72;
1779 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1780 			arr[2] = THRESHOLD_EXCEEDED;
1781 			arr[3] = 0xff;		/* Failure prediction(false) */
1782 			len = 8;
1783 		} else {
1784 			arr[0] = 0x70;
1785 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1786 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1787 			arr[12] = THRESHOLD_EXCEEDED;
1788 			arr[13] = 0xff;		/* Failure prediction(false) */
1789 		}
1790 	} else {	/* nothing to report */
1791 		if (dsense) {
1792 			len = 8;
1793 			memset(arr, 0, len);
1794 			arr[0] = 0x72;
1795 		} else {
1796 			memset(arr, 0, len);
1797 			arr[0] = 0x70;
1798 			arr[7] = 0xa;
1799 		}
1800 	}
1801 	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
1802 }
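
/*
 * For reference, the two sense formats built above differ as follows
 * (matching the offsets used in this function):
 *   fixed (0x70):      sense key in byte 2, ASC/ASCQ in bytes 12/13,
 *                      additional length 0xa in byte 7, 18 bytes total
 *   descriptor (0x72): sense key in byte 1, ASC/ASCQ in bytes 2/3,
 *                      8 bytes total when no descriptors follow
 */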
1803 
1804 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1805 {
1806 	unsigned char *cmd = scp->cmnd;
1807 	int power_cond, want_stop, stopped_state;
1808 	bool changing;
1809 
1810 	power_cond = (cmd[4] & 0xf0) >> 4;
1811 	if (power_cond) {
1812 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1813 		return check_condition_result;
1814 	}
1815 	want_stop = !(cmd[4] & 1);
1816 	stopped_state = atomic_read(&devip->stopped);
1817 	if (stopped_state == 2) {
1818 		ktime_t now_ts = ktime_get_boottime();
1819 
1820 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1821 			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1822 
1823 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1824 				/* tur_ms_to_ready timer expired */
1825 				atomic_set(&devip->stopped, 0);
1826 				stopped_state = 0;
1827 			}
1828 		}
1829 		if (stopped_state == 2) {
1830 			if (want_stop) {
1831 				stopped_state = 1;	/* dummy up success */
1832 			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
1833 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1834 				return check_condition_result;
1835 			}
1836 		}
1837 	}
1838 	changing = (stopped_state != want_stop);
1839 	if (changing)
1840 		atomic_xchg(&devip->stopped, want_stop);
1841 	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
1842 		return SDEG_RES_IMMED_MASK;
1843 	else
1844 		return 0;
1845 }
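
/*
 * Example of the delayed-ready handling above, assuming the usual
 * tur_ms_to_ready module parameter: after
 * "modprobe scsi_debug tur_ms_to_ready=2500", a START STOP UNIT with
 * START=1 arriving 2 seconds after device creation finds diff_ns
 * (2000000000) below 2500 * 1000000, so it is rejected rather than
 * cutting the not-ready window short.
 */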
1846 
1847 static sector_t get_sdebug_capacity(void)
1848 {
1849 	static const unsigned int gibibyte = 1073741824;
1850 
1851 	if (sdebug_virtual_gb > 0)
1852 		return (sector_t)sdebug_virtual_gb *
1853 			(gibibyte / sdebug_sector_size);
1854 	else
1855 		return sdebug_store_sectors;
1856 }
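
/*
 * Worked example: virtual_gb=4 with the default 512 byte sector size
 * reports 4 * (1073741824 / 512) = 8388608 sectors, however much
 * backing store (dev_size_mb) was actually allocated; accesses beyond
 * the store wrap, see do_device_access().
 */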
1857 
1858 #define SDEBUG_READCAP_ARR_SZ 8
1859 static int resp_readcap(struct scsi_cmnd *scp,
1860 			struct sdebug_dev_info *devip)
1861 {
1862 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1863 	unsigned int capac;
1864 
1865 	/* following just in case virtual_gb changed */
1866 	sdebug_capacity = get_sdebug_capacity();
1867 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1868 	if (sdebug_capacity < 0xffffffff) {
1869 		capac = (unsigned int)sdebug_capacity - 1;
1870 		put_unaligned_be32(capac, arr + 0);
1871 	} else
1872 		put_unaligned_be32(0xffffffff, arr + 0);
1873 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1874 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1875 }
1876 
1877 #define SDEBUG_READCAP16_ARR_SZ 32
1878 static int resp_readcap16(struct scsi_cmnd *scp,
1879 			  struct sdebug_dev_info *devip)
1880 {
1881 	unsigned char *cmd = scp->cmnd;
1882 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1883 	u32 alloc_len;
1884 
1885 	alloc_len = get_unaligned_be32(cmd + 10);
1886 	/* following just in case virtual_gb changed */
1887 	sdebug_capacity = get_sdebug_capacity();
1888 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1889 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1890 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1891 	arr[13] = sdebug_physblk_exp & 0xf;
1892 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1893 
1894 	if (scsi_debug_lbp()) {
1895 		arr[14] |= 0x80; /* LBPME */
1896 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1897 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1898 		 * in the wider field maps to 0 in this field.
1899 		 */
1900 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1901 			arr[14] |= 0x40;
1902 	}
1903 
1904 	/*
1905 	 * Since the scsi_debug READ CAPACITY implementation always reports the
1906 	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
1907 	 */
1908 	if (devip->zmodel == BLK_ZONED_HM)
1909 		arr[12] |= 1 << 4;
1910 
1911 	arr[15] = sdebug_lowest_aligned & 0xff;
1912 
1913 	if (have_dif_prot) {
1914 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1915 		arr[12] |= 1; /* PROT_EN */
1916 	}
1917 
1918 	return fill_from_dev_buffer(scp, arr,
1919 			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1920 }
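
/*
 * Example of the LBPRZ narrowing noted above: a module load with
 * lbprz=2 still reports LBPRZ=2 in the B2h VPD page, but bit 6 of
 * byte 14 here stays 0 because only odd lbprz values set the 1 bit
 * READ CAPACITY(16) flag.
 */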
1921 
1922 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1923 
1924 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1925 			      struct sdebug_dev_info *devip)
1926 {
1927 	unsigned char *cmd = scp->cmnd;
1928 	unsigned char *arr;
1929 	int host_no = devip->sdbg_host->shost->host_no;
1930 	int port_group_a, port_group_b, port_a, port_b;
1931 	u32 alen, n, rlen;
1932 	int ret;
1933 
1934 	alen = get_unaligned_be32(cmd + 6);
1935 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1936 	if (!arr)
1937 		return DID_REQUEUE << 16;
1938 	/*
1939 	 * EVPD page 0x88 states we have two ports, one
1940 	 * real and a fake port with no device connected.
1941 	 * So we create two port groups with one port each
1942 	 * and set the group with port B to unavailable.
1943 	 */
1944 	port_a = 0x1; /* relative port A */
1945 	port_b = 0x2; /* relative port B */
1946 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1947 			(devip->channel & 0x7f);
1948 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1949 			(devip->channel & 0x7f) + 0x80;
1950 
1951 	/*
1952 	 * The asymmetric access state is cycled according to the host_no.
1953 	 */
1954 	n = 4;
1955 	if (sdebug_vpd_use_hostno == 0) {
1956 		arr[n++] = host_no % 3; /* Asymm access state */
1957 		arr[n++] = 0x0F; /* claim: all states are supported */
1958 	} else {
1959 		arr[n++] = 0x0; /* Active/Optimized path */
1960 		arr[n++] = 0x01; /* only support active/optimized paths */
1961 	}
1962 	put_unaligned_be16(port_group_a, arr + n);
1963 	n += 2;
1964 	arr[n++] = 0;    /* Reserved */
1965 	arr[n++] = 0;    /* Status code */
1966 	arr[n++] = 0;    /* Vendor unique */
1967 	arr[n++] = 0x1;  /* One port per group */
1968 	arr[n++] = 0;    /* Reserved */
1969 	arr[n++] = 0;    /* Reserved */
1970 	put_unaligned_be16(port_a, arr + n);
1971 	n += 2;
1972 	arr[n++] = 3;    /* Port unavailable */
1973 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1974 	put_unaligned_be16(port_group_b, arr + n);
1975 	n += 2;
1976 	arr[n++] = 0;    /* Reserved */
1977 	arr[n++] = 0;    /* Status code */
1978 	arr[n++] = 0;    /* Vendor unique */
1979 	arr[n++] = 0x1;  /* One port per group */
1980 	arr[n++] = 0;    /* Reserved */
1981 	arr[n++] = 0;    /* Reserved */
1982 	put_unaligned_be16(port_b, arr + n);
1983 	n += 2;
1984 
1985 	rlen = n - 4;
1986 	put_unaligned_be32(rlen, arr + 0);
1987 
1988 	/*
1989 	 * Return the smallest value of either
1990 	 * - The allocated length
1991 	 * - The constructed command length
1992 	 * - The maximum array size
1993 	 */
1994 	rlen = min(alen, n);
1995 	ret = fill_from_dev_buffer(scp, arr,
1996 			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1997 	kfree(arr);
1998 	return ret;
1999 }
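
/*
 * Worked example of the group ids above: for host_no 0 and channel 0,
 * port_group_a = ((1 & 0x7f) << 8) + 0 = 0x100 and port_group_b =
 * 0x180, matching the port group carried in the device identification
 * VPD page for the same device.
 */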
2000 
2001 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
2002 			     struct sdebug_dev_info *devip)
2003 {
2004 	bool rctd;
2005 	u8 reporting_opts, req_opcode, sdeb_i, supp;
2006 	u16 req_sa, u;
2007 	u32 alloc_len, a_len;
2008 	int k, offset, len, errsts, count, bump, na;
2009 	const struct opcode_info_t *oip;
2010 	const struct opcode_info_t *r_oip;
2011 	u8 *arr;
2012 	u8 *cmd = scp->cmnd;
2013 
2014 	rctd = !!(cmd[2] & 0x80);
2015 	reporting_opts = cmd[2] & 0x7;
2016 	req_opcode = cmd[3];
2017 	req_sa = get_unaligned_be16(cmd + 4);
2018 	alloc_len = get_unaligned_be32(cmd + 6);
2019 	if (alloc_len < 4 || alloc_len > 0xffff) {
2020 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2021 		return check_condition_result;
2022 	}
2023 	if (alloc_len > 8192)
2024 		a_len = 8192;
2025 	else
2026 		a_len = alloc_len;
2027 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
2028 	if (NULL == arr) {
2029 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2030 				INSUFF_RES_ASCQ);
2031 		return check_condition_result;
2032 	}
2033 	switch (reporting_opts) {
2034 	case 0:	/* all commands */
2035 		/* count number of commands */
2036 		for (count = 0, oip = opcode_info_arr;
2037 		     oip->num_attached != 0xff; ++oip) {
2038 			if (F_INV_OP & oip->flags)
2039 				continue;
2040 			count += (oip->num_attached + 1);
2041 		}
2042 		bump = rctd ? 20 : 8;
2043 		put_unaligned_be32(count * bump, arr);
2044 		for (offset = 4, oip = opcode_info_arr;
2045 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2046 			if (F_INV_OP & oip->flags)
2047 				continue;
2048 			na = oip->num_attached;
2049 			arr[offset] = oip->opcode;
2050 			put_unaligned_be16(oip->sa, arr + offset + 2);
2051 			if (rctd)
2052 				arr[offset + 5] |= 0x2;
2053 			if (FF_SA & oip->flags)
2054 				arr[offset + 5] |= 0x1;
2055 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2056 			if (rctd)
2057 				put_unaligned_be16(0xa, arr + offset + 8);
2058 			r_oip = oip;
2059 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2060 				if (F_INV_OP & oip->flags)
2061 					continue;
2062 				offset += bump;
2063 				arr[offset] = oip->opcode;
2064 				put_unaligned_be16(oip->sa, arr + offset + 2);
2065 				if (rctd)
2066 					arr[offset + 5] |= 0x2;
2067 				if (FF_SA & oip->flags)
2068 					arr[offset + 5] |= 0x1;
2069 				put_unaligned_be16(oip->len_mask[0],
2070 						   arr + offset + 6);
2071 				if (rctd)
2072 					put_unaligned_be16(0xa,
2073 							   arr + offset + 8);
2074 			}
2075 			oip = r_oip;
2076 			offset += bump;
2077 		}
2078 		break;
2079 	case 1:	/* one command: opcode only */
2080 	case 2:	/* one command: opcode plus service action */
2081 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2082 		sdeb_i = opcode_ind_arr[req_opcode];
2083 		oip = &opcode_info_arr[sdeb_i];
2084 		if (F_INV_OP & oip->flags) {
2085 			supp = 1;
2086 			offset = 4;
2087 		} else {
2088 			if (1 == reporting_opts) {
2089 				if (FF_SA & oip->flags) {
2090 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2091 							     2, 2);
2092 					kfree(arr);
2093 					return check_condition_result;
2094 				}
2095 				req_sa = 0;
2096 			} else if (2 == reporting_opts &&
2097 				   0 == (FF_SA & oip->flags)) {
2098 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);	/* requested sa */
2099 				kfree(arr);
2100 				return check_condition_result;
2101 			}
2102 			if (0 == (FF_SA & oip->flags) &&
2103 			    req_opcode == oip->opcode)
2104 				supp = 3;
2105 			else if (0 == (FF_SA & oip->flags)) {
2106 				na = oip->num_attached;
2107 				for (k = 0, oip = oip->arrp; k < na;
2108 				     ++k, ++oip) {
2109 					if (req_opcode == oip->opcode)
2110 						break;
2111 				}
2112 				supp = (k >= na) ? 1 : 3;
2113 			} else if (req_sa != oip->sa) {
2114 				na = oip->num_attached;
2115 				for (k = 0, oip = oip->arrp; k < na;
2116 				     ++k, ++oip) {
2117 					if (req_sa == oip->sa)
2118 						break;
2119 				}
2120 				supp = (k >= na) ? 1 : 3;
2121 			} else
2122 				supp = 3;
2123 			if (3 == supp) {
2124 				u = oip->len_mask[0];
2125 				put_unaligned_be16(u, arr + 2);
2126 				arr[4] = oip->opcode;
2127 				for (k = 1; k < u; ++k)
2128 					arr[4 + k] = (k < 16) ?
2129 						 oip->len_mask[k] : 0xff;
2130 				offset = 4 + u;
2131 			} else
2132 				offset = 4;
2133 		}
2134 		arr[1] = (rctd ? 0x80 : 0) | supp;
2135 		if (rctd) {
2136 			put_unaligned_be16(0xa, arr + offset);
2137 			offset += 12;
2138 		}
2139 		break;
2140 	default:
2141 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2142 		kfree(arr);
2143 		return check_condition_result;
2144 	}
2145 	offset = (offset < a_len) ? offset : a_len;
2146 	len = (offset < alloc_len) ? offset : alloc_len;
2147 	errsts = fill_from_dev_buffer(scp, arr, len);
2148 	kfree(arr);
2149 	return errsts;
2150 }
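
/*
 * Sizing note for the all_commands loop above: each command descriptor
 * is 8 bytes, or 20 with RCTD set, because a 12 byte command timeouts
 * descriptor is appended whose length field (the 0xa stored at
 * offset + 8) covers its remaining 10 bytes.
 */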
2151 
2152 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2153 			  struct sdebug_dev_info *devip)
2154 {
2155 	bool repd;
2156 	u32 alloc_len, len;
2157 	u8 arr[16];
2158 	u8 *cmd = scp->cmnd;
2159 
2160 	memset(arr, 0, sizeof(arr));
2161 	repd = !!(cmd[2] & 0x80);
2162 	alloc_len = get_unaligned_be32(cmd + 6);
2163 	if (alloc_len < 4) {
2164 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2165 		return check_condition_result;
2166 	}
2167 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2168 	arr[1] = 0x1;		/* ITNRS */
2169 	if (repd) {
2170 		arr[3] = 0xc;
2171 		len = 16;
2172 	} else
2173 		len = 4;
2174 
2175 	len = (len < alloc_len) ? len : alloc_len;
2176 	return fill_from_dev_buffer(scp, arr, len);
2177 }
2178 
2179 /* <<Following mode page info copied from ST318451LW>> */
2180 
2181 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2182 {	/* Read-Write Error Recovery page for mode_sense */
2183 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2184 					5, 0, 0xff, 0xff};
2185 
2186 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2187 	if (1 == pcontrol)
2188 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2189 	return sizeof(err_recov_pg);
2190 }
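
/*
 * The pcontrol argument threaded through these mode page responders
 * follows the PC field of MODE SENSE: 0 returns current values, 1 the
 * changeable mask (hence the zeroed or "ch_" template copies), 2 the
 * defaults; PC=3 (saved values) is rejected up front in
 * resp_mode_sense() with SAVING_PARAMS_UNSUP.
 */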
2191 
2192 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2193 { 	/* Disconnect-Reconnect page for mode_sense */
2194 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2195 					 0, 0, 0, 0, 0, 0, 0, 0};
2196 
2197 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2198 	if (1 == pcontrol)
2199 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2200 	return sizeof(disconnect_pg);
2201 }
2202 
2203 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2204 {       /* Format device page for mode_sense */
2205 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2206 				     0, 0, 0, 0, 0, 0, 0, 0,
2207 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2208 
2209 	memcpy(p, format_pg, sizeof(format_pg));
2210 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2211 	put_unaligned_be16(sdebug_sector_size, p + 12);
2212 	if (sdebug_removable)
2213 		p[20] |= 0x20; /* should agree with INQUIRY */
2214 	if (1 == pcontrol)
2215 		memset(p + 2, 0, sizeof(format_pg) - 2);
2216 	return sizeof(format_pg);
2217 }
2218 
2219 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2220 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2221 				     0, 0, 0, 0};
2222 
2223 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2224 { 	/* Caching page for mode_sense */
2225 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2226 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2227 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2228 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2229 
2230 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2231 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2232 	memcpy(p, caching_pg, sizeof(caching_pg));
2233 	if (1 == pcontrol)
2234 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2235 	else if (2 == pcontrol)
2236 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2237 	return sizeof(caching_pg);
2238 }
2239 
2240 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2241 				    0, 0, 0x2, 0x4b};
2242 
2243 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2244 { 	/* Control mode page for mode_sense */
2245 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2246 					0, 0, 0, 0};
2247 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2248 				     0, 0, 0x2, 0x4b};
2249 
2250 	if (sdebug_dsense)
2251 		ctrl_m_pg[2] |= 0x4;
2252 	else
2253 		ctrl_m_pg[2] &= ~0x4;
2254 
2255 	if (sdebug_ato)
2256 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2257 
2258 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2259 	if (1 == pcontrol)
2260 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2261 	else if (2 == pcontrol)
2262 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2263 	return sizeof(ctrl_m_pg);
2264 }
2265 
2266 
2267 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2268 {	/* Informational Exceptions control mode page for mode_sense */
2269 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2270 				       0, 0, 0x0, 0x0};
2271 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2272 				      0, 0, 0x0, 0x0};
2273 
2274 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2275 	if (1 == pcontrol)
2276 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2277 	else if (2 == pcontrol)
2278 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2279 	return sizeof(iec_m_pg);
2280 }
2281 
2282 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2283 {	/* SAS SSP mode page - short format for mode_sense */
2284 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2285 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2286 
2287 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2288 	if (1 == pcontrol)
2289 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2290 	return sizeof(sas_sf_m_pg);
2291 }
2292 
2293 
2294 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2295 			      int target_dev_id)
2296 {	/* SAS phy control and discover mode page for mode_sense */
2297 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2298 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2299 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2300 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2301 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2302 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2303 		    0, 0, 0, 0, 0, 0, 0, 0,
2304 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2305 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2306 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2307 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2308 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2309 		    0, 0, 0, 0, 0, 0, 0, 0,
2310 		};
2311 	int port_a, port_b;
2312 
2313 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2314 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2315 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2316 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2317 	port_a = target_dev_id + 1;
2318 	port_b = port_a + 1;
2319 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2320 	put_unaligned_be32(port_a, p + 20);
2321 	put_unaligned_be32(port_b, p + 48 + 20);
2322 	if (1 == pcontrol)
2323 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2324 	return sizeof(sas_pcd_m_pg);
2325 }
2326 
2327 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2328 {	/* SAS SSP shared protocol specific port mode subpage */
2329 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2330 		    0, 0, 0, 0, 0, 0, 0, 0,
2331 		};
2332 
2333 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2334 	if (1 == pcontrol)
2335 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2336 	return sizeof(sas_sha_m_pg);
2337 }
2338 
2339 #define SDEBUG_MAX_MSENSE_SZ 256
2340 
2341 static int resp_mode_sense(struct scsi_cmnd *scp,
2342 			   struct sdebug_dev_info *devip)
2343 {
2344 	int pcontrol, pcode, subpcode, bd_len;
2345 	unsigned char dev_spec;
2346 	u32 alloc_len, offset, len;
2347 	int target_dev_id;
2348 	int target = scp->device->id;
2349 	unsigned char *ap;
2350 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2351 	unsigned char *cmd = scp->cmnd;
2352 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2353 
2354 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2355 	pcontrol = (cmd[2] & 0xc0) >> 6;
2356 	pcode = cmd[2] & 0x3f;
2357 	subpcode = cmd[3];
2358 	msense_6 = (MODE_SENSE == cmd[0]);
2359 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2360 	is_disk = (sdebug_ptype == TYPE_DISK);
2361 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2362 	if ((is_disk || is_zbc) && !dbd)
2363 		bd_len = llbaa ? 16 : 8;
2364 	else
2365 		bd_len = 0;
2366 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2367 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2368 	if (0x3 == pcontrol) {  /* Saving values not supported */
2369 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2370 		return check_condition_result;
2371 	}
2372 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2373 			(devip->target * 1000) - 3;
2374 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2375 	if (is_disk || is_zbc) {
2376 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2377 		if (sdebug_wp)
2378 			dev_spec |= 0x80;
2379 	} else
2380 		dev_spec = 0x0;
2381 	if (msense_6) {
2382 		arr[2] = dev_spec;
2383 		arr[3] = bd_len;
2384 		offset = 4;
2385 	} else {
2386 		arr[3] = dev_spec;
2387 		if (16 == bd_len)
2388 			arr[4] = 0x1;	/* set LONGLBA bit */
2389 		arr[7] = bd_len;	/* assume 255 or less */
2390 		offset = 8;
2391 	}
2392 	ap = arr + offset;
2393 	if ((bd_len > 0) && (!sdebug_capacity))
2394 		sdebug_capacity = get_sdebug_capacity();
2395 
2396 	if (8 == bd_len) {
2397 		if (sdebug_capacity > 0xfffffffe)
2398 			put_unaligned_be32(0xffffffff, ap + 0);
2399 		else
2400 			put_unaligned_be32(sdebug_capacity, ap + 0);
2401 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2402 		offset += bd_len;
2403 		ap = arr + offset;
2404 	} else if (16 == bd_len) {
2405 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2406 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2407 		offset += bd_len;
2408 		ap = arr + offset;
2409 	}
2410 
2411 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2412 		/* TODO: Control Extension page */
2413 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2414 		return check_condition_result;
2415 	}
2416 	bad_pcode = false;
2417 
2418 	switch (pcode) {
2419 	case 0x1:	/* Read-Write error recovery page, direct access */
2420 		len = resp_err_recov_pg(ap, pcontrol, target);
2421 		offset += len;
2422 		break;
2423 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2424 		len = resp_disconnect_pg(ap, pcontrol, target);
2425 		offset += len;
2426 		break;
2427 	case 0x3:       /* Format device page, direct access */
2428 		if (is_disk) {
2429 			len = resp_format_pg(ap, pcontrol, target);
2430 			offset += len;
2431 		} else
2432 			bad_pcode = true;
2433 		break;
2434 	case 0x8:	/* Caching page, direct access */
2435 		if (is_disk || is_zbc) {
2436 			len = resp_caching_pg(ap, pcontrol, target);
2437 			offset += len;
2438 		} else
2439 			bad_pcode = true;
2440 		break;
2441 	case 0xa:	/* Control Mode page, all devices */
2442 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2443 		offset += len;
2444 		break;
2445 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2446 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2447 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2448 			return check_condition_result;
2449 		}
2450 		len = 0;
2451 		if ((0x0 == subpcode) || (0xff == subpcode))
2452 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2453 		if ((0x1 == subpcode) || (0xff == subpcode))
2454 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2455 						  target_dev_id);
2456 		if ((0x2 == subpcode) || (0xff == subpcode))
2457 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2458 		offset += len;
2459 		break;
2460 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2461 		len = resp_iec_m_pg(ap, pcontrol, target);
2462 		offset += len;
2463 		break;
2464 	case 0x3f:	/* Read all Mode pages */
2465 		if ((0 == subpcode) || (0xff == subpcode)) {
2466 			len = resp_err_recov_pg(ap, pcontrol, target);
2467 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2468 			if (is_disk) {
2469 				len += resp_format_pg(ap + len, pcontrol,
2470 						      target);
2471 				len += resp_caching_pg(ap + len, pcontrol,
2472 						       target);
2473 			} else if (is_zbc) {
2474 				len += resp_caching_pg(ap + len, pcontrol,
2475 						       target);
2476 			}
2477 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2478 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2479 			if (0xff == subpcode) {
2480 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2481 						  target, target_dev_id);
2482 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2483 			}
2484 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2485 			offset += len;
2486 		} else {
2487 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2488 			return check_condition_result;
2489 		}
2490 		break;
2491 	default:
2492 		bad_pcode = true;
2493 		break;
2494 	}
2495 	if (bad_pcode) {
2496 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2497 		return check_condition_result;
2498 	}
2499 	if (msense_6)
2500 		arr[0] = offset - 1;
2501 	else
2502 		put_unaligned_be16((offset - 2), arr + 0);
2503 	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2504 }
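
/*
 * From user space these pages can be fetched with sg3_utils (assuming
 * the scsi_debug LU appears as /dev/sdX), e.g.
 * "sg_modes --page=0x8 /dev/sdX" for the caching page or
 * "sg_modes -a /dev/sdX" for the 0x3f all-pages case.
 */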
2505 
2506 #define SDEBUG_MAX_MSELECT_SZ 512
2507 
2508 static int resp_mode_select(struct scsi_cmnd *scp,
2509 			    struct sdebug_dev_info *devip)
2510 {
2511 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2512 	int param_len, res, mpage;
2513 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2514 	unsigned char *cmd = scp->cmnd;
2515 	int mselect6 = (MODE_SELECT == cmd[0]);
2516 
2517 	memset(arr, 0, sizeof(arr));
2518 	pf = cmd[1] & 0x10;
2519 	sp = cmd[1] & 0x1;
2520 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2521 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2522 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2523 		return check_condition_result;
2524 	}
2525 	res = fetch_to_dev_buffer(scp, arr, param_len);
2526 	if (-1 == res)
2527 		return DID_ERROR << 16;
2528 	else if (sdebug_verbose && (res < param_len))
2529 		sdev_printk(KERN_INFO, scp->device,
2530 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2531 			    __func__, param_len, res);
2532 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2533 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2534 	off = bd_len + (mselect6 ? 4 : 8);
2535 	if (md_len > 2 || off >= res) {	/* mode data length is reserved, expect 0 */
2536 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2537 		return check_condition_result;
2538 	}
2539 	mpage = arr[off] & 0x3f;
2540 	ps = !!(arr[off] & 0x80);
2541 	if (ps) {
2542 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2543 		return check_condition_result;
2544 	}
2545 	spf = !!(arr[off] & 0x40);
2546 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2547 		       (arr[off + 1] + 2);
2548 	if ((pg_len + off) > param_len) {
2549 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2550 				PARAMETER_LIST_LENGTH_ERR, 0);
2551 		return check_condition_result;
2552 	}
2553 	switch (mpage) {
2554 	case 0x8:      /* Caching Mode page */
2555 		if (caching_pg[1] == arr[off + 1]) {
2556 			memcpy(caching_pg + 2, arr + off + 2,
2557 			       sizeof(caching_pg) - 2);
2558 			goto set_mode_changed_ua;
2559 		}
2560 		break;
2561 	case 0xa:      /* Control Mode page */
2562 		if (ctrl_m_pg[1] == arr[off + 1]) {
2563 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2564 			       sizeof(ctrl_m_pg) - 2);
2565 			if (ctrl_m_pg[4] & 0x8)
2566 				sdebug_wp = true;
2567 			else
2568 				sdebug_wp = false;
2569 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2570 			goto set_mode_changed_ua;
2571 		}
2572 		break;
2573 	case 0x1c:      /* Informational Exceptions Mode page */
2574 		if (iec_m_pg[1] == arr[off + 1]) {
2575 			memcpy(iec_m_pg + 2, arr + off + 2,
2576 			       sizeof(iec_m_pg) - 2);
2577 			goto set_mode_changed_ua;
2578 		}
2579 		break;
2580 	default:
2581 		break;
2582 	}
2583 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2584 	return check_condition_result;
2585 set_mode_changed_ua:
2586 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2587 	return 0;
2588 }
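
/*
 * Only the three pages handled in the switch above (caching, control
 * and informational exceptions) accept MODE SELECT changes; a
 * successful change raises the MODE PARAMETERS CHANGED unit attention
 * via SDEBUG_UA_MODE_CHANGED.
 */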
2589 
2590 static int resp_temp_l_pg(unsigned char *arr)
2591 {
2592 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2593 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2594 		};
2595 
2596 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2597 	return sizeof(temp_l_pg);
2598 }
2599 
2600 static int resp_ie_l_pg(unsigned char *arr)
2601 {
2602 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2603 		};
2604 
2605 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2606 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2607 		arr[4] = THRESHOLD_EXCEEDED;
2608 		arr[5] = 0xff;
2609 	}
2610 	return sizeof(ie_l_pg);
2611 }
2612 
2613 static int resp_env_rep_l_spg(unsigned char *arr)
2614 {
2615 	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
2616 					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
2617 					 0x1, 0x0, 0x23, 0x8,
2618 					 0x0, 55, 72, 35, 55, 45, 0, 0,
2619 		};
2620 
2621 	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
2622 	return sizeof(env_rep_l_spg);
2623 }
2624 
2625 #define SDEBUG_MAX_LSENSE_SZ 512
2626 
2627 static int resp_log_sense(struct scsi_cmnd *scp,
2628 			  struct sdebug_dev_info *devip)
2629 {
2630 	int ppc, sp, pcode, subpcode;
2631 	u32 alloc_len, len, n;
2632 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2633 	unsigned char *cmd = scp->cmnd;
2634 
2635 	memset(arr, 0, sizeof(arr));
2636 	ppc = cmd[1] & 0x2;
2637 	sp = cmd[1] & 0x1;
2638 	if (ppc || sp) {
2639 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2640 		return check_condition_result;
2641 	}
2642 	pcode = cmd[2] & 0x3f;
2643 	subpcode = cmd[3] & 0xff;
2644 	alloc_len = get_unaligned_be16(cmd + 7);
2645 	arr[0] = pcode;
2646 	if (0 == subpcode) {
2647 		switch (pcode) {
2648 		case 0x0:	/* Supported log pages log page */
2649 			n = 4;
2650 			arr[n++] = 0x0;		/* this page */
2651 			arr[n++] = 0xd;		/* Temperature */
2652 			arr[n++] = 0x2f;	/* Informational exceptions */
2653 			arr[3] = n - 4;
2654 			break;
2655 		case 0xd:	/* Temperature log page */
2656 			arr[3] = resp_temp_l_pg(arr + 4);
2657 			break;
2658 		case 0x2f:	/* Informational exceptions log page */
2659 			arr[3] = resp_ie_l_pg(arr + 4);
2660 			break;
2661 		default:
2662 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2663 			return check_condition_result;
2664 		}
2665 	} else if (0xff == subpcode) {
2666 		arr[0] |= 0x40;
2667 		arr[1] = subpcode;
2668 		switch (pcode) {
2669 		case 0x0:	/* Supported log pages and subpages log page */
2670 			n = 4;
2671 			arr[n++] = 0x0;
2672 			arr[n++] = 0x0;		/* 0,0 page */
2673 			arr[n++] = 0x0;
2674 			arr[n++] = 0xff;	/* this page */
2675 			arr[n++] = 0xd;
2676 			arr[n++] = 0x0;		/* Temperature */
2677 			arr[n++] = 0xd;
2678 			arr[n++] = 0x1;		/* Environment reporting */
2679 			arr[n++] = 0xd;
2680 			arr[n++] = 0xff;	/* all 0xd subpages */
2681 			arr[n++] = 0x2f;
2682 			arr[n++] = 0x0;	/* Informational exceptions */
2683 			arr[n++] = 0x2f;
2684 			arr[n++] = 0xff;	/* all 0x2f subpages */
2685 			arr[3] = n - 4;
2686 			break;
2687 		case 0xd:	/* Temperature subpages */
2688 			n = 4;
2689 			arr[n++] = 0xd;
2690 			arr[n++] = 0x0;		/* Temperature */
2691 			arr[n++] = 0xd;
2692 			arr[n++] = 0x1;		/* Environment reporting */
2693 			arr[n++] = 0xd;
2694 			arr[n++] = 0xff;	/* these subpages */
2695 			arr[3] = n - 4;
2696 			break;
2697 		case 0x2f:	/* Informational exceptions subpages */
2698 			n = 4;
2699 			arr[n++] = 0x2f;
2700 			arr[n++] = 0x0;		/* Informational exceptions */
2701 			arr[n++] = 0x2f;
2702 			arr[n++] = 0xff;	/* these subpages */
2703 			arr[3] = n - 4;
2704 			break;
2705 		default:
2706 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2707 			return check_condition_result;
2708 		}
2709 	} else if (subpcode > 0) {
2710 		arr[0] |= 0x40;
2711 		arr[1] = subpcode;
2712 		if (pcode == 0xd && subpcode == 1)
2713 			arr[3] = resp_env_rep_l_spg(arr + 4);
2714 		else {
2715 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2716 			return check_condition_result;
2717 		}
2718 	} else {
2719 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2720 		return check_condition_result;
2721 	}
2722 	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
2723 	return fill_from_dev_buffer(scp, arr,
2724 		    min_t(u32, len, SDEBUG_MAX_LSENSE_SZ));
2725 }
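
/*
 * User space example (sg3_utils): "sg_logs --page=0xd /dev/sdX" fetches
 * the temperature page built by resp_temp_l_pg() and, assuming a recent
 * sg_logs, "sg_logs --page=0xd,0x1 /dev/sdX" the environment reporting
 * subpage.
 */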
2726 
2727 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2728 {
2729 	return devip->nr_zones != 0;
2730 }
2731 
2732 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2733 					unsigned long long lba)
2734 {
2735 	u32 zno = lba >> devip->zsize_shift;
2736 	struct sdeb_zone_state *zsp;
2737 
2738 	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
2739 		return &devip->zstate[zno];
2740 
2741 	/*
2742 	 * If the zone capacity is less than the zone size, adjust for gap
2743 	 * zones.
2744 	 */
2745 	zno = 2 * zno - devip->nr_conv_zones;
2746 	WARN_ONCE(zno >= devip->nr_zones, "%u >= %u\n", zno, devip->nr_zones);
2747 	zsp = &devip->zstate[zno];
2748 	if (lba >= zsp->z_start + zsp->z_size)
2749 		zsp++;
2750 	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2751 	return zsp;
2752 }
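
/*
 * Worked example of the gap adjustment above, assuming one conventional
 * zone followed by alternating sequential and gap zones (zcap < zsize):
 * an lba in the third sequential region gives zno = 3 from the shift,
 * which maps to zstate[] index 2 * 3 - 1 = 5; the zsp++ then corrects
 * for lbas that fall past the sequential zone's capacity into the
 * trailing gap zone.
 */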
2753 
2754 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2755 {
2756 	return zsp->z_type == ZBC_ZTYPE_CNV;
2757 }
2758 
2759 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
2760 {
2761 	return zsp->z_type == ZBC_ZTYPE_GAP;
2762 }
2763 
2764 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2765 {
2766 	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2767 }
2768 
2769 static void zbc_close_zone(struct sdebug_dev_info *devip,
2770 			   struct sdeb_zone_state *zsp)
2771 {
2772 	enum sdebug_z_cond zc;
2773 
2774 	if (!zbc_zone_is_seq(zsp))
2775 		return;
2776 
2777 	zc = zsp->z_cond;
2778 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2779 		return;
2780 
2781 	if (zc == ZC2_IMPLICIT_OPEN)
2782 		devip->nr_imp_open--;
2783 	else
2784 		devip->nr_exp_open--;
2785 
2786 	if (zsp->z_wp == zsp->z_start) {
2787 		zsp->z_cond = ZC1_EMPTY;
2788 	} else {
2789 		zsp->z_cond = ZC4_CLOSED;
2790 		devip->nr_closed++;
2791 	}
2792 }
2793 
2794 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2795 {
2796 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2797 	unsigned int i;
2798 
2799 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2800 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2801 			zbc_close_zone(devip, zsp);
2802 			return;
2803 		}
2804 	}
2805 }
2806 
2807 static void zbc_open_zone(struct sdebug_dev_info *devip,
2808 			  struct sdeb_zone_state *zsp, bool explicit)
2809 {
2810 	enum sdebug_z_cond zc;
2811 
2812 	if (!zbc_zone_is_seq(zsp))
2813 		return;
2814 
2815 	zc = zsp->z_cond;
2816 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2817 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2818 		return;
2819 
2820 	/* Close an implicit open zone if necessary */
2821 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2822 		zbc_close_zone(devip, zsp);
2823 	else if (devip->max_open &&
2824 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2825 		zbc_close_imp_open_zone(devip);
2826 
2827 	if (zsp->z_cond == ZC4_CLOSED)
2828 		devip->nr_closed--;
2829 	if (explicit) {
2830 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2831 		devip->nr_exp_open++;
2832 	} else {
2833 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2834 		devip->nr_imp_open++;
2835 	}
2836 }
2837 
2838 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2839 				     struct sdeb_zone_state *zsp)
2840 {
2841 	switch (zsp->z_cond) {
2842 	case ZC2_IMPLICIT_OPEN:
2843 		devip->nr_imp_open--;
2844 		break;
2845 	case ZC3_EXPLICIT_OPEN:
2846 		devip->nr_exp_open--;
2847 		break;
2848 	default:
2849 		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2850 			  zsp->z_start, zsp->z_cond);
2851 		break;
2852 	}
2853 	zsp->z_cond = ZC5_FULL;
2854 }
2855 
2856 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2857 		       unsigned long long lba, unsigned int num)
2858 {
2859 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2860 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2861 
2862 	if (!zbc_zone_is_seq(zsp))
2863 		return;
2864 
2865 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2866 		zsp->z_wp += num;
2867 		if (zsp->z_wp >= zend)
2868 			zbc_set_zone_full(devip, zsp);
2869 		return;
2870 	}
2871 
2872 	while (num) {
2873 		if (lba != zsp->z_wp)
2874 			zsp->z_non_seq_resource = true;
2875 
2876 		end = lba + num;
2877 		if (end >= zend) {
2878 			n = zend - lba;
2879 			zsp->z_wp = zend;
2880 		} else if (end > zsp->z_wp) {
2881 			n = num;
2882 			zsp->z_wp = end;
2883 		} else {
2884 			n = num;
2885 		}
2886 		if (zsp->z_wp >= zend)
2887 			zbc_set_zone_full(devip, zsp);
2888 
2889 		num -= n;
2890 		lba += n;
2891 		if (num) {
2892 			zsp++;
2893 			zend = zsp->z_start + zsp->z_size;
2894 		}
2895 	}
2896 }
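
/*
 * Note on the two paths above: for sequential write required (SWR)
 * zones the caller has already verified the lba against the write
 * pointer, so the wp simply advances. The loop handles sequential
 * write preferred (SWP) zones of host-aware devices, where a write may
 * land behind, on, or past the wp and may span several zones.
 */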
2897 
2898 static int check_zbc_access_params(struct scsi_cmnd *scp,
2899 			unsigned long long lba, unsigned int num, bool write)
2900 {
2901 	struct scsi_device *sdp = scp->device;
2902 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2903 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2904 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2905 
2906 	if (!write) {
2907 		if (devip->zmodel == BLK_ZONED_HA)
2908 			return 0;
2909 		/* For host-managed, reads cannot cross zone type boundaries */
2910 		if (zsp->z_type != zsp_end->z_type) {
2911 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2912 					LBA_OUT_OF_RANGE,
2913 					READ_INVDATA_ASCQ);
2914 			return check_condition_result;
2915 		}
2916 		return 0;
2917 	}
2918 
2919 	/* Writing into a gap zone is not allowed */
2920 	if (zbc_zone_is_gap(zsp)) {
2921 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
2922 				ATTEMPT_ACCESS_GAP);
2923 		return check_condition_result;
2924 	}
2925 
2926 	/* No restrictions for writes within conventional zones */
2927 	if (zbc_zone_is_conv(zsp)) {
2928 		if (!zbc_zone_is_conv(zsp_end)) {
2929 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2930 					LBA_OUT_OF_RANGE,
2931 					WRITE_BOUNDARY_ASCQ);
2932 			return check_condition_result;
2933 		}
2934 		return 0;
2935 	}
2936 
2937 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
2938 		/* Writes cannot cross sequential zone boundaries */
2939 		if (zsp_end != zsp) {
2940 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2941 					LBA_OUT_OF_RANGE,
2942 					WRITE_BOUNDARY_ASCQ);
2943 			return check_condition_result;
2944 		}
2945 		/* Cannot write full zones */
2946 		if (zsp->z_cond == ZC5_FULL) {
2947 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2948 					INVALID_FIELD_IN_CDB, 0);
2949 			return check_condition_result;
2950 		}
2951 		/* Writes must be aligned to the zone WP */
2952 		if (lba != zsp->z_wp) {
2953 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2954 					LBA_OUT_OF_RANGE,
2955 					UNALIGNED_WRITE_ASCQ);
2956 			return check_condition_result;
2957 		}
2958 	}
2959 
2960 	/* Handle implicit open of closed and empty zones */
2961 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2962 		if (devip->max_open &&
2963 		    devip->nr_exp_open >= devip->max_open) {
2964 			mk_sense_buffer(scp, DATA_PROTECT,
2965 					INSUFF_RES_ASC,
2966 					INSUFF_ZONE_ASCQ);
2967 			return check_condition_result;
2968 		}
2969 		zbc_open_zone(devip, zsp, false);
2970 	}
2971 
2972 	return 0;
2973 }
2974 
2975 static inline int check_device_access_params(struct scsi_cmnd *scp,
2976 			unsigned long long lba, unsigned int num,
2977 			bool write)
2978 {
2979 	struct scsi_device *sdp = scp->device;
2980 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2981 
2982 	if (lba + num > sdebug_capacity) {
2983 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2984 		return check_condition_result;
2985 	}
2986 	/* transfer length excessive (tie in to block limits VPD page) */
2987 	if (num > sdebug_store_sectors) {
2988 		/* needs work to find which cdb byte 'num' comes from */
2989 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2990 		return check_condition_result;
2991 	}
2992 	if (write && unlikely(sdebug_wp)) {
2993 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2994 		return check_condition_result;
2995 	}
2996 	if (sdebug_dev_is_zoned(devip))
2997 		return check_zbc_access_params(scp, lba, num, write);
2998 
2999 	return 0;
3000 }
3001 
3002 /*
3003  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3004  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3005  * that access any of the "stores" in struct sdeb_store_info should call this
3006  * function with bug_if_fake_rw set to true.
3007  */
3008 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3009 						bool bug_if_fake_rw)
3010 {
3011 	if (sdebug_fake_rw) {
3012 		BUG_ON(bug_if_fake_rw);	/* See note above */
3013 		return NULL;
3014 	}
3015 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3016 }
3017 
3018 /* Returns number of bytes copied or -1 if error. */
3019 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
3020 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
3021 {
3022 	int ret;
3023 	u64 block, rest = 0;
3024 	enum dma_data_direction dir;
3025 	struct scsi_data_buffer *sdb = &scp->sdb;
3026 	u8 *fsp;
3027 
3028 	if (do_write) {
3029 		dir = DMA_TO_DEVICE;
3030 		write_since_sync = true;
3031 	} else {
3032 		dir = DMA_FROM_DEVICE;
3033 	}
3034 
3035 	if (!sdb->length || !sip)
3036 		return 0;
3037 	if (scp->sc_data_direction != dir)
3038 		return -1;
3039 	fsp = sip->storep;
3040 
3041 	block = do_div(lba, sdebug_store_sectors);
3042 	if (block + num > sdebug_store_sectors)
3043 		rest = block + num - sdebug_store_sectors;
3044 
3045 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3046 		   fsp + (block * sdebug_sector_size),
3047 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
3048 	if (ret != (num - rest) * sdebug_sector_size)
3049 		return ret;
3050 
3051 	if (rest) {
3052 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
3053 			    fsp, rest * sdebug_sector_size,
3054 			    sg_skip + ((num - rest) * sdebug_sector_size),
3055 			    do_write);
3056 	}
3057 
3058 	return ret;
3059 }
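
/*
 * Wrap example for the do_div() above: with a store of 32768 sectors
 * and a larger reported capacity (virtual_gb), an access at lba 32770
 * maps to block 2 of the store, and a transfer crossing the end of the
 * store is split into the two sg_copy_buffer() calls via "rest".
 */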
3060 
3061 /* Returns number of bytes copied or -1 if error. */
3062 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3063 {
3064 	struct scsi_data_buffer *sdb = &scp->sdb;
3065 
3066 	if (!sdb->length)
3067 		return 0;
3068 	if (scp->sc_data_direction != DMA_TO_DEVICE)
3069 		return -1;
3070 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3071 			      num * sdebug_sector_size, 0, true);
3072 }
3073 
3074 /* If the num blocks at sip->storep+lba compare equal to the first half
3075  * of arr, copy the second half of arr to sip->storep+lba and return
3076  * true. If the comparison fails then return false. */
3077 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3078 			      const u8 *arr, bool compare_only)
3079 {
3080 	bool res;
3081 	u64 block, rest = 0;
3082 	u32 store_blks = sdebug_store_sectors;
3083 	u32 lb_size = sdebug_sector_size;
3084 	u8 *fsp = sip->storep;
3085 
3086 	block = do_div(lba, store_blks);
3087 	if (block + num > store_blks)
3088 		rest = block + num - store_blks;
3089 
3090 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3091 	if (!res)
3092 		return res;
3093 	if (rest)
3094 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
3095 			      rest * lb_size);
3096 	if (!res)
3097 		return res;
3098 	if (compare_only)
3099 		return true;
3100 	arr += num * lb_size;
3101 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3102 	if (rest)
3103 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3104 	return res;
3105 }
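
/*
 * Minimal caller-side sketch, assuming a hypothetical buffer laid out
 * the way COMPARE AND WRITE expects it: the first num logical blocks
 * of arr are the verify data and the top num blocks are the data
 * written on a successful compare (see resp_comp_write() further
 * below).
 */
static bool __maybe_unused sdeb_example_caw(struct sdeb_store_info *sip,
					    u64 lba, u32 num, const u8 *arr)
{
	/* arr must hold 2 * num * sdebug_sector_size bytes */
	return comp_write_worker(sip, lba, num, arr, false);
}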
3106 
3107 static __be16 dif_compute_csum(const void *buf, int len)
3108 {
3109 	__be16 csum;
3110 
3111 	if (sdebug_guard)
3112 		csum = (__force __be16)ip_compute_csum(buf, len);
3113 	else
3114 		csum = cpu_to_be16(crc_t10dif(buf, len));
3115 
3116 	return csum;
3117 }
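
/*
 * A minimal sketch, assuming a hypothetical caller that seeds one PI
 * tuple the way dif_verify() below expects it: a guard tag over the
 * block data and, for type 1 protection, the low 32 bits of the
 * sector number in the reference tag.
 */
static void __maybe_unused sdeb_example_fill_pi(struct t10_pi_tuple *sdt,
						const u8 *data,
						sector_t sector)
{
	sdt->guard_tag = dif_compute_csum(data, sdebug_sector_size);
	sdt->app_tag = cpu_to_be16(0);
	sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
}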
3118 
3119 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3120 		      sector_t sector, u32 ei_lba)
3121 {
3122 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
3123 
3124 	if (sdt->guard_tag != csum) {
3125 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3126 			(unsigned long)sector,
3127 			be16_to_cpu(sdt->guard_tag),
3128 			be16_to_cpu(csum));
3129 		return 0x01;
3130 	}
3131 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3132 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3133 		pr_err("REF check failed on sector %lu\n",
3134 			(unsigned long)sector);
3135 		return 0x03;
3136 	}
3137 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3138 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3139 		pr_err("REF check failed on sector %lu\n",
3140 			(unsigned long)sector);
3141 		return 0x03;
3142 	}
3143 	return 0;
3144 }
3145 
3146 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3147 			  unsigned int sectors, bool read)
3148 {
3149 	size_t resid;
3150 	void *paddr;
3151 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3152 						scp->device->hostdata, true);
3153 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3154 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3155 	struct sg_mapping_iter miter;
3156 
3157 	/* Bytes of protection data to copy into sgl */
3158 	resid = sectors * sizeof(*dif_storep);
3159 
3160 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3161 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3162 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3163 
3164 	while (sg_miter_next(&miter) && resid > 0) {
3165 		size_t len = min_t(size_t, miter.length, resid);
3166 		void *start = dif_store(sip, sector);
3167 		size_t rest = 0;
3168 
3169 		if (dif_store_end < start + len)
3170 			rest = start + len - dif_store_end;
3171 
3172 		paddr = miter.addr;
3173 
3174 		if (read)
3175 			memcpy(paddr, start, len - rest);
3176 		else
3177 			memcpy(start, paddr, len - rest);
3178 
3179 		if (rest) {
3180 			if (read)
3181 				memcpy(paddr + len - rest, dif_storep, rest);
3182 			else
3183 				memcpy(dif_storep, paddr + len - rest, rest);
3184 		}
3185 
3186 		sector += len / sizeof(*dif_storep);
3187 		resid -= len;
3188 	}
3189 	sg_miter_stop(&miter);
3190 }
3191 
3192 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3193 			    unsigned int sectors, u32 ei_lba)
3194 {
3195 	int ret = 0;
3196 	unsigned int i;
3197 	sector_t sector;
3198 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3199 						scp->device->hostdata, true);
3200 	struct t10_pi_tuple *sdt;
3201 
3202 	for (i = 0; i < sectors; i++, ei_lba++) {
3203 		sector = start_sec + i;
3204 		sdt = dif_store(sip, sector);
3205 
3206 		if (sdt->app_tag == cpu_to_be16(0xffff))
3207 			continue;
3208 
3209 		/*
3210 		 * Because scsi_debug acts as both initiator and
3211 		 * target we proceed to verify the PI even if
3212 		 * RDPROTECT=3. This is done so the "initiator" knows
3213 		 * which type of error to return. Otherwise we would
3214 		 * have to iterate over the PI twice.
3215 		 */
3216 		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3217 			ret = dif_verify(sdt, lba2fake_store(sip, sector),
3218 					 sector, ei_lba);
3219 			if (ret) {
3220 				dif_errors++;
3221 				break;
3222 			}
3223 		}
3224 	}
3225 
3226 	dif_copy_prot(scp, start_sec, sectors, true);
3227 	dix_reads++;
3228 
3229 	return ret;
3230 }
3231 
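/*
 * The four helpers below wrap the per-store rwlock. When
 * sdebug_no_rwlock is set they skip locking entirely; the
 * __acquire()/__release() calls are only sparse context annotations
 * and generate no code.
 */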
3232 static inline void
3233 sdeb_read_lock(struct sdeb_store_info *sip)
3234 {
3235 	if (sdebug_no_rwlock) {
3236 		if (sip)
3237 			__acquire(&sip->macc_lck);
3238 		else
3239 			__acquire(&sdeb_fake_rw_lck);
3240 	} else {
3241 		if (sip)
3242 			read_lock(&sip->macc_lck);
3243 		else
3244 			read_lock(&sdeb_fake_rw_lck);
3245 	}
3246 }
3247 
3248 static inline void
3249 sdeb_read_unlock(struct sdeb_store_info *sip)
3250 {
3251 	if (sdebug_no_rwlock) {
3252 		if (sip)
3253 			__release(&sip->macc_lck);
3254 		else
3255 			__release(&sdeb_fake_rw_lck);
3256 	} else {
3257 		if (sip)
3258 			read_unlock(&sip->macc_lck);
3259 		else
3260 			read_unlock(&sdeb_fake_rw_lck);
3261 	}
3262 }
3263 
3264 static inline void
3265 sdeb_write_lock(struct sdeb_store_info *sip)
3266 {
3267 	if (sdebug_no_rwlock) {
3268 		if (sip)
3269 			__acquire(&sip->macc_lck);
3270 		else
3271 			__acquire(&sdeb_fake_rw_lck);
3272 	} else {
3273 		if (sip)
3274 			write_lock(&sip->macc_lck);
3275 		else
3276 			write_lock(&sdeb_fake_rw_lck);
3277 	}
3278 }
3279 
3280 static inline void
3281 sdeb_write_unlock(struct sdeb_store_info *sip)
3282 {
3283 	if (sdebug_no_rwlock) {
3284 		if (sip)
3285 			__release(&sip->macc_lck);
3286 		else
3287 			__release(&sdeb_fake_rw_lck);
3288 	} else {
3289 		if (sip)
3290 			write_unlock(&sip->macc_lck);
3291 		else
3292 			write_unlock(&sdeb_fake_rw_lck);
3293 	}
3294 }
3295 
3296 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3297 {
3298 	bool check_prot;
3299 	u32 num;
3300 	u32 ei_lba;
3301 	int ret;
3302 	u64 lba;
3303 	struct sdeb_store_info *sip = devip2sip(devip, true);
3304 	u8 *cmd = scp->cmnd;
3305 
3306 	switch (cmd[0]) {
3307 	case READ_16:
3308 		ei_lba = 0;
3309 		lba = get_unaligned_be64(cmd + 2);
3310 		num = get_unaligned_be32(cmd + 10);
3311 		check_prot = true;
3312 		break;
3313 	case READ_10:
3314 		ei_lba = 0;
3315 		lba = get_unaligned_be32(cmd + 2);
3316 		num = get_unaligned_be16(cmd + 7);
3317 		check_prot = true;
3318 		break;
3319 	case READ_6:
3320 		ei_lba = 0;
3321 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3322 		      (u32)(cmd[1] & 0x1f) << 16;
3323 		num = (0 == cmd[4]) ? 256 : cmd[4];
3324 		check_prot = true;
3325 		break;
3326 	case READ_12:
3327 		ei_lba = 0;
3328 		lba = get_unaligned_be32(cmd + 2);
3329 		num = get_unaligned_be32(cmd + 6);
3330 		check_prot = true;
3331 		break;
3332 	case XDWRITEREAD_10:
3333 		ei_lba = 0;
3334 		lba = get_unaligned_be32(cmd + 2);
3335 		num = get_unaligned_be16(cmd + 7);
3336 		check_prot = false;
3337 		break;
3338 	default:	/* assume READ(32) */
3339 		lba = get_unaligned_be64(cmd + 12);
3340 		ei_lba = get_unaligned_be32(cmd + 20);
3341 		num = get_unaligned_be32(cmd + 28);
3342 		check_prot = false;
3343 		break;
3344 	}
3345 	if (unlikely(have_dif_prot && check_prot)) {
3346 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3347 		    (cmd[1] & 0xe0)) {
3348 			mk_sense_invalid_opcode(scp);
3349 			return check_condition_result;
3350 		}
3351 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3352 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3353 		    (cmd[1] & 0xe0) == 0)
3354 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3355 				    "to DIF device\n");
3356 	}
3357 	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3358 		     atomic_read(&sdeb_inject_pending))) {
3359 		num /= 2;
3360 		atomic_set(&sdeb_inject_pending, 0);
3361 	}
3362 
3363 	ret = check_device_access_params(scp, lba, num, false);
3364 	if (ret)
3365 		return ret;
3366 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3367 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3368 		     ((lba + num) > sdebug_medium_error_start))) {
3369 		/* claim unrecoverable read error */
3370 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3371 		/* set info field and valid bit for fixed descriptor */
3372 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3373 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3374 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3375 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3376 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3377 		}
3378 		scsi_set_resid(scp, scsi_bufflen(scp));
3379 		return check_condition_result;
3380 	}
3381 
3382 	sdeb_read_lock(sip);
3383 
3384 	/* DIX + T10 DIF */
3385 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3386 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
3387 		case 1: /* Guard tag error */
3388 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3389 				sdeb_read_unlock(sip);
3390 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3391 				return check_condition_result;
3392 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3393 				sdeb_read_unlock(sip);
3394 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3395 				return illegal_condition_result;
3396 			}
3397 			break;
3398 		case 3: /* Reference tag error */
3399 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3400 				sdeb_read_unlock(sip);
3401 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3402 				return check_condition_result;
3403 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3404 				sdeb_read_unlock(sip);
3405 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3406 				return illegal_condition_result;
3407 			}
3408 			break;
3409 		}
3410 	}
3411 
3412 	ret = do_device_access(sip, scp, 0, lba, num, false);
3413 	sdeb_read_unlock(sip);
3414 	if (unlikely(ret == -1))
3415 		return DID_ERROR << 16;
3416 
3417 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3418 
3419 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3420 		     atomic_read(&sdeb_inject_pending))) {
3421 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3422 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3423 			atomic_set(&sdeb_inject_pending, 0);
3424 			return check_condition_result;
3425 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3426 			/* Logical block guard check failed */
3427 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3428 			atomic_set(&sdeb_inject_pending, 0);
3429 			return illegal_condition_result;
3430 		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3431 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3432 			atomic_set(&sdeb_inject_pending, 0);
3433 			return illegal_condition_result;
3434 		}
3435 	}
3436 	return 0;
3437 }
3438 
3439 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3440 			     unsigned int sectors, u32 ei_lba)
3441 {
3442 	int ret;
3443 	struct t10_pi_tuple *sdt;
3444 	void *daddr;
3445 	sector_t sector = start_sec;
3446 	int ppage_offset;
3447 	int dpage_offset;
3448 	struct sg_mapping_iter diter;
3449 	struct sg_mapping_iter piter;
3450 
3451 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3452 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3453 
3454 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3455 			scsi_prot_sg_count(SCpnt),
3456 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3457 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3458 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3459 
3460 	/* For each protection page */
3461 	while (sg_miter_next(&piter)) {
3462 		dpage_offset = 0;
3463 		if (WARN_ON(!sg_miter_next(&diter))) {
3464 			ret = 0x01;
3465 			goto out;
3466 		}
3467 
3468 		for (ppage_offset = 0; ppage_offset < piter.length;
3469 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3470 			/* If we're at the end of the current
3471 			 * data page advance to the next one
3472 			 */
3473 			if (dpage_offset >= diter.length) {
3474 				if (WARN_ON(!sg_miter_next(&diter))) {
3475 					ret = 0x01;
3476 					goto out;
3477 				}
3478 				dpage_offset = 0;
3479 			}
3480 
3481 			sdt = piter.addr + ppage_offset;
3482 			daddr = diter.addr + dpage_offset;
3483 
3484 			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3485 				ret = dif_verify(sdt, daddr, sector, ei_lba);
3486 				if (ret)
3487 					goto out;
3488 			}
3489 
3490 			sector++;
3491 			ei_lba++;
3492 			dpage_offset += sdebug_sector_size;
3493 		}
3494 		diter.consumed = dpage_offset;
3495 		sg_miter_stop(&diter);
3496 	}
3497 	sg_miter_stop(&piter);
3498 
3499 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3500 	dix_writes++;
3501 
3502 	return 0;
3503 
3504 out:
3505 	dif_errors++;
3506 	sg_miter_stop(&diter);
3507 	sg_miter_stop(&piter);
3508 	return ret;
3509 }
3510 
3511 static unsigned long lba_to_map_index(sector_t lba)
3512 {
3513 	if (sdebug_unmap_alignment)
3514 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3515 	sector_div(lba, sdebug_unmap_granularity);
3516 	return lba;
3517 }
3518 
3519 static sector_t map_index_to_lba(unsigned long index)
3520 {
3521 	sector_t lba = index * sdebug_unmap_granularity;
3522 
3523 	if (sdebug_unmap_alignment)
3524 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3525 	return lba;
3526 }
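
/*
 * Worked example for the two helpers above, assuming the hypothetical
 * values sdebug_unmap_granularity = 4 and sdebug_unmap_alignment = 1:
 * lba_to_map_index() maps LBAs 1..4 to index 1 ((1 + 3) / 4 == 1) and
 * map_index_to_lba(1) == 4 - 3 == 1 returns the first LBA of that
 * grain, so the bitmap tracks provisioning in aligned 4-block grains.
 */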
3527 
3528 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3529 			      unsigned int *num)
3530 {
3531 	sector_t end;
3532 	unsigned int mapped;
3533 	unsigned long index;
3534 	unsigned long next;
3535 
3536 	index = lba_to_map_index(lba);
3537 	mapped = test_bit(index, sip->map_storep);
3538 
3539 	if (mapped)
3540 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3541 	else
3542 		next = find_next_bit(sip->map_storep, map_size, index);
3543 
3544 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3545 	*num = end - lba;
3546 	return mapped;
3547 }
3548 
3549 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3550 		       unsigned int len)
3551 {
3552 	sector_t end = lba + len;
3553 
3554 	while (lba < end) {
3555 		unsigned long index = lba_to_map_index(lba);
3556 
3557 		if (index < map_size)
3558 			set_bit(index, sip->map_storep);
3559 
3560 		lba = map_index_to_lba(index + 1);
3561 	}
3562 }
3563 
3564 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3565 			 unsigned int len)
3566 {
3567 	sector_t end = lba + len;
3568 	u8 *fsp = sip->storep;
3569 
3570 	while (lba < end) {
3571 		unsigned long index = lba_to_map_index(lba);
3572 
3573 		if (lba == map_index_to_lba(index) &&
3574 		    lba + sdebug_unmap_granularity <= end &&
3575 		    index < map_size) {
3576 			clear_bit(index, sip->map_storep);
3577 			if (sdebug_lbprz) {  /* LBPRZ=1: zeros; LBPRZ=2: 0xff bytes */
3578 				memset(fsp + lba * sdebug_sector_size,
3579 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3580 				       sdebug_sector_size *
3581 				       sdebug_unmap_granularity);
3582 			}
3583 			if (sip->dif_storep) {
3584 				memset(sip->dif_storep + lba, 0xff,
3585 				       sizeof(*sip->dif_storep) *
3586 				       sdebug_unmap_granularity);
3587 			}
3588 		}
3589 		lba = map_index_to_lba(index + 1);
3590 	}
3591 }
3592 
3593 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3594 {
3595 	bool check_prot;
3596 	u32 num;
3597 	u32 ei_lba;
3598 	int ret;
3599 	u64 lba;
3600 	struct sdeb_store_info *sip = devip2sip(devip, true);
3601 	u8 *cmd = scp->cmnd;
3602 
3603 	switch (cmd[0]) {
3604 	case WRITE_16:
3605 		ei_lba = 0;
3606 		lba = get_unaligned_be64(cmd + 2);
3607 		num = get_unaligned_be32(cmd + 10);
3608 		check_prot = true;
3609 		break;
3610 	case WRITE_10:
3611 		ei_lba = 0;
3612 		lba = get_unaligned_be32(cmd + 2);
3613 		num = get_unaligned_be16(cmd + 7);
3614 		check_prot = true;
3615 		break;
3616 	case WRITE_6:
3617 		ei_lba = 0;
3618 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3619 		      (u32)(cmd[1] & 0x1f) << 16;
3620 		num = (0 == cmd[4]) ? 256 : cmd[4];
3621 		check_prot = true;
3622 		break;
3623 	case WRITE_12:
3624 		ei_lba = 0;
3625 		lba = get_unaligned_be32(cmd + 2);
3626 		num = get_unaligned_be32(cmd + 6);
3627 		check_prot = true;
3628 		break;
3629 	case 0x53:	/* XDWRITEREAD(10) */
3630 		ei_lba = 0;
3631 		lba = get_unaligned_be32(cmd + 2);
3632 		num = get_unaligned_be16(cmd + 7);
3633 		check_prot = false;
3634 		break;
3635 	default:	/* assume WRITE(32) */
3636 		lba = get_unaligned_be64(cmd + 12);
3637 		ei_lba = get_unaligned_be32(cmd + 20);
3638 		num = get_unaligned_be32(cmd + 28);
3639 		check_prot = false;
3640 		break;
3641 	}
3642 	if (unlikely(have_dif_prot && check_prot)) {
3643 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3644 		    (cmd[1] & 0xe0)) {
3645 			mk_sense_invalid_opcode(scp);
3646 			return check_condition_result;
3647 		}
3648 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3649 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3650 		    (cmd[1] & 0xe0) == 0)
3651 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3652 				    "to DIF device\n");
3653 	}
3654 
3655 	sdeb_write_lock(sip);
3656 	ret = check_device_access_params(scp, lba, num, true);
3657 	if (ret) {
3658 		sdeb_write_unlock(sip);
3659 		return ret;
3660 	}
3661 
3662 	/* DIX + T10 DIF */
3663 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3664 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
3665 		case 1: /* Guard tag error */
3666 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3667 				sdeb_write_unlock(sip);
3668 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3669 				return illegal_condition_result;
3670 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3671 				sdeb_write_unlock(sip);
3672 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3673 				return check_condition_result;
3674 			}
3675 			break;
3676 		case 3: /* Reference tag error */
3677 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3678 				sdeb_write_unlock(sip);
3679 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3680 				return illegal_condition_result;
3681 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3682 				sdeb_write_unlock(sip);
3683 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3684 				return check_condition_result;
3685 			}
3686 			break;
3687 		}
3688 	}
3689 
3690 	ret = do_device_access(sip, scp, 0, lba, num, true);
3691 	if (unlikely(scsi_debug_lbp()))
3692 		map_region(sip, lba, num);
3693 	/* If ZBC zone then bump its write pointer */
3694 	if (sdebug_dev_is_zoned(devip))
3695 		zbc_inc_wp(devip, lba, num);
3696 	sdeb_write_unlock(sip);
3697 	if (unlikely(-1 == ret))
3698 		return DID_ERROR << 16;
3699 	else if (unlikely(sdebug_verbose &&
3700 			  (ret < (num * sdebug_sector_size))))
3701 		sdev_printk(KERN_INFO, scp->device,
3702 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3703 			    my_name, num * sdebug_sector_size, ret);
3704 
3705 	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3706 		     atomic_read(&sdeb_inject_pending))) {
3707 		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3708 			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3709 			atomic_set(&sdeb_inject_pending, 0);
3710 			return check_condition_result;
3711 		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3712 			/* Logical block guard check failed */
3713 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3714 			atomic_set(&sdeb_inject_pending, 0);
3715 			return illegal_condition_result;
3716 		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3717 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3718 			atomic_set(&sdeb_inject_pending, 0);
3719 			return illegal_condition_result;
3720 		}
3721 	}
3722 	return 0;
3723 }
3724 
3725 /*
3726  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3727  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3728  */
3729 static int resp_write_scat(struct scsi_cmnd *scp,
3730 			   struct sdebug_dev_info *devip)
3731 {
3732 	u8 *cmd = scp->cmnd;
3733 	u8 *lrdp = NULL;
3734 	u8 *up;
3735 	struct sdeb_store_info *sip = devip2sip(devip, true);
3736 	u8 wrprotect;
3737 	u16 lbdof, num_lrd, k;
3738 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3739 	u32 lb_size = sdebug_sector_size;
3740 	u32 ei_lba;
3741 	u64 lba;
3742 	int ret, res;
3743 	bool is_16;
3744 	static const u32 lrd_size = 32; /* also the parameter list header size */
3745 
3746 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3747 		is_16 = false;
3748 		wrprotect = (cmd[10] >> 5) & 0x7;
3749 		lbdof = get_unaligned_be16(cmd + 12);
3750 		num_lrd = get_unaligned_be16(cmd + 16);
3751 		bt_len = get_unaligned_be32(cmd + 28);
3752 	} else {        /* that leaves WRITE SCATTERED(16) */
3753 		is_16 = true;
3754 		wrprotect = (cmd[2] >> 5) & 0x7;
3755 		lbdof = get_unaligned_be16(cmd + 4);
3756 		num_lrd = get_unaligned_be16(cmd + 8);
3757 		bt_len = get_unaligned_be32(cmd + 10);
3758 		if (unlikely(have_dif_prot)) {
3759 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3760 			    wrprotect) {
3761 				mk_sense_invalid_opcode(scp);
3762 				return illegal_condition_result;
3763 			}
3764 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3765 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3766 			     wrprotect == 0)
3767 				sdev_printk(KERN_ERR, scp->device,
3768 					    "Unprotected WR to DIF device\n");
3769 		}
3770 	}
3771 	if ((num_lrd == 0) || (bt_len == 0))
3772 		return 0;       /* T10 says these do-nothings are not errors */
3773 	if (lbdof == 0) {
3774 		if (sdebug_verbose)
3775 			sdev_printk(KERN_INFO, scp->device,
3776 				"%s: %s: LB Data Offset field bad\n",
3777 				my_name, __func__);
3778 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3779 		return illegal_condition_result;
3780 	}
3781 	lbdof_blen = lbdof * lb_size;
3782 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3783 		if (sdebug_verbose)
3784 			sdev_printk(KERN_INFO, scp->device,
3785 				"%s: %s: LBA range descriptors don't fit\n",
3786 				my_name, __func__);
3787 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3788 		return illegal_condition_result;
3789 	}
3790 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3791 	if (lrdp == NULL)
3792 		return SCSI_MLQUEUE_HOST_BUSY;
3793 	if (sdebug_verbose)
3794 		sdev_printk(KERN_INFO, scp->device,
3795 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3796 			my_name, __func__, lbdof_blen);
3797 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3798 	if (res == -1) {
3799 		ret = DID_ERROR << 16;
3800 		goto err_out;
3801 	}
3802 
3803 	sdeb_write_lock(sip);
3804 	sg_off = lbdof_blen;
3805 	/* Spec: the Buffer Transfer Length field counts LBs in the data-out */
3806 	cum_lb = 0;
3807 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3808 		lba = get_unaligned_be64(up + 0);
3809 		num = get_unaligned_be32(up + 8);
3810 		if (sdebug_verbose)
3811 			sdev_printk(KERN_INFO, scp->device,
3812 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3813 				my_name, __func__, k, lba, num, sg_off);
3814 		if (num == 0)
3815 			continue;
3816 		ret = check_device_access_params(scp, lba, num, true);
3817 		if (ret)
3818 			goto err_out_unlock;
3819 		num_by = num * lb_size;
3820 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3821 
3822 		if ((cum_lb + num) > bt_len) {
3823 			if (sdebug_verbose)
3824 				sdev_printk(KERN_INFO, scp->device,
3825 				    "%s: %s: sum of blocks > data provided\n",
3826 				    my_name, __func__);
3827 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3828 					0);
3829 			ret = illegal_condition_result;
3830 			goto err_out_unlock;
3831 		}
3832 
3833 		/* DIX + T10 DIF */
3834 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3835 			int prot_ret = prot_verify_write(scp, lba, num,
3836 							 ei_lba);
3837 
3838 			if (prot_ret) {
3839 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3840 						prot_ret);
3841 				ret = illegal_condition_result;
3842 				goto err_out_unlock;
3843 			}
3844 		}
3845 
3846 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3847 		/* If ZBC zone then bump its write pointer */
3848 		if (sdebug_dev_is_zoned(devip))
3849 			zbc_inc_wp(devip, lba, num);
3850 		if (unlikely(scsi_debug_lbp()))
3851 			map_region(sip, lba, num);
3852 		if (unlikely(-1 == ret)) {
3853 			ret = DID_ERROR << 16;
3854 			goto err_out_unlock;
3855 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3856 			sdev_printk(KERN_INFO, scp->device,
3857 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3858 			    my_name, num_by, ret);
3859 
3860 		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3861 			     atomic_read(&sdeb_inject_pending))) {
3862 			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3863 				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3864 				atomic_set(&sdeb_inject_pending, 0);
3865 				ret = check_condition_result;
3866 				goto err_out_unlock;
3867 			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3868 				/* Logical block guard check failed */
3869 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3870 				atomic_set(&sdeb_inject_pending, 0);
3871 				ret = illegal_condition_result;
3872 				goto err_out_unlock;
3873 			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3874 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3875 				atomic_set(&sdeb_inject_pending, 0);
3876 				ret = illegal_condition_result;
3877 				goto err_out_unlock;
3878 			}
3879 		}
3880 		sg_off += num_by;
3881 		cum_lb += num;
3882 	}
3883 	ret = 0;
3884 err_out_unlock:
3885 	sdeb_write_unlock(sip);
3886 err_out:
3887 	kfree(lrdp);
3888 	return ret;
3889 }
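
/*
 * A minimal sketch, assuming a hypothetical initiator-side helper that
 * fills one 32-byte LBA range descriptor in the layout parsed above:
 * big-endian LBA at offset 0 and number of logical blocks at offset 8
 * (WRITE SCATTERED(32) also carries the expected initial LBA at 12).
 */
static void __maybe_unused sdeb_example_fill_lrd(u8 *lrd, u64 lba, u32 num)
{
	memset(lrd, 0, 32);
	put_unaligned_be64(lba, lrd + 0);
	put_unaligned_be32(num, lrd + 8);
}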
3890 
3891 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3892 			   u32 ei_lba, bool unmap, bool ndob)
3893 {
3894 	struct scsi_device *sdp = scp->device;
3895 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3896 	unsigned long long i;
3897 	u64 block, lbaa;
3898 	u32 lb_size = sdebug_sector_size;
3899 	int ret;
3900 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3901 						scp->device->hostdata, true);
3902 	u8 *fs1p;
3903 	u8 *fsp;
3904 
3905 	sdeb_write_lock(sip);
3906 
3907 	ret = check_device_access_params(scp, lba, num, true);
3908 	if (ret) {
3909 		sdeb_write_unlock(sip);
3910 		return ret;
3911 	}
3912 
3913 	if (unmap && scsi_debug_lbp()) {
3914 		unmap_region(sip, lba, num);
3915 		goto out;
3916 	}
3917 	lbaa = lba;
3918 	block = do_div(lbaa, sdebug_store_sectors);
3919 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3920 	fsp = sip->storep;
3921 	fs1p = fsp + (block * lb_size);
3922 	if (ndob) {
3923 		memset(fs1p, 0, lb_size);
3924 		ret = 0;
3925 	} else
3926 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3927 
3928 	if (-1 == ret) {
3929 		sdeb_write_unlock(sip);
3930 		return DID_ERROR << 16;
3931 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3932 		sdev_printk(KERN_INFO, scp->device,
3933 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3934 			    my_name, "write same", lb_size, ret);
3935 
3936 	/* Copy first sector to remaining blocks */
3937 	for (i = 1 ; i < num ; i++) {
3938 		lbaa = lba + i;
3939 		block = do_div(lbaa, sdebug_store_sectors);
3940 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3941 	}
3942 	if (scsi_debug_lbp())
3943 		map_region(sip, lba, num);
3944 	/* If ZBC zone then bump its write pointer */
3945 	if (sdebug_dev_is_zoned(devip))
3946 		zbc_inc_wp(devip, lba, num);
3947 out:
3948 	sdeb_write_unlock(sip);
3949 
3950 	return 0;
3951 }
3952 
3953 static int resp_write_same_10(struct scsi_cmnd *scp,
3954 			      struct sdebug_dev_info *devip)
3955 {
3956 	u8 *cmd = scp->cmnd;
3957 	u32 lba;
3958 	u16 num;
3959 	u32 ei_lba = 0;
3960 	bool unmap = false;
3961 
3962 	if (cmd[1] & 0x8) {
3963 		if (sdebug_lbpws10 == 0) {
3964 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3965 			return check_condition_result;
3966 		} else
3967 			unmap = true;
3968 	}
3969 	lba = get_unaligned_be32(cmd + 2);
3970 	num = get_unaligned_be16(cmd + 7);
3971 	if (num > sdebug_write_same_length) {
3972 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3973 		return check_condition_result;
3974 	}
3975 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3976 }
3977 
3978 static int resp_write_same_16(struct scsi_cmnd *scp,
3979 			      struct sdebug_dev_info *devip)
3980 {
3981 	u8 *cmd = scp->cmnd;
3982 	u64 lba;
3983 	u32 num;
3984 	u32 ei_lba = 0;
3985 	bool unmap = false;
3986 	bool ndob = false;
3987 
3988 	if (cmd[1] & 0x8) {	/* UNMAP */
3989 		if (sdebug_lbpws == 0) {
3990 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3991 			return check_condition_result;
3992 		} else
3993 			unmap = true;
3994 	}
3995 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3996 		ndob = true;
3997 	lba = get_unaligned_be64(cmd + 2);
3998 	num = get_unaligned_be32(cmd + 10);
3999 	if (num > sdebug_write_same_length) {
4000 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4001 		return check_condition_result;
4002 	}
4003 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4004 }
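
/*
 * A minimal sketch, assuming a hypothetical initiator that builds the
 * WRITE SAME(16) CDB decoded above, with the UNMAP bit set and NDOB
 * clear.
 */
static void __maybe_unused sdeb_example_ws16_cdb(u8 *cdb, u64 lba, u32 num)
{
	memset(cdb, 0, 16);
	cdb[0] = WRITE_SAME_16;
	cdb[1] = 0x8;			/* UNMAP */
	put_unaligned_be64(lba, cdb + 2);
	put_unaligned_be32(num, cdb + 10);
}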
4005 
4006 /* Note the mode field is in the same position as the (lower) service action
4007  * field. For the Report supported operation codes command, SPC-4 suggests
4008  * each mode of this command should be reported separately; left for the future. */
4009 static int resp_write_buffer(struct scsi_cmnd *scp,
4010 			     struct sdebug_dev_info *devip)
4011 {
4012 	u8 *cmd = scp->cmnd;
4013 	struct scsi_device *sdp = scp->device;
4014 	struct sdebug_dev_info *dp;
4015 	u8 mode;
4016 
4017 	mode = cmd[1] & 0x1f;
4018 	switch (mode) {
4019 	case 0x4:	/* download microcode (MC) and activate (ACT) */
4020 		/* set UAs on this device only */
4021 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4022 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4023 		break;
4024 	case 0x5:	/* download MC, save and ACT */
4025 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4026 		break;
4027 	case 0x6:	/* download MC with offsets and ACT */
4028 		/* set UAs on most devices (LUs) in this target */
4029 		list_for_each_entry(dp,
4030 				    &devip->sdbg_host->dev_info_list,
4031 				    dev_list)
4032 			if (dp->target == sdp->id) {
4033 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4034 				if (devip != dp)
4035 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4036 						dp->uas_bm);
4037 			}
4038 		break;
4039 	case 0x7:	/* download MC with offsets, save, and ACT */
4040 		/* set UA on all devices (LUs) in this target */
4041 		list_for_each_entry(dp,
4042 				    &devip->sdbg_host->dev_info_list,
4043 				    dev_list)
4044 			if (dp->target == sdp->id)
4045 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4046 					dp->uas_bm);
4047 		break;
4048 	default:
4049 		/* do nothing for this command for other mode values */
4050 		break;
4051 	}
4052 	return 0;
4053 }
4054 
4055 static int resp_comp_write(struct scsi_cmnd *scp,
4056 			   struct sdebug_dev_info *devip)
4057 {
4058 	u8 *cmd = scp->cmnd;
4059 	u8 *arr;
4060 	struct sdeb_store_info *sip = devip2sip(devip, true);
4061 	u64 lba;
4062 	u32 dnum;
4063 	u32 lb_size = sdebug_sector_size;
4064 	u8 num;
4065 	int ret;
4066 	int retval = 0;
4067 
4068 	lba = get_unaligned_be64(cmd + 2);
4069 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
4070 	if (0 == num)
4071 		return 0;	/* degenerate case, not an error */
4072 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4073 	    (cmd[1] & 0xe0)) {
4074 		mk_sense_invalid_opcode(scp);
4075 		return check_condition_result;
4076 	}
4077 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4078 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4079 	    (cmd[1] & 0xe0) == 0)
4080 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4081 			    "to DIF device\n");
4082 	ret = check_device_access_params(scp, lba, num, false);
4083 	if (ret)
4084 		return ret;
4085 	dnum = 2 * num;
4086 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4087 	if (NULL == arr) {
4088 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4089 				INSUFF_RES_ASCQ);
4090 		return check_condition_result;
4091 	}
4092 
4093 	sdeb_write_lock(sip);
4094 
4095 	ret = do_dout_fetch(scp, dnum, arr);
4096 	if (ret == -1) {
4097 		retval = DID_ERROR << 16;
4098 		goto cleanup;
4099 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
4100 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4101 			    "indicated=%u, IO sent=%d bytes\n", my_name,
4102 			    dnum * lb_size, ret);
4103 	if (!comp_write_worker(sip, lba, num, arr, false)) {
4104 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4105 		retval = check_condition_result;
4106 		goto cleanup;
4107 	}
4108 	if (scsi_debug_lbp())
4109 		map_region(sip, lba, num);
4110 cleanup:
4111 	sdeb_write_unlock(sip);
4112 	kfree(arr);
4113 	return retval;
4114 }
4115 
4116 struct unmap_block_desc {
4117 	__be64	lba;
4118 	__be32	blocks;
4119 	__be32	__reserved;
4120 };
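
/*
 * A minimal sketch, assuming a hypothetical single-descriptor UNMAP
 * parameter list: an 8-byte header (data length = total - 2, then the
 * block descriptor data length) followed by one descriptor, matching
 * the checks in resp_unmap() below.
 */
static void __maybe_unused sdeb_example_build_unmap(u8 *buf, u64 lba,
						    u32 blocks)
{
	struct unmap_block_desc *desc = (void *)(buf + 8);

	memset(buf, 0, 8 + sizeof(*desc));
	put_unaligned_be16(6 + sizeof(*desc), buf + 0);
	put_unaligned_be16(sizeof(*desc), buf + 2);
	desc->lba = cpu_to_be64(lba);
	desc->blocks = cpu_to_be32(blocks);
}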
4121 
4122 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4123 {
4124 	unsigned char *buf;
4125 	struct unmap_block_desc *desc;
4126 	struct sdeb_store_info *sip = devip2sip(devip, true);
4127 	unsigned int i, payload_len, descriptors;
4128 	int ret;
4129 
4130 	if (!scsi_debug_lbp())
4131 		return 0;	/* fib and say it's done */
4132 	payload_len = get_unaligned_be16(scp->cmnd + 7);
4133 	BUG_ON(scsi_bufflen(scp) != payload_len);
4134 
4135 	descriptors = (payload_len - 8) / 16;
4136 	if (descriptors > sdebug_unmap_max_desc) {
4137 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4138 		return check_condition_result;
4139 	}
4140 
4141 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4142 	if (!buf) {
4143 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4144 				INSUFF_RES_ASCQ);
4145 		return check_condition_result;
4146 	}
4147 
4148 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4149 
4150 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4151 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4152 
4153 	desc = (void *)&buf[8];
4154 
4155 	sdeb_write_lock(sip);
4156 
4157 	for (i = 0 ; i < descriptors ; i++) {
4158 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4159 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
4160 
4161 		ret = check_device_access_params(scp, lba, num, true);
4162 		if (ret)
4163 			goto out;
4164 
4165 		unmap_region(sip, lba, num);
4166 	}
4167 
4168 	ret = 0;
4169 
4170 out:
4171 	sdeb_write_unlock(sip);
4172 	kfree(buf);
4173 
4174 	return ret;
4175 }
4176 
4177 #define SDEBUG_GET_LBA_STATUS_LEN 32
4178 
4179 static int resp_get_lba_status(struct scsi_cmnd *scp,
4180 			       struct sdebug_dev_info *devip)
4181 {
4182 	u8 *cmd = scp->cmnd;
4183 	u64 lba;
4184 	u32 alloc_len, mapped, num;
4185 	int ret;
4186 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4187 
4188 	lba = get_unaligned_be64(cmd + 2);
4189 	alloc_len = get_unaligned_be32(cmd + 10);
4190 
4191 	if (alloc_len < 24)
4192 		return 0;
4193 
4194 	ret = check_device_access_params(scp, lba, 1, false);
4195 	if (ret)
4196 		return ret;
4197 
4198 	if (scsi_debug_lbp()) {
4199 		struct sdeb_store_info *sip = devip2sip(devip, true);
4200 
4201 		mapped = map_state(sip, lba, &num);
4202 	} else {
4203 		mapped = 1;
4204 		/* following just in case virtual_gb changed */
4205 		sdebug_capacity = get_sdebug_capacity();
4206 		if (sdebug_capacity - lba <= 0xffffffff)
4207 			num = sdebug_capacity - lba;
4208 		else
4209 			num = 0xffffffff;
4210 	}
4211 
4212 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4213 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4214 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4215 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4216 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4217 
4218 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4219 }
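
/*
 * A minimal sketch, assuming a hypothetical consumer of the response
 * built above: one descriptor at offset 8 holds the starting LBA, the
 * number of blocks and the provisioning status (0 = mapped,
 * 1 = deallocated).
 */
static void __maybe_unused sdeb_example_parse_lba_status(const u8 *arr,
							 u64 *lba, u32 *num,
							 bool *mapped)
{
	*lba = get_unaligned_be64(arr + 8);
	*num = get_unaligned_be32(arr + 16);
	*mapped = (arr[20] & 0xf) == 0;
}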
4220 
4221 static int resp_sync_cache(struct scsi_cmnd *scp,
4222 			   struct sdebug_dev_info *devip)
4223 {
4224 	int res = 0;
4225 	u64 lba;
4226 	u32 num_blocks;
4227 	u8 *cmd = scp->cmnd;
4228 
4229 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4230 		lba = get_unaligned_be32(cmd + 2);
4231 		num_blocks = get_unaligned_be16(cmd + 7);
4232 	} else {				/* SYNCHRONIZE_CACHE(16) */
4233 		lba = get_unaligned_be64(cmd + 2);
4234 		num_blocks = get_unaligned_be32(cmd + 10);
4235 	}
4236 	if (lba + num_blocks > sdebug_capacity) {
4237 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4238 		return check_condition_result;
4239 	}
4240 	if (!write_since_sync || (cmd[1] & 0x2))
4241 		res = SDEG_RES_IMMED_MASK;
4242 	else		/* delay if write_since_sync and IMMED clear */
4243 		write_since_sync = false;
4244 	return res;
4245 }
4246 
4247 /*
4248  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4249  * CONDITION MET if the specified blocks will fit (or already sit) in the
4250  * cache, and a GOOD status otherwise. This models a disk with a big cache
4251  * that always yields CONDITION MET, and it actually tries to bring the
4252  * range in main memory into the cache associated with the CPU(s).
4253  */
4254 static int resp_pre_fetch(struct scsi_cmnd *scp,
4255 			  struct sdebug_dev_info *devip)
4256 {
4257 	int res = 0;
4258 	u64 lba;
4259 	u64 block, rest = 0;
4260 	u32 nblks;
4261 	u8 *cmd = scp->cmnd;
4262 	struct sdeb_store_info *sip = devip2sip(devip, true);
4263 	u8 *fsp = sip->storep;
4264 
4265 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4266 		lba = get_unaligned_be32(cmd + 2);
4267 		nblks = get_unaligned_be16(cmd + 7);
4268 	} else {			/* PRE-FETCH(16) */
4269 		lba = get_unaligned_be64(cmd + 2);
4270 		nblks = get_unaligned_be32(cmd + 10);
4271 	}
4272 	if (lba + nblks > sdebug_capacity) {
4273 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4274 		return check_condition_result;
4275 	}
4276 	if (!fsp)
4277 		goto fini;
4278 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4279 	block = do_div(lba, sdebug_store_sectors);
4280 	if (block + nblks > sdebug_store_sectors)
4281 		rest = block + nblks - sdebug_store_sectors;
4282 
4283 	/* Try to bring the PRE-FETCH range into CPU's cache */
4284 	sdeb_read_lock(sip);
4285 	prefetch_range(fsp + (sdebug_sector_size * block),
4286 		       (nblks - rest) * sdebug_sector_size);
4287 	if (rest)
4288 		prefetch_range(fsp, rest * sdebug_sector_size);
4289 	sdeb_read_unlock(sip);
4290 fini:
4291 	if (cmd[1] & 0x2)
4292 		res = SDEG_RES_IMMED_MASK;
4293 	return res | condition_met_result;
4294 }
4295 
4296 #define RL_BUCKET_ELEMS 8
4297 
4298 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4299  * (W-LUN), the normal Linux scanning logic does not associate it with a
4300  * device (e.g. /dev/sg7). The following magic will make that association:
4301  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4302  * where <n> is a host number. If there are multiple targets in a host then
4303  * the above will associate a W-LUN to each target. To only get a W-LUN
4304  * for target 2, then use "echo '- 2 49409' > scan" .
4305  */
4306 static int resp_report_luns(struct scsi_cmnd *scp,
4307 			    struct sdebug_dev_info *devip)
4308 {
4309 	unsigned char *cmd = scp->cmnd;
4310 	unsigned int alloc_len;
4311 	unsigned char select_report;
4312 	u64 lun;
4313 	struct scsi_lun *lun_p;
4314 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4315 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4316 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4317 	unsigned int tlun_cnt;	/* total LUN count */
4318 	unsigned int rlen;	/* response length (in bytes) */
4319 	int k, j, n, res;
4320 	unsigned int off_rsp = 0;
4321 	const int sz_lun = sizeof(struct scsi_lun);
4322 
4323 	clear_luns_changed_on_target(devip);
4324 
4325 	select_report = cmd[2];
4326 	alloc_len = get_unaligned_be32(cmd + 6);
4327 
4328 	if (alloc_len < 4) {
4329 		pr_err("alloc len too small %d\n", alloc_len);
4330 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4331 		return check_condition_result;
4332 	}
4333 
4334 	switch (select_report) {
4335 	case 0:		/* all LUNs apart from W-LUNs */
4336 		lun_cnt = sdebug_max_luns;
4337 		wlun_cnt = 0;
4338 		break;
4339 	case 1:		/* only W-LUNs */
4340 		lun_cnt = 0;
4341 		wlun_cnt = 1;
4342 		break;
4343 	case 2:		/* all LUNs */
4344 		lun_cnt = sdebug_max_luns;
4345 		wlun_cnt = 1;
4346 		break;
4347 	case 0x10:	/* only administrative LUs */
4348 	case 0x11:	/* see SPC-5 */
4349 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4350 	default:
4351 		pr_debug("select report invalid %d\n", select_report);
4352 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4353 		return check_condition_result;
4354 	}
4355 
4356 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4357 		--lun_cnt;
4358 
4359 	tlun_cnt = lun_cnt + wlun_cnt;
4360 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4361 	scsi_set_resid(scp, scsi_bufflen(scp));
4362 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4363 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4364 
4365 	/* loops rely on the response header being the same size as a LUN (8 bytes) */
4366 	lun = sdebug_no_lun_0 ? 1 : 0;
4367 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4368 		memset(arr, 0, sizeof(arr));
4369 		lun_p = (struct scsi_lun *)&arr[0];
4370 		if (k == 0) {
4371 			put_unaligned_be32(rlen, &arr[0]);
4372 			++lun_p;
4373 			j = 1;
4374 		}
4375 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4376 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4377 				break;
4378 			int_to_scsilun(lun++, lun_p);
4379 			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4380 				lun_p->scsi_lun[0] |= 0x40;
4381 		}
4382 		if (j < RL_BUCKET_ELEMS)
4383 			break;
4384 		n = j * sz_lun;
4385 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4386 		if (res)
4387 			return res;
4388 		off_rsp += n;
4389 	}
4390 	if (wlun_cnt) {
4391 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4392 		++j;
4393 	}
4394 	if (j > 0)
4395 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4396 	return res;
4397 }
4398 
4399 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4400 {
4401 	bool is_bytchk3 = false;
4402 	u8 bytchk;
4403 	int ret, j;
4404 	u32 vnum, a_num, off;
4405 	const u32 lb_size = sdebug_sector_size;
4406 	u64 lba;
4407 	u8 *arr;
4408 	u8 *cmd = scp->cmnd;
4409 	struct sdeb_store_info *sip = devip2sip(devip, true);
4410 
4411 	bytchk = (cmd[1] >> 1) & 0x3;
4412 	if (bytchk == 0) {
4413 		return 0;	/* always claim internal verify okay */
4414 	} else if (bytchk == 2) {
4415 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4416 		return check_condition_result;
4417 	} else if (bytchk == 3) {
4418 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4419 	}
4420 	switch (cmd[0]) {
4421 	case VERIFY_16:
4422 		lba = get_unaligned_be64(cmd + 2);
4423 		vnum = get_unaligned_be32(cmd + 10);
4424 		break;
4425 	case VERIFY:		/* is VERIFY(10) */
4426 		lba = get_unaligned_be32(cmd + 2);
4427 		vnum = get_unaligned_be16(cmd + 7);
4428 		break;
4429 	default:
4430 		mk_sense_invalid_opcode(scp);
4431 		return check_condition_result;
4432 	}
4433 	if (vnum == 0)
4434 		return 0;	/* not an error */
4435 	a_num = is_bytchk3 ? 1 : vnum;
4436 	/* Treat following check like one for read (i.e. no write) access */
4437 	ret = check_device_access_params(scp, lba, a_num, false);
4438 	if (ret)
4439 		return ret;
4440 
4441 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4442 	if (!arr) {
4443 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4444 				INSUFF_RES_ASCQ);
4445 		return check_condition_result;
4446 	}
4447 	/* Not changing store, so only need read access */
4448 	sdeb_read_lock(sip);
4449 
4450 	ret = do_dout_fetch(scp, a_num, arr);
4451 	if (ret == -1) {
4452 		ret = DID_ERROR << 16;
4453 		goto cleanup;
4454 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4455 		sdev_printk(KERN_INFO, scp->device,
4456 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4457 			    my_name, __func__, a_num * lb_size, ret);
4458 	}
4459 	if (is_bytchk3) {
4460 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4461 			memcpy(arr + off, arr, lb_size);
4462 	}
4463 	ret = 0;
4464 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4465 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4466 		ret = check_condition_result;
4467 		goto cleanup;
4468 	}
4469 cleanup:
4470 	sdeb_read_unlock(sip);
4471 	kfree(arr);
4472 	return ret;
4473 }
4474 
4475 #define RZONES_DESC_HD 64
4476 
4477 /* Report zones depending on start LBA and reporting options */
4478 static int resp_report_zones(struct scsi_cmnd *scp,
4479 			     struct sdebug_dev_info *devip)
4480 {
4481 	unsigned int rep_max_zones, nrz = 0;
4482 	int ret = 0;
4483 	u32 alloc_len, rep_opts, rep_len;
4484 	bool partial;
4485 	u64 lba, zs_lba;
4486 	u8 *arr = NULL, *desc;
4487 	u8 *cmd = scp->cmnd;
4488 	struct sdeb_zone_state *zsp = NULL;
4489 	struct sdeb_store_info *sip = devip2sip(devip, false);
4490 
4491 	if (!sdebug_dev_is_zoned(devip)) {
4492 		mk_sense_invalid_opcode(scp);
4493 		return check_condition_result;
4494 	}
4495 	zs_lba = get_unaligned_be64(cmd + 2);
4496 	alloc_len = get_unaligned_be32(cmd + 10);
4497 	if (alloc_len == 0)
4498 		return 0;	/* not an error */
4499 	rep_opts = cmd[14] & 0x3f;
4500 	partial = cmd[14] & 0x80;
4501 
4502 	if (zs_lba >= sdebug_capacity) {
4503 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4504 		return check_condition_result;
4505 	}
4506 
4507 	rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4508 
4509 	arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4510 	if (!arr) {
4511 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4512 				INSUFF_RES_ASCQ);
4513 		return check_condition_result;
4514 	}
4515 
4516 	sdeb_read_lock(sip);
4517 
4518 	desc = arr + 64;
4519 	for (lba = zs_lba; lba < sdebug_capacity;
4520 	     lba = zsp->z_start + zsp->z_size) {
4521 		if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4522 			break;
4523 		zsp = zbc_zone(devip, lba);
4524 		switch (rep_opts) {
4525 		case 0x00:
4526 			/* All zones */
4527 			break;
4528 		case 0x01:
4529 			/* Empty zones */
4530 			if (zsp->z_cond != ZC1_EMPTY)
4531 				continue;
4532 			break;
4533 		case 0x02:
4534 			/* Implicit open zones */
4535 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4536 				continue;
4537 			break;
4538 		case 0x03:
4539 			/* Explicit open zones */
4540 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4541 				continue;
4542 			break;
4543 		case 0x04:
4544 			/* Closed zones */
4545 			if (zsp->z_cond != ZC4_CLOSED)
4546 				continue;
4547 			break;
4548 		case 0x05:
4549 			/* Full zones */
4550 			if (zsp->z_cond != ZC5_FULL)
4551 				continue;
4552 			break;
4553 		case 0x06:
4554 		case 0x07:
4555 		case 0x10:
4556 			/*
4557 			 * Read-only, offline, reset WP recommended are
4558 			 * not emulated: no zones to report.
4559 			 */
4560 			continue;
4561 		case 0x11:
4562 			/* non-seq-resource set */
4563 			if (!zsp->z_non_seq_resource)
4564 				continue;
4565 			break;
4566 		case 0x3e:
4567 			/* All zones except gap zones. */
4568 			if (zbc_zone_is_gap(zsp))
4569 				continue;
4570 			break;
4571 		case 0x3f:
4572 			/* Not write pointer (conventional) zones */
4573 			if (zbc_zone_is_seq(zsp))
4574 				continue;
4575 			break;
4576 		default:
4577 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4578 					INVALID_FIELD_IN_CDB, 0);
4579 			ret = check_condition_result;
4580 			goto fini;
4581 		}
4582 
4583 		if (nrz < rep_max_zones) {
4584 			/* Fill zone descriptor */
4585 			desc[0] = zsp->z_type;
4586 			desc[1] = zsp->z_cond << 4;
4587 			if (zsp->z_non_seq_resource)
4588 				desc[1] |= 1 << 1;
4589 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4590 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4591 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4592 			desc += 64;
4593 		}
4594 
4595 		if (partial && nrz >= rep_max_zones)
4596 			break;
4597 
4598 		nrz++;
4599 	}
4600 
4601 	/* Report header */
4602 	/* Zone list length. */
4603 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4604 	/* Maximum LBA */
4605 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4606 	/* Zone starting LBA granularity. */
4607 	if (devip->zcap < devip->zsize)
4608 		put_unaligned_be64(devip->zsize, arr + 16);
4609 
4610 	rep_len = (unsigned long)desc - (unsigned long)arr;
4611 	ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4612 
4613 fini:
4614 	sdeb_read_unlock(sip);
4615 	kfree(arr);
4616 	return ret;
4617 }
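
/*
 * A minimal sketch, assuming a hypothetical reader of one 64-byte zone
 * descriptor as filled above: zone type in byte 0, condition in the
 * high nibble of byte 1, then 64-bit zone length, start LBA and write
 * pointer.
 */
static void __maybe_unused sdeb_example_parse_zone_desc(const u8 *desc,
							u64 *zstart,
							u64 *zlen, u64 *wp,
							u8 *cond)
{
	*cond = desc[1] >> 4;
	*zlen = get_unaligned_be64(desc + 8);
	*zstart = get_unaligned_be64(desc + 16);
	*wp = get_unaligned_be64(desc + 24);
}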
4618 
4619 /* Logic transplanted from tcmu-runner, file_zbc.c */
4620 static void zbc_open_all(struct sdebug_dev_info *devip)
4621 {
4622 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4623 	unsigned int i;
4624 
4625 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4626 		if (zsp->z_cond == ZC4_CLOSED)
4627 			zbc_open_zone(devip, &devip->zstate[i], true);
4628 	}
4629 }
4630 
4631 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4632 {
4633 	int res = 0;
4634 	u64 z_id;
4635 	enum sdebug_z_cond zc;
4636 	u8 *cmd = scp->cmnd;
4637 	struct sdeb_zone_state *zsp;
4638 	bool all = cmd[14] & 0x01;
4639 	struct sdeb_store_info *sip = devip2sip(devip, false);
4640 
4641 	if (!sdebug_dev_is_zoned(devip)) {
4642 		mk_sense_invalid_opcode(scp);
4643 		return check_condition_result;
4644 	}
4645 
4646 	sdeb_write_lock(sip);
4647 
4648 	if (all) {
4649 		/* Check if all closed zones can be opened */
4650 		if (devip->max_open &&
4651 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4652 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4653 					INSUFF_ZONE_ASCQ);
4654 			res = check_condition_result;
4655 			goto fini;
4656 		}
4657 		/* Open all closed zones */
4658 		zbc_open_all(devip);
4659 		goto fini;
4660 	}
4661 
4662 	/* Open the specified zone */
4663 	z_id = get_unaligned_be64(cmd + 2);
4664 	if (z_id >= sdebug_capacity) {
4665 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4666 		res = check_condition_result;
4667 		goto fini;
4668 	}
4669 
4670 	zsp = zbc_zone(devip, z_id);
4671 	if (z_id != zsp->z_start) {
4672 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4673 		res = check_condition_result;
4674 		goto fini;
4675 	}
4676 	if (zbc_zone_is_conv(zsp)) {
4677 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4678 		res = check_condition_result;
4679 		goto fini;
4680 	}
4681 
4682 	zc = zsp->z_cond;
4683 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4684 		goto fini;
4685 
4686 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4687 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4688 				INSUFF_ZONE_ASCQ);
4689 		res = check_condition_result;
4690 		goto fini;
4691 	}
4692 
4693 	zbc_open_zone(devip, zsp, true);
4694 fini:
4695 	sdeb_write_unlock(sip);
4696 	return res;
4697 }
4698 
4699 static void zbc_close_all(struct sdebug_dev_info *devip)
4700 {
4701 	unsigned int i;
4702 
4703 	for (i = 0; i < devip->nr_zones; i++)
4704 		zbc_close_zone(devip, &devip->zstate[i]);
4705 }
4706 
4707 static int resp_close_zone(struct scsi_cmnd *scp,
4708 			   struct sdebug_dev_info *devip)
4709 {
4710 	int res = 0;
4711 	u64 z_id;
4712 	u8 *cmd = scp->cmnd;
4713 	struct sdeb_zone_state *zsp;
4714 	bool all = cmd[14] & 0x01;
4715 	struct sdeb_store_info *sip = devip2sip(devip, false);
4716 
4717 	if (!sdebug_dev_is_zoned(devip)) {
4718 		mk_sense_invalid_opcode(scp);
4719 		return check_condition_result;
4720 	}
4721 
4722 	sdeb_write_lock(sip);
4723 
4724 	if (all) {
4725 		zbc_close_all(devip);
4726 		goto fini;
4727 	}
4728 
4729 	/* Close specified zone */
4730 	z_id = get_unaligned_be64(cmd + 2);
4731 	if (z_id >= sdebug_capacity) {
4732 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4733 		res = check_condition_result;
4734 		goto fini;
4735 	}
4736 
4737 	zsp = zbc_zone(devip, z_id);
4738 	if (z_id != zsp->z_start) {
4739 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4740 		res = check_condition_result;
4741 		goto fini;
4742 	}
4743 	if (zbc_zone_is_conv(zsp)) {
4744 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4745 		res = check_condition_result;
4746 		goto fini;
4747 	}
4748 
4749 	zbc_close_zone(devip, zsp);
4750 fini:
4751 	sdeb_write_unlock(sip);
4752 	return res;
4753 }
4754 
4755 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4756 			    struct sdeb_zone_state *zsp, bool empty)
4757 {
4758 	enum sdebug_z_cond zc = zsp->z_cond;
4759 
4760 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4761 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4762 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4763 			zbc_close_zone(devip, zsp);
4764 		if (zsp->z_cond == ZC4_CLOSED)
4765 			devip->nr_closed--;
4766 		zsp->z_wp = zsp->z_start + zsp->z_size;
4767 		zsp->z_cond = ZC5_FULL;
4768 	}
4769 }
4770 
4771 static void zbc_finish_all(struct sdebug_dev_info *devip)
4772 {
4773 	unsigned int i;
4774 
4775 	for (i = 0; i < devip->nr_zones; i++)
4776 		zbc_finish_zone(devip, &devip->zstate[i], false);
4777 }
4778 
4779 static int resp_finish_zone(struct scsi_cmnd *scp,
4780 			    struct sdebug_dev_info *devip)
4781 {
4782 	struct sdeb_zone_state *zsp;
4783 	int res = 0;
4784 	u64 z_id;
4785 	u8 *cmd = scp->cmnd;
4786 	bool all = cmd[14] & 0x01;
4787 	struct sdeb_store_info *sip = devip2sip(devip, false);
4788 
4789 	if (!sdebug_dev_is_zoned(devip)) {
4790 		mk_sense_invalid_opcode(scp);
4791 		return check_condition_result;
4792 	}
4793 
4794 	sdeb_write_lock(sip);
4795 
4796 	if (all) {
4797 		zbc_finish_all(devip);
4798 		goto fini;
4799 	}
4800 
4801 	/* Finish the specified zone */
4802 	z_id = get_unaligned_be64(cmd + 2);
4803 	if (z_id >= sdebug_capacity) {
4804 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4805 		res = check_condition_result;
4806 		goto fini;
4807 	}
4808 
4809 	zsp = zbc_zone(devip, z_id);
4810 	if (z_id != zsp->z_start) {
4811 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4812 		res = check_condition_result;
4813 		goto fini;
4814 	}
4815 	if (zbc_zone_is_conv(zsp)) {
4816 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4817 		res = check_condition_result;
4818 		goto fini;
4819 	}
4820 
4821 	zbc_finish_zone(devip, zsp, true);
4822 fini:
4823 	sdeb_write_unlock(sip);
4824 	return res;
4825 }
4826 
4827 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4828 			 struct sdeb_zone_state *zsp)
4829 {
4830 	enum sdebug_z_cond zc;
4831 	struct sdeb_store_info *sip = devip2sip(devip, false);
4832 
4833 	if (!zbc_zone_is_seq(zsp))
4834 		return;
4835 
4836 	zc = zsp->z_cond;
4837 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4838 		zbc_close_zone(devip, zsp);
4839 
4840 	if (zsp->z_cond == ZC4_CLOSED)
4841 		devip->nr_closed--;
4842 
4843 	if (zsp->z_wp > zsp->z_start)
4844 		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4845 		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4846 
4847 	zsp->z_non_seq_resource = false;
4848 	zsp->z_wp = zsp->z_start;
4849 	zsp->z_cond = ZC1_EMPTY;
4850 }
4851 
4852 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4853 {
4854 	unsigned int i;
4855 
4856 	for (i = 0; i < devip->nr_zones; i++)
4857 		zbc_rwp_zone(devip, &devip->zstate[i]);
4858 }
4859 
4860 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4861 {
4862 	struct sdeb_zone_state *zsp;
4863 	int res = 0;
4864 	u64 z_id;
4865 	u8 *cmd = scp->cmnd;
4866 	bool all = cmd[14] & 0x01;
4867 	struct sdeb_store_info *sip = devip2sip(devip, false);
4868 
4869 	if (!sdebug_dev_is_zoned(devip)) {
4870 		mk_sense_invalid_opcode(scp);
4871 		return check_condition_result;
4872 	}
4873 
4874 	sdeb_write_lock(sip);
4875 
4876 	if (all) {
4877 		zbc_rwp_all(devip);
4878 		goto fini;
4879 	}
4880 
4881 	z_id = get_unaligned_be64(cmd + 2);
4882 	if (z_id >= sdebug_capacity) {
4883 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4884 		res = check_condition_result;
4885 		goto fini;
4886 	}
4887 
4888 	zsp = zbc_zone(devip, z_id);
4889 	if (z_id != zsp->z_start) {
4890 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4891 		res = check_condition_result;
4892 		goto fini;
4893 	}
4894 	if (zbc_zone_is_conv(zsp)) {
4895 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4896 		res = check_condition_result;
4897 		goto fini;
4898 	}
4899 
4900 	zbc_rwp_zone(devip, zsp);
4901 fini:
4902 	sdeb_write_unlock(sip);
4903 	return res;
4904 }
4905 
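/*
 * blk_mq_unique_tag() encodes the hardware queue index in the upper
 * 16 bits of the tag and blk_mq_unique_tag_to_hwq() recovers it; that
 * is how a command is matched to its sdebug_queue here.
 */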
4906 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4907 {
4908 	u16 hwq;
4909 	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4910 
4911 	hwq = blk_mq_unique_tag_to_hwq(tag);
4912 
4913 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4914 	if (WARN_ON_ONCE(hwq >= submit_queues))
4915 		hwq = 0;
4916 
4917 	return sdebug_q_arr + hwq;
4918 }
4919 
4920 static u32 get_tag(struct scsi_cmnd *cmnd)
4921 {
4922 	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4923 }
4924 
4925 /* Queued (deferred) command completions converge here. */
4926 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4927 {
4928 	struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4929 	int qc_idx;
4930 	unsigned long flags, iflags;
4931 	struct scsi_cmnd *scp = sqcp->scmd;
4932 	struct sdebug_scsi_cmd *sdsc;
4933 	bool aborted;
4934 	struct sdebug_queue *sqp;
4935 
4936 	qc_idx = sd_dp->sqa_idx;
4937 	if (sdebug_statistics) {
4938 		atomic_inc(&sdebug_completions);
4939 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4940 			atomic_inc(&sdebug_miss_cpus);
4941 	}
4942 	if (!scp) {
4943 		pr_err("scmd=NULL\n");
4944 		goto out;
4945 	}
4946 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4947 		pr_err("wild qc_idx=%d\n", qc_idx);
4948 		goto out;
4949 	}
4950 
4951 	sdsc = scsi_cmd_priv(scp);
4952 	sqp = get_queue(scp);
4953 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4954 	spin_lock_irqsave(&sdsc->lock, flags);
4955 	aborted = sd_dp->aborted;
4956 	if (unlikely(aborted))
4957 		sd_dp->aborted = false;
4958 	ASSIGN_QUEUED_CMD(scp, NULL);
4959 
4960 	sqp->qc_arr[qc_idx] = NULL;
4961 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4962 		spin_unlock_irqrestore(&sdsc->lock, flags);
4963 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4964 		pr_err("Unexpected completion qc_idx=%d\n", qc_idx);
4965 		goto out;
4966 	}
4967 
4968 	spin_unlock_irqrestore(&sdsc->lock, flags);
4969 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4970 
4971 	if (aborted) {
4972 		pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4973 		blk_abort_request(scsi_cmd_to_rq(scp));
4974 		goto out;
4975 	}
4976 
4977 	scsi_done(scp); /* callback to mid level */
4978 out:
4979 	sdebug_free_queued_cmd(sqcp);
4980 }
4981 
4982 /* Called when the high-resolution timer of a deferred command fires. */
4983 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4984 {
4985 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4986 						  hrt);
4987 	sdebug_q_cmd_complete(sd_dp);
4988 	return HRTIMER_NORESTART;
4989 }
4990 
4991 /* Called by the work queue when a deferred command's work item runs. */
4992 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4993 {
4994 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4995 						  ew.work);
4996 	sdebug_q_cmd_complete(sd_dp);
4997 }
4998 
4999 static bool got_shared_uuid;
5000 static uuid_t shared_uuid;
5001 
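/*
 * Build the zone state array of a ZBC device: derive the zone size,
 * zone capacity and zone counts from the module parameters, then
 * initialize each zone's type, condition, write pointer and size.
 */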
5002 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5003 {
5004 	struct sdeb_zone_state *zsp;
5005 	sector_t capacity = get_sdebug_capacity();
5006 	sector_t conv_capacity;
5007 	sector_t zstart = 0;
5008 	unsigned int i;
5009 
5010 	/*
5011 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5012 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
5013 	 * use the specified zone size checking that at least 2 zones can be
5014 	 * created for the device.
5015 	 */
5016 	if (!sdeb_zbc_zone_size_mb) {
5017 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5018 			>> ilog2(sdebug_sector_size);
5019 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5020 			devip->zsize >>= 1;
5021 		if (devip->zsize < 2) {
5022 			pr_err("Device capacity too small\n");
5023 			return -EINVAL;
5024 		}
5025 	} else {
5026 		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5027 			pr_err("Zone size is not a power of 2\n");
5028 			return -EINVAL;
5029 		}
5030 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5031 			>> ilog2(sdebug_sector_size);
5032 		if (devip->zsize >= capacity) {
5033 			pr_err("Zone size too large for device capacity\n");
5034 			return -EINVAL;
5035 		}
5036 	}
5037 
5038 	devip->zsize_shift = ilog2(devip->zsize);
5039 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5040 
5041 	if (sdeb_zbc_zone_cap_mb == 0) {
5042 		devip->zcap = devip->zsize;
5043 	} else {
5044 		devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5045 			      ilog2(sdebug_sector_size);
5046 		if (devip->zcap > devip->zsize) {
5047 			pr_err("Zone capacity too large\n");
5048 			return -EINVAL;
5049 		}
5050 	}
5051 
5052 	conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5053 	if (conv_capacity >= capacity) {
5054 		pr_err("Number of conventional zones too large\n");
5055 		return -EINVAL;
5056 	}
5057 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
5058 	devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5059 			      devip->zsize_shift;
5060 	devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5061 
5062 	/* Add gap zones if zone capacity is smaller than the zone size */
5063 	if (devip->zcap < devip->zsize)
5064 		devip->nr_zones += devip->nr_seq_zones;
5065 
5066 	if (devip->zmodel == BLK_ZONED_HM) {
5067 		/* zbc_max_open_zones can be 0, meaning "not reported" */
5068 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5069 			devip->max_open = (devip->nr_zones - 1) / 2;
5070 		else
5071 			devip->max_open = sdeb_zbc_max_open;
5072 	}
5073 
5074 	devip->zstate = kcalloc(devip->nr_zones,
5075 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
5076 	if (!devip->zstate)
5077 		return -ENOMEM;
5078 
5079 	for (i = 0; i < devip->nr_zones; i++) {
5080 		zsp = &devip->zstate[i];
5081 
5082 		zsp->z_start = zstart;
5083 
5084 		if (i < devip->nr_conv_zones) {
5085 			zsp->z_type = ZBC_ZTYPE_CNV;
5086 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5087 			zsp->z_wp = (sector_t)-1;
5088 			zsp->z_size =
5089 				min_t(u64, devip->zsize, capacity - zstart);
5090 		} else if ((zstart & (devip->zsize - 1)) == 0) {
5091 			if (devip->zmodel == BLK_ZONED_HM)
5092 				zsp->z_type = ZBC_ZTYPE_SWR;
5093 			else
5094 				zsp->z_type = ZBC_ZTYPE_SWP;
5095 			zsp->z_cond = ZC1_EMPTY;
5096 			zsp->z_wp = zsp->z_start;
5097 			zsp->z_size =
5098 				min_t(u64, devip->zcap, capacity - zstart);
5099 		} else {
5100 			zsp->z_type = ZBC_ZTYPE_GAP;
5101 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5102 			zsp->z_wp = (sector_t)-1;
5103 			zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5104 					    capacity - zstart);
5105 		}
5106 
5107 		WARN_ON_ONCE((int)zsp->z_size <= 0);
5108 		zstart += zsp->z_size;
5109 	}
5110 
5111 	return 0;
5112 }
5113 
5114 static struct sdebug_dev_info *sdebug_device_create(
5115 			struct sdebug_host_info *sdbg_host, gfp_t flags)
5116 {
5117 	struct sdebug_dev_info *devip;
5118 
5119 	devip = kzalloc(sizeof(*devip), flags);
5120 	if (devip) {
5121 		if (sdebug_uuid_ctl == 1)
5122 			uuid_gen(&devip->lu_name);
5123 		else if (sdebug_uuid_ctl == 2) {
5124 			if (got_shared_uuid)
5125 				devip->lu_name = shared_uuid;
5126 			else {
5127 				uuid_gen(&shared_uuid);
5128 				got_shared_uuid = true;
5129 				devip->lu_name = shared_uuid;
5130 			}
5131 		}
5132 		devip->sdbg_host = sdbg_host;
5133 		if (sdeb_zbc_in_use) {
5134 			devip->zmodel = sdeb_zbc_model;
5135 			if (sdebug_device_create_zones(devip)) {
5136 				kfree(devip);
5137 				return NULL;
5138 			}
5139 		} else {
5140 			devip->zmodel = BLK_ZONED_NONE;
5141 		}
5142 		devip->create_ts = ktime_get_boottime();
5143 		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5144 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5145 	}
5146 	return devip;
5147 }
5148 
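/* Return the sdebug_dev_info matching @sdev's <channel, id, lun>. If
 * none exists, reuse an unused slot or allocate a new one, initialize
 * it for @sdev and raise a power-on-occurred unit attention on it.
 */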
5149 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5150 {
5151 	struct sdebug_host_info *sdbg_host;
5152 	struct sdebug_dev_info *open_devip = NULL;
5153 	struct sdebug_dev_info *devip;
5154 
5155 	sdbg_host = shost_to_sdebug_host(sdev->host);
5156 
5157 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5158 		if ((devip->used) && (devip->channel == sdev->channel) &&
5159 		    (devip->target == sdev->id) &&
5160 		    (devip->lun == sdev->lun))
5161 			return devip;
5162 		else {
5163 			if ((!devip->used) && (!open_devip))
5164 				open_devip = devip;
5165 		}
5166 	}
5167 	if (!open_devip) { /* try and make a new one */
5168 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5169 		if (!open_devip) {
5170 			pr_err("out of memory at line %d\n", __LINE__);
5171 			return NULL;
5172 		}
5173 	}
5174 
5175 	open_devip->channel = sdev->channel;
5176 	open_devip->target = sdev->id;
5177 	open_devip->lun = sdev->lun;
5178 	open_devip->sdbg_host = sdbg_host;
5179 	set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5180 	open_devip->used = true;
5181 	return open_devip;
5182 }
5183 
5184 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5185 {
5186 	if (sdebug_verbose)
5187 		pr_info("slave_alloc <%u %u %u %llu>\n",
5188 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5189 	return 0;
5190 }
5191 
5192 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5193 {
5194 	struct sdebug_dev_info *devip =
5195 			(struct sdebug_dev_info *)sdp->hostdata;
5196 
5197 	if (sdebug_verbose)
5198 		pr_info("slave_configure <%u %u %u %llu>\n",
5199 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5200 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5201 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5202 	if (devip == NULL) {
5203 		devip = find_build_dev_info(sdp);
5204 		if (devip == NULL)
5205 			return 1;  /* no resources, will be marked offline */
5206 	}
5207 	sdp->hostdata = devip;
5208 	if (sdebug_no_uld)
5209 		sdp->no_uld_attach = 1;
5210 	config_cdb_len(sdp);
5211 	return 0;
5212 }
5213 
5214 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5215 {
5216 	struct sdebug_dev_info *devip =
5217 		(struct sdebug_dev_info *)sdp->hostdata;
5218 
5219 	if (sdebug_verbose)
5220 		pr_info("slave_destroy <%u %u %u %llu>\n",
5221 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5222 	if (devip) {
5223 		/* make this slot available for re-use */
5224 		devip->used = false;
5225 		sdp->hostdata = NULL;
5226 	}
5227 }
5228 
5229 /* Returns true if we require the queued memory to be freed by the caller. */
5230 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5231 			   enum sdeb_defer_type defer_t)
5232 {
5233 	if (defer_t == SDEB_DEFER_HRT) {
5234 		int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5235 
5236 		switch (res) {
5237 		case 0: /* Not active, it must have already run */
5238 		case -1: /* The callback is currently executing */
5239 			return false;
5240 		case 1: /* Was active, we've now cancelled */
5241 		default:
5242 			return true;
5243 		}
5244 	} else if (defer_t == SDEB_DEFER_WQ) {
5245 		/* Cancel if pending */
5246 		if (cancel_work_sync(&sd_dp->ew.work))
5247 			return true;
5248 		/* Was not pending, so it must have run */
5249 		return false;
5250 	} else if (defer_t == SDEB_DEFER_POLL) {
5251 		return true;
5252 	}
5253 
5254 	return false;
5255 }
5256 
5257 
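/* Detach the queued command from @cmnd and cancel its deferred
 * completion (hrtimer or work item), freeing the queued command when
 * that is safe. Caller must hold sdsc->lock. Returns false if @cmnd
 * had no queued command attached.
 */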
5258 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd, int *sqa_idx)
5259 {
5260 	enum sdeb_defer_type l_defer_t;
5261 	struct sdebug_queued_cmd *sqcp;
5262 	struct sdebug_defer *sd_dp;
5263 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5264 
5265 	lockdep_assert_held(&sdsc->lock);
5266 
5267 	sqcp = TO_QUEUED_CMD(cmnd);
5268 	if (!sqcp)
5269 		return false;
5270 	sd_dp = &sqcp->sd_dp;
5271 	if (sqa_idx)
5272 		*sqa_idx = sd_dp->sqa_idx;
5273 	l_defer_t = READ_ONCE(sd_dp->defer_t);
5274 	ASSIGN_QUEUED_CMD(cmnd, NULL);
5275 
5276 	if (stop_qc_helper(sd_dp, l_defer_t))
5277 		sdebug_free_queued_cmd(sqcp);
5278 
5279 	return true;
5280 }
5281 
5282 /*
5283  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5284  */
5285 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5286 {
5287 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5288 	struct sdebug_queue *sqp = get_queue(cmnd);
5289 	unsigned long flags, iflags;
5290 	int k = -1;
5291 	bool res;
5292 
5293 	spin_lock_irqsave(&sdsc->lock, flags);
5294 	res = scsi_debug_stop_cmnd(cmnd, &k);
5295 	spin_unlock_irqrestore(&sdsc->lock, flags);
5296 
5297 	if (k >= 0) {
5298 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5299 		clear_bit(k, sqp->in_use_bm);
5300 		sqp->qc_arr[k] = NULL;
5301 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5302 	}
5303 
5304 	return res;
5305 }
5306 
5307 /*
5308  * All we can do is set the cmnd as internally aborted and wait for it to
5309  * finish. We cannot call scsi_done() as normal completion path may do that.
5310  */
5311 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5312 {
5313 	scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5314 
5315 	return true;
5316 }
5317 
5318 /* Deletes (stops) timers or work queues of all queued commands */
5319 static void stop_all_queued(void)
5320 {
5321 	struct sdebug_host_info *sdhp;
5322 
5323 	mutex_lock(&sdebug_host_list_mutex);
5324 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5325 		struct Scsi_Host *shost = sdhp->shost;
5326 
5327 		blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5328 	}
5329 	mutex_unlock(&sdebug_host_list_mutex);
5330 }
5331 
5332 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
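/* Error handler callback: abort a single timed-out command. Always
 * returns SUCCESS; with noisy opts it logs whether the command was
 * actually found and stopped.
 */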
5333 {
5334 	bool ok = scsi_debug_abort_cmnd(SCpnt);
5335 
5336 	++num_aborts;
5337 
5338 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5339 		sdev_printk(KERN_INFO, SCpnt->device,
5340 			    "%s: command%s found\n", __func__,
5341 			    ok ? "" : " not");
5342 
5343 	return SUCCESS;
5344 }
5345 
5346 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5347 {
5348 	struct scsi_device *sdp = SCpnt->device;
5349 	struct sdebug_dev_info *devip = sdp->hostdata;
5350 
5351 	++num_dev_resets;
5352 
5353 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5354 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5355 	if (devip)
5356 		set_bit(SDEBUG_UA_POR, devip->uas_bm);
5357 
5358 	return SUCCESS;
5359 }
5360 
5361 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5362 {
5363 	struct scsi_device *sdp = SCpnt->device;
5364 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5365 	struct sdebug_dev_info *devip;
5366 	int k = 0;
5367 
5368 	++num_target_resets;
5369 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5370 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5371 
5372 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5373 		if (devip->target == sdp->id) {
5374 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5375 			++k;
5376 		}
5377 	}
5378 
5379 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5380 		sdev_printk(KERN_INFO, sdp,
5381 			    "%s: %d device(s) found in target\n", __func__, k);
5382 
5383 	return SUCCESS;
5384 }
5385 
5386 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5387 {
5388 	struct scsi_device *sdp = SCpnt->device;
5389 	struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5390 	struct sdebug_dev_info *devip;
5391 	int k = 0;
5392 
5393 	++num_bus_resets;
5394 
5395 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5396 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5397 
5398 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5399 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5400 		++k;
5401 	}
5402 
5403 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5404 		sdev_printk(KERN_INFO, sdp,
5405 			    "%s: %d device(s) found in host\n", __func__, k);
5406 	return SUCCESS;
5407 }
5408 
5409 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5410 {
5411 	struct sdebug_host_info *sdbg_host;
5412 	struct sdebug_dev_info *devip;
5413 	int k = 0;
5414 
5415 	++num_host_resets;
5416 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5417 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5418 	mutex_lock(&sdebug_host_list_mutex);
5419 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5420 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5421 				    dev_list) {
5422 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5423 			++k;
5424 		}
5425 	}
5426 	mutex_unlock(&sdebug_host_list_mutex);
5427 	stop_all_queued();
5428 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5429 		sdev_printk(KERN_INFO, SCpnt->device,
5430 			    "%s: %d device(s) found\n", __func__, k);
5431 	return SUCCESS;
5432 }
5433 
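/* Write an MSDOS/MBR partition table into the first sector of the RAM
 * store, dividing the capacity into sdebug_num_parts cylinder-aligned
 * partitions of type 0x83 (Linux).
 */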
5434 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5435 {
5436 	struct msdos_partition *pp;
5437 	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5438 	int sectors_per_part, num_sectors, k;
5439 	int heads_by_sects, start_sec, end_sec;
5440 
5441 	/* assume partition table already zeroed */
5442 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5443 		return;
5444 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5445 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5446 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5447 	}
5448 	num_sectors = (int)get_sdebug_capacity();
5449 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5450 			   / sdebug_num_parts;
5451 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5452 	starts[0] = sdebug_sectors_per;
5453 	max_part_secs = sectors_per_part;
5454 	for (k = 1; k < sdebug_num_parts; ++k) {
5455 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5456 			    * heads_by_sects;
5457 		if (starts[k] - starts[k - 1] < max_part_secs)
5458 			max_part_secs = starts[k] - starts[k - 1];
5459 	}
5460 	starts[sdebug_num_parts] = num_sectors;
5461 	starts[sdebug_num_parts + 1] = 0;
5462 
5463 	ramp[510] = 0x55;	/* magic partition markings */
5464 	ramp[511] = 0xAA;
5465 	pp = (struct msdos_partition *)(ramp + 0x1be);
5466 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5467 		start_sec = starts[k];
5468 		end_sec = starts[k] + max_part_secs - 1;
5469 		pp->boot_ind = 0;
5470 
5471 		pp->cyl = start_sec / heads_by_sects;
5472 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5473 			   / sdebug_sectors_per;
5474 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5475 
5476 		pp->end_cyl = end_sec / heads_by_sects;
5477 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5478 			       / sdebug_sectors_per;
5479 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5480 
5481 		pp->start_sect = cpu_to_le32(start_sec);
5482 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5483 		pp->sys_ind = 0x83;	/* plain Linux partition */
5484 	}
5485 }
5486 
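/* Block or unblock the request queues of all scsi_debug hosts. Caller
 * must hold sdebug_host_list_mutex.
 */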
5487 static void block_unblock_all_queues(bool block)
5488 {
5489 	struct sdebug_host_info *sdhp;
5490 
5491 	lockdep_assert_held(&sdebug_host_list_mutex);
5492 
5493 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5494 		struct Scsi_Host *shost = sdhp->shost;
5495 
5496 		if (block)
5497 			scsi_block_requests(shost);
5498 		else
5499 			scsi_unblock_requests(shost);
5500 	}
5501 }
5502 
5503 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5504  * commands will be processed normally before triggers occur.
5505  */
5506 static void tweak_cmnd_count(void)
5507 {
5508 	int count, modulo;
5509 
5510 	modulo = abs(sdebug_every_nth);
5511 	if (modulo < 2)
5512 		return;
5513 
5514 	mutex_lock(&sdebug_host_list_mutex);
5515 	block_unblock_all_queues(true);
5516 	count = atomic_read(&sdebug_cmnd_count);
5517 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5518 	block_unblock_all_queues(false);
5519 	mutex_unlock(&sdebug_host_list_mutex);
5520 }
5521 
5522 static void clear_queue_stats(void)
5523 {
5524 	atomic_set(&sdebug_cmnd_count, 0);
5525 	atomic_set(&sdebug_completions, 0);
5526 	atomic_set(&sdebug_miss_cpus, 0);
5527 	atomic_set(&sdebug_a_tsf, 0);
5528 }
5529 
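/* Returns true when the running command count lands on a multiple of
 * |sdebug_every_nth|, i.e. this command should receive an injection.
 */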
5530 static bool inject_on_this_cmd(void)
5531 {
5532 	if (sdebug_every_nth == 0)
5533 		return false;
5534 	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5535 }
5536 
5537 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
5538 
5539 
5540 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5541 {
5542 	if (sqcp)
5543 		kmem_cache_free(queued_cmd_cache, sqcp);
5544 }
5545 
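/* Allocate a queued command tracking structure and prime both of its
 * deferred completion paths (hrtimer and work item). Uses GFP_ATOMIC
 * since it may run in the command submission path; returns NULL on
 * allocation failure.
 */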
5546 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5547 {
5548 	struct sdebug_queued_cmd *sqcp;
5549 	struct sdebug_defer *sd_dp;
5550 
5551 	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5552 	if (!sqcp)
5553 		return NULL;
5554 
5555 	sd_dp = &sqcp->sd_dp;
5556 
5557 	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5558 	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5559 	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5560 
5561 	sqcp->scmd = scmd;
5562 	sd_dp->sqa_idx = -1;
5563 
5564 	return sqcp;
5565 }
5566 
5567 /* Complete the processing of the thread that queued a SCSI command to this
5568  * driver. It either completes the command immediately by calling scsi_done()
5569  * or schedules a hrtimer or work queue item and then returns 0. Returns
5570  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5571  */
5572 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5573 			 int scsi_result,
5574 			 int (*pfp)(struct scsi_cmnd *,
5575 				    struct sdebug_dev_info *),
5576 			 int delta_jiff, int ndelay)
5577 {
5578 	struct request *rq = scsi_cmd_to_rq(cmnd);
5579 	bool polled = rq->cmd_flags & REQ_POLLED;
5580 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5581 	unsigned long iflags, flags;
5582 	u64 ns_from_boot = 0;
5583 	struct sdebug_queue *sqp;
5584 	struct sdebug_queued_cmd *sqcp;
5585 	struct scsi_device *sdp;
5586 	struct sdebug_defer *sd_dp;
5587 	int k;
5588 
5589 	if (unlikely(devip == NULL)) {
5590 		if (scsi_result == 0)
5591 			scsi_result = DID_NO_CONNECT << 16;
5592 		goto respond_in_thread;
5593 	}
5594 	sdp = cmnd->device;
5595 
5596 	if (delta_jiff == 0)
5597 		goto respond_in_thread;
5598 
5599 	sqp = get_queue(cmnd);
5600 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5601 
5602 	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5603 		     (scsi_result == 0))) {
5604 		int num_in_q = scsi_device_busy(sdp);
5605 		int qdepth = cmnd->device->queue_depth;
5606 
5607 		if ((num_in_q == qdepth) &&
5608 		    (atomic_inc_return(&sdebug_a_tsf) >=
5609 		     abs(sdebug_every_nth))) {
5610 			atomic_set(&sdebug_a_tsf, 0);
5611 			scsi_result = device_qfull_result;
5612 
5613 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5614 				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5615 					    __func__, num_in_q);
5616 		}
5617 	}
5618 
5619 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5620 	if (unlikely(k >= sdebug_max_queue)) {
5621 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5622 		if (scsi_result)
5623 			goto respond_in_thread;
5624 		scsi_result = device_qfull_result;
5625 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5626 			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
5627 				    __func__, sdebug_max_queue);
5628 		goto respond_in_thread;
5629 	}
5630 	set_bit(k, sqp->in_use_bm);
5631 
5632 	sqcp = sdebug_alloc_queued_cmd(cmnd);
5633 	if (!sqcp) {
5634 		clear_bit(k, sqp->in_use_bm);
5635 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5636 		return SCSI_MLQUEUE_HOST_BUSY;
5637 	}
5638 	sd_dp = &sqcp->sd_dp;
5639 	sd_dp->sqa_idx = k;
5640 	sqp->qc_arr[k] = sqcp;
5641 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5642 
5643 	/* Set the hostwide tag */
5644 	if (sdebug_host_max_queue)
5645 		sd_dp->hc_idx = get_tag(cmnd);
5646 
5647 	if (polled)
5648 		ns_from_boot = ktime_get_boottime_ns();
5649 
5650 	/* one of the resp_*() response functions is called here */
5651 	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5652 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5653 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5654 		delta_jiff = ndelay = 0;
5655 	}
5656 	if (cmnd->result == 0 && scsi_result != 0)
5657 		cmnd->result = scsi_result;
5658 	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5659 		if (atomic_read(&sdeb_inject_pending)) {
5660 			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5661 			atomic_set(&sdeb_inject_pending, 0);
5662 			cmnd->result = check_condition_result;
5663 		}
5664 	}
5665 
5666 	if (unlikely(sdebug_verbose && cmnd->result))
5667 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5668 			    __func__, cmnd->result);
5669 
5670 	if (delta_jiff > 0 || ndelay > 0) {
5671 		ktime_t kt;
5672 
5673 		if (delta_jiff > 0) {
5674 			u64 ns = jiffies_to_nsecs(delta_jiff);
5675 
5676 			if (sdebug_random && ns < U32_MAX) {
5677 				ns = get_random_u32_below((u32)ns);
5678 			} else if (sdebug_random) {
5679 				ns >>= 12;	/* scale to 4 usec precision */
5680 				if (ns < U32_MAX)	/* over 4 hours max */
5681 					ns = get_random_u32_below((u32)ns);
5682 				ns <<= 12;
5683 			}
5684 			kt = ns_to_ktime(ns);
5685 		} else {	/* ndelay has a 4.2 second max */
5686 			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5687 					     (u32)ndelay;
5688 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5689 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5690 
5691 				if (kt <= d) {	/* elapsed duration >= kt */
5692 					spin_lock_irqsave(&sqp->qc_lock, iflags);
5693 					sqp->qc_arr[k] = NULL;
5694 					clear_bit(k, sqp->in_use_bm);
5695 					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5696 					/* call scsi_done() from this thread */
5697 					sdebug_free_queued_cmd(sqcp);
5698 					scsi_done(cmnd);
5699 					return 0;
5700 				}
5701 				/* otherwise reduce kt by elapsed time */
5702 				kt -= d;
5703 			}
5704 		}
5705 		if (sdebug_statistics)
5706 			sd_dp->issuing_cpu = raw_smp_processor_id();
5707 		if (polled) {
5708 			spin_lock_irqsave(&sdsc->lock, flags);
5709 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5710 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5711 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5712 			spin_unlock_irqrestore(&sdsc->lock, flags);
5713 		} else {
5714 			/* schedule the invocation of scsi_done() for a later time */
5715 			spin_lock_irqsave(&sdsc->lock, flags);
5716 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5717 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5718 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5719 			/*
5720 			 * The completion handler will try to grab sdsc->lock,
5721 			 * so there is no chance that the completion handler
5722 			 * will call scsi_done() until we release the lock
5723 			 * here (so ok to keep referencing sdsc).
5724 			 */
5725 			spin_unlock_irqrestore(&sdsc->lock, flags);
5726 		}
5727 	} else {	/* delta_jiff < 0, use work queue */
5728 		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5729 			     atomic_read(&sdeb_inject_pending))) {
5730 			sd_dp->aborted = true;
5731 			atomic_set(&sdeb_inject_pending, 0);
5732 			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5733 				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5734 		}
5735 
5736 		if (sdebug_statistics)
5737 			sd_dp->issuing_cpu = raw_smp_processor_id();
5738 		if (polled) {
5739 			spin_lock_irqsave(&sdsc->lock, flags);
5740 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5741 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5742 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5743 			spin_unlock_irqrestore(&sdsc->lock, flags);
5744 		} else {
5745 			spin_lock_irqsave(&sdsc->lock, flags);
5746 			ASSIGN_QUEUED_CMD(cmnd, sqcp);
5747 			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5748 			schedule_work(&sd_dp->ew.work);
5749 			spin_unlock_irqrestore(&sdsc->lock, flags);
5750 		}
5751 	}
5752 
5753 	return 0;
5754 
5755 respond_in_thread:	/* call back to mid-layer using invocation thread */
5756 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5757 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5758 	if (cmnd->result == 0 && scsi_result != 0)
5759 		cmnd->result = scsi_result;
5760 	scsi_done(cmnd);
5761 	return 0;
5762 }
5763 
5764 /* Note: The following macros create attribute files in the
5765    /sys/module/scsi_debug/parameters directory. Unfortunately this
5766    driver is not notified when one of those files is changed, so it
5767    cannot trigger the auxiliary actions it performs when the corresponding
5768    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5769  */
5770 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5771 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5772 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5773 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5774 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5775 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5776 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5777 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5778 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5779 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5780 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5781 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5782 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5783 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5784 module_param_string(inq_product, sdebug_inq_product_id,
5785 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5786 module_param_string(inq_rev, sdebug_inq_product_rev,
5787 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5788 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5789 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5790 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5791 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5792 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5793 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5794 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5795 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5796 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5797 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5798 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5799 		   S_IRUGO | S_IWUSR);
5800 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5801 		   S_IRUGO | S_IWUSR);
5802 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5803 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5804 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5805 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5806 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5807 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5808 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5809 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5810 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5811 module_param_named(per_host_store, sdebug_per_host_store, bool,
5812 		   S_IRUGO | S_IWUSR);
5813 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5814 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5815 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5816 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5817 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5818 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5819 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5820 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5821 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5822 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5823 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5824 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5825 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5826 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5827 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5828 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5829 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5830 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5831 		   S_IRUGO | S_IWUSR);
5832 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5833 module_param_named(write_same_length, sdebug_write_same_length, int,
5834 		   S_IRUGO | S_IWUSR);
5835 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5836 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5837 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5838 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5839 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5840 
5841 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5842 MODULE_DESCRIPTION("SCSI debug adapter driver");
5843 MODULE_LICENSE("GPL");
5844 MODULE_VERSION(SDEBUG_VERSION);
5845 
5846 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5847 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5848 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5849 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5850 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5851 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5852 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5853 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5854 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5855 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5856 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5857 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5858 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5859 MODULE_PARM_DESC(host_max_queue,
5860 		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5861 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5862 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5863 		 SDEBUG_VERSION "\")");
5864 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5865 MODULE_PARM_DESC(lbprz,
5866 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5867 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5868 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5869 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5870 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5871 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5872 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5873 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5874 MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return a MEDIUM error");
5875 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5876 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5877 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5878 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5879 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5880 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5881 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5882 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5883 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5884 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5885 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5886 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5887 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5888 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5889 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5890 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5891 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5892 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5893 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5894 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5895 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5896 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5897 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5898 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5899 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5900 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5901 MODULE_PARM_DESC(uuid_ctl,
5902 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5903 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5904 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5905 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5906 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5907 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5908 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5909 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5910 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5911 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5912 
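/*
 * Example invocation (hypothetical values, using only the parameters
 * described above): simulate two targets with four LUNs each, backed
 * by a 256 MiB shared RAM store, with verbose noise enabled:
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 opts=1
 */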
5913 #define SDEBUG_INFO_LEN 256
5914 static char sdebug_info[SDEBUG_INFO_LEN];
5915 
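/* Single static buffer shared by all simulated hosts; scsi_debug_info()
 * rewrites it on every call.
 */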
5916 static const char *scsi_debug_info(struct Scsi_Host *shp)
5917 {
5918 	int k;
5919 
5920 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5921 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5922 	if (k >= (SDEBUG_INFO_LEN - 1))
5923 		return sdebug_info;
5924 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5925 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5926 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5927 		  "statistics", (int)sdebug_statistics);
5928 	return sdebug_info;
5929 }
5930 
5931 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5932 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5933 				 int length)
5934 {
5935 	char arr[16];
5936 	int opts;
5937 	int minLen = length > 15 ? 15 : length;
5938 
5939 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5940 		return -EACCES;
5941 	memcpy(arr, buffer, minLen);
5942 	arr[minLen] = '\0';
5943 	if (1 != sscanf(arr, "%d", &opts))
5944 		return -EINVAL;
5945 	sdebug_opts = opts;
5946 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5947 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5948 	if (sdebug_every_nth != 0)
5949 		tweak_cmnd_count();
5950 	return length;
5951 }
5952 
5953 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5954  * same for each scsi_debug host (if more than one). Some of the counters
5955  * shown are not atomic, so they may be inaccurate on a busy system. */
5956 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5957 {
5958 	int f, j, l;
5959 	struct sdebug_queue *sqp;
5960 	struct sdebug_host_info *sdhp;
5961 
5962 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5963 		   SDEBUG_VERSION, sdebug_version_date);
5964 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5965 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5966 		   sdebug_opts, sdebug_every_nth);
5967 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5968 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5969 		   sdebug_sector_size, "bytes");
5970 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5971 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5972 		   num_aborts);
5973 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5974 		   num_dev_resets, num_target_resets, num_bus_resets,
5975 		   num_host_resets);
5976 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5977 		   dix_reads, dix_writes, dif_errors);
5978 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5979 		   sdebug_statistics);
5980 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5981 		   atomic_read(&sdebug_cmnd_count),
5982 		   atomic_read(&sdebug_completions),
5983 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5984 		   atomic_read(&sdebug_a_tsf),
5985 		   atomic_read(&sdeb_mq_poll_count));
5986 
5987 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5988 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5989 		seq_printf(m, "  queue %d:\n", j);
5990 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5991 		if (f != sdebug_max_queue) {
5992 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5993 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5994 				   "first,last bits", f, l);
5995 		}
5996 	}
5997 
5998 	seq_printf(m, "this host_no=%d\n", host->host_no);
5999 	if (!xa_empty(per_store_ap)) {
6000 		bool niu;
6001 		int idx;
6002 		unsigned long l_idx;
6003 		struct sdeb_store_info *sip;
6004 
6005 		seq_puts(m, "\nhost list:\n");
6006 		j = 0;
6007 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6008 			idx = sdhp->si_idx;
6009 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
6010 				   sdhp->shost->host_no, idx);
6011 			++j;
6012 		}
6013 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
6014 			   sdeb_most_recent_idx);
6015 		j = 0;
6016 		xa_for_each(per_store_ap, l_idx, sip) {
6017 			niu = xa_get_mark(per_store_ap, l_idx,
6018 					  SDEB_XA_NOT_IN_USE);
6019 			idx = (int)l_idx;
6020 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
6021 				   (niu ? "  not_in_use" : ""));
6022 			++j;
6023 		}
6024 	}
6025 	return 0;
6026 }
6027 
6028 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6029 {
6030 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6031 }
6032 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
6033  * of delay is jiffies.
6034  */
6035 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6036 			   size_t count)
6037 {
6038 	int jdelay, res;
6039 
6040 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6041 		res = count;
6042 		if (sdebug_jdelay != jdelay) {
6043 			struct sdebug_host_info *sdhp;
6044 
6045 			mutex_lock(&sdebug_host_list_mutex);
6046 			block_unblock_all_queues(true);
6047 
6048 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6049 				struct Scsi_Host *shost = sdhp->shost;
6050 
6051 				if (scsi_host_busy(shost)) {
6052 					res = -EBUSY;   /* queued commands */
6053 					break;
6054 				}
6055 			}
6056 			if (res > 0) {
6057 				sdebug_jdelay = jdelay;
6058 				sdebug_ndelay = 0;
6059 			}
6060 			block_unblock_all_queues(false);
6061 			mutex_unlock(&sdebug_host_list_mutex);
6062 		}
6063 		return res;
6064 	}
6065 	return -EINVAL;
6066 }
6067 static DRIVER_ATTR_RW(delay);
6068 
6069 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6070 {
6071 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6072 }
6073 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6074 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6075 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6076 			    size_t count)
6077 {
6078 	int ndelay, res;
6079 
6080 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6081 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6082 		res = count;
6083 		if (sdebug_ndelay != ndelay) {
6084 			struct sdebug_host_info *sdhp;
6085 
6086 			mutex_lock(&sdebug_host_list_mutex);
6087 			block_unblock_all_queues(true);
6088 
6089 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6090 				struct Scsi_Host *shost = sdhp->shost;
6091 
6092 				if (scsi_host_busy(shost)) {
6093 					res = -EBUSY;   /* queued commands */
6094 					break;
6095 				}
6096 			}
6097 
6098 			if (res > 0) {
6099 				sdebug_ndelay = ndelay;
6100 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
6101 							: DEF_JDELAY;
6102 			}
6103 			block_unblock_all_queues(false);
6104 			mutex_unlock(&sdebug_host_list_mutex);
6105 		}
6106 		return res;
6107 	}
6108 	return -EINVAL;
6109 }
6110 static DRIVER_ATTR_RW(ndelay);
6111 
6112 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6113 {
6114 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6115 }
6116 
6117 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6118 			  size_t count)
6119 {
6120 	int opts;
6121 	char work[20];
6122 
6123 	if (sscanf(buf, "%10s", work) == 1) {
6124 		if (strncasecmp(work, "0x", 2) == 0) {
6125 			if (kstrtoint(work + 2, 16, &opts) == 0)
6126 				goto opts_done;
6127 		} else {
6128 			if (kstrtoint(work, 10, &opts) == 0)
6129 				goto opts_done;
6130 		}
6131 	}
6132 	return -EINVAL;
6133 opts_done:
6134 	sdebug_opts = opts;
6135 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6136 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6137 	tweak_cmnd_count();
6138 	return count;
6139 }
6140 static DRIVER_ATTR_RW(opts);
6141 
6142 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6143 {
6144 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6145 }
6146 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6147 			   size_t count)
6148 {
6149 	int n;
6150 
6151 	/* Cannot change from or to TYPE_ZBC with sysfs */
6152 	if (sdebug_ptype == TYPE_ZBC)
6153 		return -EINVAL;
6154 
6155 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6156 		if (n == TYPE_ZBC)
6157 			return -EINVAL;
6158 		sdebug_ptype = n;
6159 		return count;
6160 	}
6161 	return -EINVAL;
6162 }
6163 static DRIVER_ATTR_RW(ptype);
6164 
6165 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6166 {
6167 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6168 }
6169 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6170 			    size_t count)
6171 {
6172 	int n;
6173 
6174 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6175 		sdebug_dsense = n;
6176 		return count;
6177 	}
6178 	return -EINVAL;
6179 }
6180 static DRIVER_ATTR_RW(dsense);
6181 
6182 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6183 {
6184 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6185 }
6186 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6187 			     size_t count)
6188 {
6189 	int n, idx;
6190 
6191 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6192 		bool want_store = (n == 0);
6193 		struct sdebug_host_info *sdhp;
6194 
6195 		n = (n > 0);
6196 		sdebug_fake_rw = (sdebug_fake_rw > 0);
6197 		if (sdebug_fake_rw == n)
6198 			return count;	/* not transitioning so do nothing */
6199 
6200 		if (want_store) {	/* 1 --> 0 transition, set up store */
6201 			if (sdeb_first_idx < 0) {
6202 				idx = sdebug_add_store();
6203 				if (idx < 0)
6204 					return idx;
6205 			} else {
6206 				idx = sdeb_first_idx;
6207 				xa_clear_mark(per_store_ap, idx,
6208 					      SDEB_XA_NOT_IN_USE);
6209 			}
6210 			/* make all hosts use same store */
6211 			list_for_each_entry(sdhp, &sdebug_host_list,
6212 					    host_list) {
6213 				if (sdhp->si_idx != idx) {
6214 					xa_set_mark(per_store_ap, sdhp->si_idx,
6215 						    SDEB_XA_NOT_IN_USE);
6216 					sdhp->si_idx = idx;
6217 				}
6218 			}
6219 			sdeb_most_recent_idx = idx;
6220 		} else {	/* 0 --> 1 transition is trigger for shrink */
6221 			sdebug_erase_all_stores(true /* apart from first */);
6222 		}
6223 		sdebug_fake_rw = n;
6224 		return count;
6225 	}
6226 	return -EINVAL;
6227 }
6228 static DRIVER_ATTR_RW(fake_rw);
6229 
6230 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6231 {
6232 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6233 }
6234 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6235 			      size_t count)
6236 {
6237 	int n;
6238 
6239 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6240 		sdebug_no_lun_0 = n;
6241 		return count;
6242 	}
6243 	return -EINVAL;
6244 }
6245 static DRIVER_ATTR_RW(no_lun_0);
6246 
6247 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6248 {
6249 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6250 }
6251 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6252 			      size_t count)
6253 {
6254 	int n;
6255 
6256 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6257 		sdebug_num_tgts = n;
6258 		sdebug_max_tgts_luns();
6259 		return count;
6260 	}
6261 	return -EINVAL;
6262 }
6263 static DRIVER_ATTR_RW(num_tgts);
6264 
6265 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6266 {
6267 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6268 }
6269 static DRIVER_ATTR_RO(dev_size_mb);
6270 
6271 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6272 {
6273 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6274 }
6275 
6276 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6277 				    size_t count)
6278 {
6279 	bool v;
6280 
6281 	if (kstrtobool(buf, &v))
6282 		return -EINVAL;
6283 
6284 	sdebug_per_host_store = v;
6285 	return count;
6286 }
6287 static DRIVER_ATTR_RW(per_host_store);
6288 
6289 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6290 {
6291 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6292 }
6293 static DRIVER_ATTR_RO(num_parts);
6294 
6295 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6296 {
6297 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6298 }
6299 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6300 			       size_t count)
6301 {
6302 	int nth;
6303 	char work[20];
6304 
6305 	if (sscanf(buf, "%10s", work) == 1) {
6306 		if (strncasecmp(work, "0x", 2) == 0) {
6307 			if (kstrtoint(work + 2, 16, &nth) == 0)
6308 				goto every_nth_done;
6309 		} else {
6310 			if (kstrtoint(work, 10, &nth) == 0)
6311 				goto every_nth_done;
6312 		}
6313 	}
6314 	return -EINVAL;
6315 
6316 every_nth_done:
6317 	sdebug_every_nth = nth;
6318 	if (nth && !sdebug_statistics) {
6319 		pr_info("every_nth needs statistics=1, set it\n");
6320 		sdebug_statistics = true;
6321 	}
6322 	tweak_cmnd_count();
6323 	return count;
6324 }
6325 static DRIVER_ATTR_RW(every_nth);
6326 
6327 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6328 {
6329 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6330 }
6331 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6332 				size_t count)
6333 {
6334 	int n;
6335 	bool changed;
6336 
6337 	if (kstrtoint(buf, 0, &n))
6338 		return -EINVAL;
6339 	if (n >= 0) {
6340 		if (n > (int)SAM_LUN_AM_FLAT) {
6341 			pr_warn("only LUN address methods 0 and 1 are supported\n");
6342 			return -EINVAL;
6343 		}
6344 		changed = ((int)sdebug_lun_am != n);
6345 		sdebug_lun_am = n;
6346 		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
6347 			struct sdebug_host_info *sdhp;
6348 			struct sdebug_dev_info *dp;
6349 
6350 			mutex_lock(&sdebug_host_list_mutex);
6351 			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6352 				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6353 					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6354 				}
6355 			}
6356 			mutex_unlock(&sdebug_host_list_mutex);
6357 		}
6358 		return count;
6359 	}
6360 	return -EINVAL;
6361 }
6362 static DRIVER_ATTR_RW(lun_format);
6363 
6364 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6365 {
6366 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6367 }
6368 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6369 			      size_t count)
6370 {
6371 	int n;
6372 	bool changed;
6373 
6374 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6375 		if (n > 256) {
6376 			pr_warn("max_luns can be no more than 256\n");
6377 			return -EINVAL;
6378 		}
6379 		changed = (sdebug_max_luns != n);
6380 		sdebug_max_luns = n;
6381 		sdebug_max_tgts_luns();
6382 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6383 			struct sdebug_host_info *sdhp;
6384 			struct sdebug_dev_info *dp;
6385 
6386 			mutex_lock(&sdebug_host_list_mutex);
6387 			list_for_each_entry(sdhp, &sdebug_host_list,
6388 					    host_list) {
6389 				list_for_each_entry(dp, &sdhp->dev_info_list,
6390 						    dev_list) {
6391 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6392 						dp->uas_bm);
6393 				}
6394 			}
6395 			mutex_unlock(&sdebug_host_list_mutex);
6396 		}
6397 		return count;
6398 	}
6399 	return -EINVAL;
6400 }
6401 static DRIVER_ATTR_RW(max_luns);
6402 
6403 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6404 {
6405 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6406 }
6407 /* N.B. max_queue can now only be changed while no hosts are attached,
6408  * so there are no queued commands when the new value takes effect. */
6409 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6410 			       size_t count)
6411 {
6412 	int n;
6413 
6414 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6415 	    (n <= SDEBUG_CANQUEUE) &&
6416 	    (sdebug_host_max_queue == 0)) {
6417 		mutex_lock(&sdebug_host_list_mutex);
6418 
6419 		/* We may only change sdebug_max_queue when we have no shosts */
6420 		if (list_empty(&sdebug_host_list))
6421 			sdebug_max_queue = n;
6422 		else
6423 			count = -EBUSY;
6424 		mutex_unlock(&sdebug_host_list_mutex);
6425 		return count;
6426 	}
6427 	return -EINVAL;
6428 }
6429 static DRIVER_ATTR_RW(max_queue);
6430 
6431 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6432 {
6433 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6434 }
6435 
6436 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6437 {
6438 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6439 }
6440 
6441 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6442 {
6443 	bool v;
6444 
6445 	if (kstrtobool(buf, &v))
6446 		return -EINVAL;
6447 
6448 	sdebug_no_rwlock = v;
6449 	return count;
6450 }
6451 static DRIVER_ATTR_RW(no_rwlock);
6452 
6453 /*
6454  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6455  * in range [0, sdebug_host_max_queue), we can't change it.
6456  */
6457 static DRIVER_ATTR_RO(host_max_queue);
6458 
6459 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6460 {
6461 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6462 }
6463 static DRIVER_ATTR_RO(no_uld);
6464 
6465 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6466 {
6467 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6468 }
6469 static DRIVER_ATTR_RO(scsi_level);
6470 
6471 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6472 {
6473 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6474 }
6475 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6476 				size_t count)
6477 {
6478 	int n;
6479 	bool changed;
6480 
6481 	/* Ignore capacity change for ZBC drives for now */
6482 	if (sdeb_zbc_in_use)
6483 		return -ENOTSUPP;
6484 
6485 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6486 		changed = (sdebug_virtual_gb != n);
6487 		sdebug_virtual_gb = n;
6488 		sdebug_capacity = get_sdebug_capacity();
6489 		if (changed) {
6490 			struct sdebug_host_info *sdhp;
6491 			struct sdebug_dev_info *dp;
6492 
6493 			mutex_lock(&sdebug_host_list_mutex);
6494 			list_for_each_entry(sdhp, &sdebug_host_list,
6495 					    host_list) {
6496 				list_for_each_entry(dp, &sdhp->dev_info_list,
6497 						    dev_list) {
6498 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6499 						dp->uas_bm);
6500 				}
6501 			}
6502 			mutex_unlock(&sdebug_host_list_mutex);
6503 		}
6504 		return count;
6505 	}
6506 	return -EINVAL;
6507 }
6508 static DRIVER_ATTR_RW(virtual_gb);
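/*
 * Note that a successful virtual_gb store recomputes sdebug_capacity and,
 * when the value actually changed, sets a CAPACITY_CHANGED unit attention
 * on every device so that initiators re-read the capacity.
 */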
6509 
6510 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6511 {
6512 	/* show the absolute number of currently active hosts */
6513 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6514 }
6515 
6516 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6517 			      size_t count)
6518 {
6519 	bool found;
6520 	unsigned long idx;
6521 	struct sdeb_store_info *sip;
6522 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6523 	int delta_hosts;
6524 
6525 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6526 		return -EINVAL;
6527 	if (delta_hosts > 0) {
6528 		do {
6529 			found = false;
6530 			if (want_phs) {
6531 				xa_for_each_marked(per_store_ap, idx, sip,
6532 						   SDEB_XA_NOT_IN_USE) {
6533 					sdeb_most_recent_idx = (int)idx;
6534 					found = true;
6535 					break;
6536 				}
6537 				if (found)	/* re-use case */
6538 					sdebug_add_host_helper((int)idx);
6539 				else
6540 					sdebug_do_add_host(true);
6541 			} else {
6542 				sdebug_do_add_host(false);
6543 			}
6544 		} while (--delta_hosts);
6545 	} else if (delta_hosts < 0) {
6546 		do {
6547 			sdebug_do_remove_host(false);
6548 		} while (++delta_hosts);
6549 	}
6550 	return count;
6551 }
6552 static DRIVER_ATTR_RW(add_host);
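/*
 * Usage sketch (hypothetical values): a positive count adds that many
 * hosts, re-using any per-host store marked SDEB_XA_NOT_IN_USE before
 * allocating a new one; a negative count removes hosts, last added first:
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *   echo -2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */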
6553 
6554 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6555 {
6556 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6557 }
6558 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6559 				    size_t count)
6560 {
6561 	int n;
6562 
6563 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6564 		sdebug_vpd_use_hostno = n;
6565 		return count;
6566 	}
6567 	return -EINVAL;
6568 }
6569 static DRIVER_ATTR_RW(vpd_use_hostno);
6570 
6571 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6572 {
6573 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6574 }
6575 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6576 				size_t count)
6577 {
6578 	int n;
6579 
6580 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6581 		if (n > 0)
6582 			sdebug_statistics = true;
6583 		else {
6584 			clear_queue_stats();
6585 			sdebug_statistics = false;
6586 		}
6587 		return count;
6588 	}
6589 	return -EINVAL;
6590 }
6591 static DRIVER_ATTR_RW(statistics);
6592 
6593 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6594 {
6595 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6596 }
6597 static DRIVER_ATTR_RO(sector_size);
6598 
6599 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6600 {
6601 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6602 }
6603 static DRIVER_ATTR_RO(submit_queues);
6604 
6605 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6606 {
6607 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6608 }
6609 static DRIVER_ATTR_RO(dix);
6610 
6611 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6612 {
6613 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6614 }
6615 static DRIVER_ATTR_RO(dif);
6616 
6617 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6618 {
6619 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6620 }
6621 static DRIVER_ATTR_RO(guard);
6622 
6623 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6624 {
6625 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6626 }
6627 static DRIVER_ATTR_RO(ato);
6628 
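/*
 * Show the provisioning map of the first store as a bitmap range list
 * ("%*pbl", e.g. "0-1,16" when blocks 0, 1 and 16 are mapped), or the
 * whole "0-N" range when logical block provisioning is disabled.
 */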
6629 static ssize_t map_show(struct device_driver *ddp, char *buf)
6630 {
6631 	ssize_t count = 0;
6632 
6633 	if (!scsi_debug_lbp())
6634 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6635 				 sdebug_store_sectors);
6636 
6637 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6638 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6639 
6640 		if (sip)
6641 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6642 					  (int)map_size, sip->map_storep);
6643 	}
6644 	buf[count++] = '\n';
6645 	buf[count] = '\0';
6646 
6647 	return count;
6648 }
6649 static DRIVER_ATTR_RO(map);
6650 
6651 static ssize_t random_show(struct device_driver *ddp, char *buf)
6652 {
6653 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6654 }
6655 
6656 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6657 			    size_t count)
6658 {
6659 	bool v;
6660 
6661 	if (kstrtobool(buf, &v))
6662 		return -EINVAL;
6663 
6664 	sdebug_random = v;
6665 	return count;
6666 }
6667 static DRIVER_ATTR_RW(random);
6668 
6669 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6670 {
6671 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6672 }
6673 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6674 			       size_t count)
6675 {
6676 	int n;
6677 
6678 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6679 		sdebug_removable = (n > 0);
6680 		return count;
6681 	}
6682 	return -EINVAL;
6683 }
6684 static DRIVER_ATTR_RW(removable);
6685 
6686 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6687 {
6688 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6689 }
6690 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6691 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6692 			       size_t count)
6693 {
6694 	int n;
6695 
6696 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6697 		sdebug_host_lock = (n > 0);
6698 		return count;
6699 	}
6700 	return -EINVAL;
6701 }
6702 static DRIVER_ATTR_RW(host_lock);
6703 
6704 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6705 {
6706 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6707 }
6708 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6709 			    size_t count)
6710 {
6711 	int n;
6712 
6713 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6714 		sdebug_strict = (n > 0);
6715 		return count;
6716 	}
6717 	return -EINVAL;
6718 }
6719 static DRIVER_ATTR_RW(strict);
6720 
6721 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6722 {
6723 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6724 }
6725 static DRIVER_ATTR_RO(uuid_ctl);
6726 
6727 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6728 {
6729 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6730 }
6731 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6732 			     size_t count)
6733 {
6734 	int ret, n;
6735 
6736 	ret = kstrtoint(buf, 0, &n);
6737 	if (ret)
6738 		return ret;
6739 	sdebug_cdb_len = n;
6740 	all_config_cdb_len();
6741 	return count;
6742 }
6743 static DRIVER_ATTR_RW(cdb_len);
6744 
6745 static const char * const zbc_model_strs_a[] = {
6746 	[BLK_ZONED_NONE] = "none",
6747 	[BLK_ZONED_HA]   = "host-aware",
6748 	[BLK_ZONED_HM]   = "host-managed",
6749 };
6750 
6751 static const char * const zbc_model_strs_b[] = {
6752 	[BLK_ZONED_NONE] = "no",
6753 	[BLK_ZONED_HA]   = "aware",
6754 	[BLK_ZONED_HM]   = "managed",
6755 };
6756 
6757 static const char * const zbc_model_strs_c[] = {
6758 	[BLK_ZONED_NONE] = "0",
6759 	[BLK_ZONED_HA]   = "1",
6760 	[BLK_ZONED_HM]   = "2",
6761 };
6762 
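/*
 * Map a user supplied zbc= string to a BLK_ZONED_* model. Any of the
 * three spellings above is accepted; e.g. "host-managed", "managed" and
 * "2" all select BLK_ZONED_HM.
 */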
6763 static int sdeb_zbc_model_str(const char *cp)
6764 {
6765 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6766 
6767 	if (res < 0) {
6768 		res = sysfs_match_string(zbc_model_strs_b, cp);
6769 		if (res < 0) {
6770 			res = sysfs_match_string(zbc_model_strs_c, cp);
6771 			if (res < 0)
6772 				return -EINVAL;
6773 		}
6774 	}
6775 	return res;
6776 }
6777 
6778 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6779 {
6780 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6781 			 zbc_model_strs_a[sdeb_zbc_model]);
6782 }
6783 static DRIVER_ATTR_RO(zbc);
6784 
6785 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6786 {
6787 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6788 }
6789 static DRIVER_ATTR_RO(tur_ms_to_ready);
6790 
6791 /* Note: The following array creates attribute files in the
6792  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6793  * files (over those found in the /sys/module/scsi_debug/parameters
6794  * directory) is that auxiliary actions can be triggered when an attribute
6795  * is changed. For example see: add_host_store() above.
6796  */
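/*
 * A minimal, hypothetical session against these files:
 *
 *   cat /sys/bus/pseudo/drivers/scsi_debug/scsi_level
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/statistics
 */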
6797 
6798 static struct attribute *sdebug_drv_attrs[] = {
6799 	&driver_attr_delay.attr,
6800 	&driver_attr_opts.attr,
6801 	&driver_attr_ptype.attr,
6802 	&driver_attr_dsense.attr,
6803 	&driver_attr_fake_rw.attr,
6804 	&driver_attr_host_max_queue.attr,
6805 	&driver_attr_no_lun_0.attr,
6806 	&driver_attr_num_tgts.attr,
6807 	&driver_attr_dev_size_mb.attr,
6808 	&driver_attr_num_parts.attr,
6809 	&driver_attr_every_nth.attr,
6810 	&driver_attr_lun_format.attr,
6811 	&driver_attr_max_luns.attr,
6812 	&driver_attr_max_queue.attr,
6813 	&driver_attr_no_rwlock.attr,
6814 	&driver_attr_no_uld.attr,
6815 	&driver_attr_scsi_level.attr,
6816 	&driver_attr_virtual_gb.attr,
6817 	&driver_attr_add_host.attr,
6818 	&driver_attr_per_host_store.attr,
6819 	&driver_attr_vpd_use_hostno.attr,
6820 	&driver_attr_sector_size.attr,
6821 	&driver_attr_statistics.attr,
6822 	&driver_attr_submit_queues.attr,
6823 	&driver_attr_dix.attr,
6824 	&driver_attr_dif.attr,
6825 	&driver_attr_guard.attr,
6826 	&driver_attr_ato.attr,
6827 	&driver_attr_map.attr,
6828 	&driver_attr_random.attr,
6829 	&driver_attr_removable.attr,
6830 	&driver_attr_host_lock.attr,
6831 	&driver_attr_ndelay.attr,
6832 	&driver_attr_strict.attr,
6833 	&driver_attr_uuid_ctl.attr,
6834 	&driver_attr_cdb_len.attr,
6835 	&driver_attr_tur_ms_to_ready.attr,
6836 	&driver_attr_zbc.attr,
6837 	NULL,
6838 };
6839 ATTRIBUTE_GROUPS(sdebug_drv);
6840 
6841 static struct device *pseudo_primary;
6842 
6843 static int __init scsi_debug_init(void)
6844 {
6845 	bool want_store = (sdebug_fake_rw == 0);
6846 	unsigned long sz;
6847 	int k, ret, hosts_to_add;
6848 	int idx = -1;
6849 
6850 	ramdisk_lck_a[0] = &atomic_rw;
6851 	ramdisk_lck_a[1] = &atomic_rw2;
6852 
6853 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6854 		pr_warn("ndelay must be less than 1 second, ignored\n");
6855 		sdebug_ndelay = 0;
6856 	} else if (sdebug_ndelay > 0)
6857 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6858 
6859 	switch (sdebug_sector_size) {
6860 	case  512:
6861 	case 1024:
6862 	case 2048:
6863 	case 4096:
6864 		break;
6865 	default:
6866 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6867 		return -EINVAL;
6868 	}
6869 
6870 	switch (sdebug_dif) {
6871 	case T10_PI_TYPE0_PROTECTION:
6872 		break;
6873 	case T10_PI_TYPE1_PROTECTION:
6874 	case T10_PI_TYPE2_PROTECTION:
6875 	case T10_PI_TYPE3_PROTECTION:
6876 		have_dif_prot = true;
6877 		break;
6878 
6879 	default:
6880 		pr_err("dif must be 0, 1, 2 or 3\n");
6881 		return -EINVAL;
6882 	}
6883 
6884 	if (sdebug_num_tgts < 0) {
6885 		pr_err("num_tgts must be >= 0\n");
6886 		return -EINVAL;
6887 	}
6888 
6889 	if (sdebug_guard > 1) {
6890 		pr_err("guard must be 0 or 1\n");
6891 		return -EINVAL;
6892 	}
6893 
6894 	if (sdebug_ato > 1) {
6895 		pr_err("ato must be 0 or 1\n");
6896 		return -EINVAL;
6897 	}
6898 
6899 	if (sdebug_physblk_exp > 15) {
6900 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6901 		return -EINVAL;
6902 	}
6903 
6904 	sdebug_lun_am = sdebug_lun_am_i;
6905 	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6906 		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6907 		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6908 	}
6909 
6910 	if (sdebug_max_luns > 256) {
6911 		if (sdebug_max_luns > 16384) {
6912 			pr_warn("max_luns can be no more than 16384, using default\n");
6913 			sdebug_max_luns = DEF_MAX_LUNS;
6914 		}
6915 		sdebug_lun_am = SAM_LUN_AM_FLAT;
6916 	}
6917 
6918 	if (sdebug_lowest_aligned > 0x3fff) {
6919 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6920 		return -EINVAL;
6921 	}
6922 
6923 	if (submit_queues < 1) {
6924 		pr_err("submit_queues must be 1 or more\n");
6925 		return -EINVAL;
6926 	}
6927 
6928 	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6929 		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6930 		return -EINVAL;
6931 	}
6932 
6933 	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6934 	    (sdebug_host_max_queue < 0)) {
6935 		pr_err("host_max_queue must be in range [0, %d]\n",
6936 		       SDEBUG_CANQUEUE);
6937 		return -EINVAL;
6938 	}
6939 
6940 	if (sdebug_host_max_queue &&
6941 	    (sdebug_max_queue != sdebug_host_max_queue)) {
6942 		sdebug_max_queue = sdebug_host_max_queue;
6943 		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6944 			sdebug_max_queue);
6945 	}
6946 
6947 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6948 			       GFP_KERNEL);
6949 	if (sdebug_q_arr == NULL)
6950 		return -ENOMEM;
6951 	for (k = 0; k < submit_queues; ++k)
6952 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6953 
6954 	/*
6955 	 * check for host managed zoned block device specified with
6956 	 * ptype=0x14 or zbc=XXX.
6957 	 */
6958 	if (sdebug_ptype == TYPE_ZBC) {
6959 		sdeb_zbc_model = BLK_ZONED_HM;
6960 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6961 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6962 		if (k < 0) {
6963 			ret = k;
6964 			goto free_q_arr;
6965 		}
6966 		sdeb_zbc_model = k;
6967 		switch (sdeb_zbc_model) {
6968 		case BLK_ZONED_NONE:
6969 		case BLK_ZONED_HA:
6970 			sdebug_ptype = TYPE_DISK;
6971 			break;
6972 		case BLK_ZONED_HM:
6973 			sdebug_ptype = TYPE_ZBC;
6974 			break;
6975 		default:
6976 			pr_err("Invalid ZBC model\n");
6977 			ret = -EINVAL;
6978 			goto free_q_arr;
6979 		}
6980 	}
6981 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6982 		sdeb_zbc_in_use = true;
6983 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6984 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6985 	}
6986 
6987 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6988 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6989 	if (sdebug_dev_size_mb < 1)
6990 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6991 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6992 	sdebug_store_sectors = sz / sdebug_sector_size;
6993 	sdebug_capacity = get_sdebug_capacity();
6994 
6995 	/* play around with geometry, don't waste too much on track 0 */
6996 	sdebug_heads = 8;
6997 	sdebug_sectors_per = 32;
6998 	if (sdebug_dev_size_mb >= 256)
6999 		sdebug_heads = 64;
7000 	else if (sdebug_dev_size_mb >= 16)
7001 		sdebug_heads = 32;
7002 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7003 			       (sdebug_sectors_per * sdebug_heads);
7004 	if (sdebug_cylinders_per >= 1024) {
7005 		/* other LLDs do this; implies >= 1GB ram disk ... */
7006 		sdebug_heads = 255;
7007 		sdebug_sectors_per = 63;
7008 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
7009 			       (sdebug_sectors_per * sdebug_heads);
7010 	}
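	/*
	 * Worked example: a 1 GB store with 512 byte sectors has 2097152
	 * sectors; 2097152 / (32 * 64) = 1024 cylinders, so the fallback
	 * above applies and 2097152 / (63 * 255) gives ~130 cylinders.
	 */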
7011 	if (scsi_debug_lbp()) {
7012 		sdebug_unmap_max_blocks =
7013 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
7014 
7015 		sdebug_unmap_max_desc =
7016 			clamp(sdebug_unmap_max_desc, 0U, 256U);
7017 
7018 		sdebug_unmap_granularity =
7019 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
7020 
7021 		if (sdebug_unmap_alignment &&
7022 		    sdebug_unmap_granularity <=
7023 		    sdebug_unmap_alignment) {
7024 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
7025 			ret = -EINVAL;
7026 			goto free_q_arr;
7027 		}
7028 	}
7029 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
7030 	if (want_store) {
7031 		idx = sdebug_add_store();
7032 		if (idx < 0) {
7033 			ret = idx;
7034 			goto free_q_arr;
7035 		}
7036 	}
7037 
7038 	pseudo_primary = root_device_register("pseudo_0");
7039 	if (IS_ERR(pseudo_primary)) {
7040 		pr_warn("root_device_register() error\n");
7041 		ret = PTR_ERR(pseudo_primary);
7042 		goto free_vm;
7043 	}
7044 	ret = bus_register(&pseudo_lld_bus);
7045 	if (ret < 0) {
7046 		pr_warn("bus_register error: %d\n", ret);
7047 		goto dev_unreg;
7048 	}
7049 	ret = driver_register(&sdebug_driverfs_driver);
7050 	if (ret < 0) {
7051 		pr_warn("driver_register error: %d\n", ret);
7052 		goto bus_unreg;
7053 	}
7054 
7055 	hosts_to_add = sdebug_add_host;
7056 	sdebug_add_host = 0;
7057 
7058 	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
7059 	if (!queued_cmd_cache) {
		ret = -ENOMEM;	/* fix: was falling through with ret == 0 */
7060 		goto driver_unreg;
	}
7061 
7062 	for (k = 0; k < hosts_to_add; k++) {
7063 		if (want_store && k == 0) {
7064 			ret = sdebug_add_host_helper(idx);
7065 			if (ret < 0) {
7066 				pr_err("add_host_helper k=%d, error=%d\n",
7067 				       k, -ret);
7068 				break;
7069 			}
7070 		} else {
7071 			ret = sdebug_do_add_host(want_store &&
7072 						 sdebug_per_host_store);
7073 			if (ret < 0) {
7074 				pr_err("add_host k=%d error=%d\n", k, -ret);
7075 				break;
7076 			}
7077 		}
7078 	}
7079 	if (sdebug_verbose)
7080 		pr_info("built %d host(s)\n", sdebug_num_hosts);
7081 
7082 	return 0;
7083 
7084 driver_unreg:
7085 	driver_unregister(&sdebug_driverfs_driver);
7086 bus_unreg:
7087 	bus_unregister(&pseudo_lld_bus);
7088 dev_unreg:
7089 	root_device_unregister(pseudo_primary);
7090 free_vm:
7091 	sdebug_erase_store(idx, NULL);
7092 free_q_arr:
7093 	kfree(sdebug_q_arr);
7094 	return ret;
7095 }
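/*
 * Load-time sketch (hypothetical parameter values; the names mirror the
 * sdebug_* variables validated above):
 *
 *   modprobe scsi_debug dev_size_mb=256 max_luns=2 add_host=2
 */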
7096 
7097 static void __exit scsi_debug_exit(void)
7098 {
7099 	int k = sdebug_num_hosts;
7100 
7101 	for (; k; k--)
7102 		sdebug_do_remove_host(true);
7103 	kmem_cache_destroy(queued_cmd_cache);
7104 	driver_unregister(&sdebug_driverfs_driver);
7105 	bus_unregister(&pseudo_lld_bus);
7106 	root_device_unregister(pseudo_primary);
7107 
7108 	sdebug_erase_all_stores(false);
7109 	xa_destroy(per_store_ap);
7110 	kfree(sdebug_q_arr);
7111 }
7112 
7113 device_initcall(scsi_debug_init);
7114 module_exit(scsi_debug_exit);
7115 
7116 static void sdebug_release_adapter(struct device *dev)
7117 {
7118 	struct sdebug_host_info *sdbg_host;
7119 
7120 	sdbg_host = dev_to_sdebug_host(dev);
7121 	kfree(sdbg_host);
7122 }
7123 
7124 /* idx must be valid; if sip is NULL then it will be obtained using idx */
7125 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7126 {
7127 	if (idx < 0)
7128 		return;
7129 	if (!sip) {
7130 		if (xa_empty(per_store_ap))
7131 			return;
7132 		sip = xa_load(per_store_ap, idx);
7133 		if (!sip)
7134 			return;
7135 	}
7136 	vfree(sip->map_storep);
7137 	vfree(sip->dif_storep);
7138 	vfree(sip->storep);
7139 	xa_erase(per_store_ap, idx);
7140 	kfree(sip);
7141 }
7142 
7143 /* Assume apart_from_first==false only in shutdown case. */
7144 static void sdebug_erase_all_stores(bool apart_from_first)
7145 {
7146 	unsigned long idx;
7147 	struct sdeb_store_info *sip = NULL;
7148 
7149 	xa_for_each(per_store_ap, idx, sip) {
7150 		if (apart_from_first)
7151 			apart_from_first = false;
7152 		else
7153 			sdebug_erase_store(idx, sip);
7154 	}
7155 	if (apart_from_first)
7156 		sdeb_most_recent_idx = sdeb_first_idx;
7157 }
7158 
7159 /*
7160  * Returns the new store's xarray element index (idx) if >= 0, else a
7161  * negated errno. The number of stores is limited to 65536.
7162  */
7163 static int sdebug_add_store(void)
7164 {
7165 	int res;
7166 	u32 n_idx;
7167 	unsigned long iflags;
7168 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7169 	struct sdeb_store_info *sip = NULL;
7170 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7171 
7172 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7173 	if (!sip)
7174 		return -ENOMEM;
7175 
7176 	xa_lock_irqsave(per_store_ap, iflags);
7177 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7178 	if (unlikely(res < 0)) {
7179 		xa_unlock_irqrestore(per_store_ap, iflags);
7180 		kfree(sip);
7181 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7182 		return res;
7183 	}
7184 	sdeb_most_recent_idx = n_idx;
7185 	if (sdeb_first_idx < 0)
7186 		sdeb_first_idx = n_idx;
7187 	xa_unlock_irqrestore(per_store_ap, iflags);
7188 
7189 	res = -ENOMEM;
7190 	sip->storep = vzalloc(sz);
7191 	if (!sip->storep) {
7192 		pr_err("user data oom\n");
7193 		goto err;
7194 	}
7195 	if (sdebug_num_parts > 0)
7196 		sdebug_build_parts(sip->storep, sz);
7197 
7198 	/* DIF/DIX: what T10 calls Protection Information (PI) */
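	/*
	 * Each logical block gets one t10_pi_tuple (guard, app and ref
	 * tags); the PI store is filled with 0xff bytes, the T10 escape
	 * pattern for protection information that is not to be checked.
	 */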
7199 	if (sdebug_dix) {
7200 		int dif_size;
7201 
7202 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7203 		sip->dif_storep = vmalloc(dif_size);
7204 
7205 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7206 			sip->dif_storep);
7207 
7208 		if (!sip->dif_storep) {
7209 			pr_err("DIX oom\n");
7210 			goto err;
7211 		}
7212 		memset(sip->dif_storep, 0xff, dif_size);
7213 	}
7214 	/* Logical Block Provisioning */
7215 	if (scsi_debug_lbp()) {
7216 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7217 		sip->map_storep = vmalloc(array_size(sizeof(long),
7218 						     BITS_TO_LONGS(map_size)));
7219 
7220 		pr_info("%lu provisioning blocks\n", map_size);
7221 
7222 		if (!sip->map_storep) {
7223 			pr_err("LBP map oom\n");
7224 			goto err;
7225 		}
7226 
7227 		bitmap_zero(sip->map_storep, map_size);
7228 
7229 		/* Map first 1KB for partition table */
7230 		if (sdebug_num_parts)
7231 			map_region(sip, 0, 2);
7232 	}
7233 
7234 	rwlock_init(&sip->macc_lck);
7235 	return (int)n_idx;
7236 err:
7237 	sdebug_erase_store((int)n_idx, sip);
7238 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
7239 	return res;
7240 }
7241 
7242 static int sdebug_add_host_helper(int per_host_idx)
7243 {
7244 	int k, devs_per_host, idx;
7245 	int error = -ENOMEM;
7246 	struct sdebug_host_info *sdbg_host;
7247 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7248 
7249 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7250 	if (!sdbg_host)
7251 		return -ENOMEM;
7252 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7253 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7254 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7255 	sdbg_host->si_idx = idx;
7256 
7257 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7258 
7259 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7260 	for (k = 0; k < devs_per_host; k++) {
7261 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7262 		if (!sdbg_devinfo)
7263 			goto clean;
7264 	}
7265 
7266 	mutex_lock(&sdebug_host_list_mutex);
7267 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7268 	mutex_unlock(&sdebug_host_list_mutex);
7269 
7270 	sdbg_host->dev.bus = &pseudo_lld_bus;
7271 	sdbg_host->dev.parent = pseudo_primary;
7272 	sdbg_host->dev.release = &sdebug_release_adapter;
7273 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7274 
7275 	error = device_register(&sdbg_host->dev);
7276 	if (error) {
7277 		mutex_lock(&sdebug_host_list_mutex);
7278 		list_del(&sdbg_host->host_list);
7279 		mutex_unlock(&sdebug_host_list_mutex);
7280 		goto clean;
7281 	}
7282 
7283 	++sdebug_num_hosts;
7284 	return 0;
7285 
7286 clean:
7287 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7288 				 dev_list) {
7289 		list_del(&sdbg_devinfo->dev_list);
7290 		kfree(sdbg_devinfo->zstate);
7291 		kfree(sdbg_devinfo);
7292 	}
7293 	if (sdbg_host->dev.release)
7294 		put_device(&sdbg_host->dev);
7295 	else
7296 		kfree(sdbg_host);
7297 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
7298 	return error;
7299 }
7300 
7301 static int sdebug_do_add_host(bool mk_new_store)
7302 {
7303 	int ph_idx = sdeb_most_recent_idx;
7304 
7305 	if (mk_new_store) {
7306 		ph_idx = sdebug_add_store();
7307 		if (ph_idx < 0)
7308 			return ph_idx;
7309 	}
7310 	return sdebug_add_host_helper(ph_idx);
7311 }
7312 
7313 static void sdebug_do_remove_host(bool the_end)
7314 {
7315 	int idx = -1;
7316 	struct sdebug_host_info *sdbg_host = NULL;
7317 	struct sdebug_host_info *sdbg_host2;
7318 
7319 	mutex_lock(&sdebug_host_list_mutex);
7320 	if (!list_empty(&sdebug_host_list)) {
7321 		sdbg_host = list_entry(sdebug_host_list.prev,
7322 				       struct sdebug_host_info, host_list);
7323 		idx = sdbg_host->si_idx;
7324 	}
7325 	if (!the_end && idx >= 0) {
7326 		bool unique = true;
7327 
7328 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7329 			if (sdbg_host2 == sdbg_host)
7330 				continue;
7331 			if (idx == sdbg_host2->si_idx) {
7332 				unique = false;
7333 				break;
7334 			}
7335 		}
7336 		if (unique) {
7337 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7338 			if (idx == sdeb_most_recent_idx)
7339 				--sdeb_most_recent_idx;
7340 		}
7341 	}
7342 	if (sdbg_host)
7343 		list_del(&sdbg_host->host_list);
7344 	mutex_unlock(&sdebug_host_list_mutex);
7345 
7346 	if (!sdbg_host)
7347 		return;
7348 
7349 	device_unregister(&sdbg_host->dev);
7350 	--sdebug_num_hosts;
7351 }
7352 
7353 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7354 {
7355 	struct sdebug_dev_info *devip = sdev->hostdata;
7356 
7357 	if (!devip)
7358 		return	-ENODEV;
7359 
7360 	mutex_lock(&sdebug_host_list_mutex);
7361 	block_unblock_all_queues(true);
7362 
7363 	if (qdepth > SDEBUG_CANQUEUE) {
7364 		qdepth = SDEBUG_CANQUEUE;
7365 		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7366 			qdepth, SDEBUG_CANQUEUE);
7367 	}
7368 	if (qdepth < 1)
7369 		qdepth = 1;
7370 	if (qdepth != sdev->queue_depth)
7371 		scsi_change_queue_depth(sdev, qdepth);
7372 
7373 	block_unblock_all_queues(false);
7374 	mutex_unlock(&sdebug_host_list_mutex);
7375 
7376 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7377 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7378 
7379 	return sdev->queue_depth;
7380 }
7381 
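/*
 * Returns true when this command should be dropped to simulate a timeout:
 * every |every_nth|-th command when SDEBUG_OPT_TIMEOUT is set, or only
 * medium access commands when SDEBUG_OPT_MAC_TIMEOUT is set.
 */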
7382 static bool fake_timeout(struct scsi_cmnd *scp)
7383 {
7384 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7385 		if (sdebug_every_nth < -1)
7386 			sdebug_every_nth = -1;
7387 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7388 			return true; /* ignore command causing timeout */
7389 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7390 			 scsi_medium_access_command(scp))
7391 			return true; /* time out reads and writes */
7392 	}
7393 	return false;
7394 }
7395 
7396 /* Response to TUR or media access command when device stopped */
7397 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7398 {
7399 	int stopped_state;
7400 	u64 diff_ns = 0;
7401 	ktime_t now_ts = ktime_get_boottime();
7402 	struct scsi_device *sdp = scp->device;
7403 
7404 	stopped_state = atomic_read(&devip->stopped);
7405 	if (stopped_state == 2) {
7406 		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7407 			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7408 			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7409 				/* tur_ms_to_ready timer extinguished */
7410 				atomic_set(&devip->stopped, 0);
7411 				return 0;
7412 			}
7413 		}
7414 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7415 		if (sdebug_verbose)
7416 			sdev_printk(KERN_INFO, sdp,
7417 				    "%s: Not ready: in process of becoming ready\n", my_name);
7418 		if (scp->cmnd[0] == TEST_UNIT_READY) {
7419 			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7420 
7421 			if (diff_ns <= tur_nanosecs_to_ready)
7422 				diff_ns = tur_nanosecs_to_ready - diff_ns;
7423 			else
7424 				diff_ns = tur_nanosecs_to_ready;
7425 			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7426 			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
7427 			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7428 						   diff_ns);
7429 			return check_condition_result;
7430 		}
7431 	}
7432 	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7433 	if (sdebug_verbose)
7434 		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7435 			    my_name);
7436 	return check_condition_result;
7437 }
7438 
7439 static void sdebug_map_queues(struct Scsi_Host *shost)
7440 {
7441 	int i, qoff;
7442 
7443 	if (shost->nr_hw_queues == 1)
7444 		return;
7445 
7446 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7447 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7448 
7449 		map->nr_queues  = 0;
7450 
7451 		if (i == HCTX_TYPE_DEFAULT)
7452 			map->nr_queues = submit_queues - poll_queues;
7453 		else if (i == HCTX_TYPE_POLL)
7454 			map->nr_queues = poll_queues;
7455 
7456 		if (!map->nr_queues) {
7457 			BUG_ON(i == HCTX_TYPE_DEFAULT);
7458 			continue;
7459 		}
7460 
7461 		map->queue_offset = qoff;
7462 		blk_mq_map_queues(map);
7463 
7464 		qoff += map->nr_queues;
7465 	}
7466 }
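/*
 * Example mapping (hypothetical module parameters): with submit_queues=4
 * and poll_queues=1, HCTX_TYPE_DEFAULT gets 3 hardware queues at offset 0
 * and HCTX_TYPE_POLL gets 1 queue at offset 3.
 */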
7467 
7468 struct sdebug_blk_mq_poll_data {
7469 	unsigned int queue_num;
7470 	int *num_entries;
7471 };
7472 
7473 /*
7474  * We don't handle aborted commands here, but it does not seem possible to have
7475  * aborted polled commands from schedule_resp().
7476  */
7477 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7478 {
7479 	struct sdebug_blk_mq_poll_data *data = opaque;
7480 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7481 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7482 	struct sdebug_defer *sd_dp;
7483 	u32 unique_tag = blk_mq_unique_tag(rq);
7484 	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7485 	struct sdebug_queued_cmd *sqcp;
7486 	struct sdebug_queue *sqp;
7487 	unsigned long flags;
7488 	int queue_num = data->queue_num;
7489 	int qc_idx;
7490 	ktime_t time;
7491 
7492 	/* We're only interested in one queue for this iteration */
7493 	if (hwq != queue_num)
7494 		return true;
7495 
7496 	/* Subsequent checks would fail if this failed, but check anyway */
7497 	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7498 		return true;
7499 
7500 	time = ktime_get_boottime();
7501 
7502 	spin_lock_irqsave(&sdsc->lock, flags);
7503 	sqcp = TO_QUEUED_CMD(cmd);
7504 	if (!sqcp) {
7505 		spin_unlock_irqrestore(&sdsc->lock, flags);
7506 		return true;
7507 	}
7508 
7509 	sqp = sdebug_q_arr + queue_num;
7510 	sd_dp = &sqcp->sd_dp;
7511 
7512 	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
7513 		spin_unlock_irqrestore(&sdsc->lock, flags);
7514 		return true;
7515 	}
7516 
7517 	if (time < sd_dp->cmpl_ts) {
7518 		spin_unlock_irqrestore(&sdsc->lock, flags);
7519 		return true;
7520 	}
7521 
7522 	qc_idx = sd_dp->sqa_idx;
7523 	sqp->qc_arr[qc_idx] = NULL;
7524 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7525 		spin_unlock_irqrestore(&sdsc->lock, flags);
7526 		pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u\n",
7527 			sqp, queue_num, qc_idx);
7528 		sdebug_free_queued_cmd(sqcp);
7529 		return true;
7530 	}
7531 
7532 	ASSIGN_QUEUED_CMD(cmd, NULL);
7533 	spin_unlock_irqrestore(&sdsc->lock, flags);
7534 
7535 	if (sdebug_statistics) {
7536 		atomic_inc(&sdebug_completions);
7537 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
7538 			atomic_inc(&sdebug_miss_cpus);
7539 	}
7540 
7541 	sdebug_free_queued_cmd(sqcp);
7542 
7543 	scsi_done(cmd); /* callback to mid level */
7544 	(*data->num_entries)++;
7545 	return true;
7546 }
7547 
7548 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7549 {
7550 	int num_entries = 0;
7551 	unsigned long iflags;
7552 	struct sdebug_queue *sqp;
7553 	struct sdebug_blk_mq_poll_data data = {
7554 		.queue_num = queue_num,
7555 		.num_entries = &num_entries,
7556 	};
7557 	sqp = sdebug_q_arr + queue_num;
7558 
7559 	spin_lock_irqsave(&sqp->qc_lock, iflags);
7560 
7561 	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
7562 				&data);
7563 
7564 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7565 	if (num_entries > 0)
7566 		atomic_add(num_entries, &sdeb_mq_poll_count);
7567 	return num_entries;
7568 }
7569 
7570 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7571 				   struct scsi_cmnd *scp)
7572 {
7573 	u8 sdeb_i;
7574 	struct scsi_device *sdp = scp->device;
7575 	const struct opcode_info_t *oip;
7576 	const struct opcode_info_t *r_oip;
7577 	struct sdebug_dev_info *devip;
7578 	u8 *cmd = scp->cmnd;
7579 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7580 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7581 	int k, na;
7582 	int errsts = 0;
7583 	u64 lun_index = sdp->lun & 0x3FFF;
7584 	u32 flags;
7585 	u16 sa;
7586 	u8 opcode = cmd[0];
7587 	bool has_wlun_rl;
7588 	bool inject_now;
7589 
7590 	scsi_set_resid(scp, 0);
7591 	if (sdebug_statistics) {
7592 		atomic_inc(&sdebug_cmnd_count);
7593 		inject_now = inject_on_this_cmd();
7594 	} else {
7595 		inject_now = false;
7596 	}
7597 	if (unlikely(sdebug_verbose &&
7598 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7599 		char b[120];
7600 		int n, len, sb;
7601 
7602 		len = scp->cmd_len;
7603 		sb = (int)sizeof(b);
7604 		if (len > 32)
7605 			strcpy(b, "too long, over 32 bytes");
7606 		else {
7607 			for (k = 0, n = 0; k < len && n < sb; ++k)
7608 				n += scnprintf(b + n, sb - n, "%02x ",
7609 					       (u32)cmd[k]);
7610 		}
7611 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7612 			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7613 	}
7614 	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7615 		return SCSI_MLQUEUE_HOST_BUSY;
7616 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7617 	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7618 		goto err_out;
7619 
7620 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7621 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7622 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7623 	if (unlikely(!devip)) {
7624 		devip = find_build_dev_info(sdp);
7625 		if (NULL == devip)
7626 			goto err_out;
7627 	}
7628 	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7629 		atomic_set(&sdeb_inject_pending, 1);
7630 
7631 	na = oip->num_attached;
7632 	r_pfp = oip->pfp;
7633 	if (na) {	/* multiple commands with this opcode */
7634 		r_oip = oip;
7635 		if (FF_SA & r_oip->flags) {
7636 			if (F_SA_LOW & oip->flags)
7637 				sa = 0x1f & cmd[1];
7638 			else
7639 				sa = get_unaligned_be16(cmd + 8);
7640 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7641 				if (opcode == oip->opcode && sa == oip->sa)
7642 					break;
7643 			}
7644 		} else {   /* no service action, so only check the opcode */
7645 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7646 				if (opcode == oip->opcode)
7647 					break;
7648 			}
7649 		}
7650 		if (k > na) {
7651 			if (F_SA_LOW & r_oip->flags)
7652 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7653 			else if (F_SA_HIGH & r_oip->flags)
7654 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7655 			else
7656 				mk_sense_invalid_opcode(scp);
7657 			goto check_cond;
7658 		}
7659 	}	/* else (when na==0) we assume the oip is a match */
7660 	flags = oip->flags;
7661 	if (unlikely(F_INV_OP & flags)) {
7662 		mk_sense_invalid_opcode(scp);
7663 		goto check_cond;
7664 	}
7665 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7666 		if (sdebug_verbose)
7667 			sdev_printk(KERN_INFO, sdp,
7668 				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
7669 		mk_sense_invalid_opcode(scp);
7670 		goto check_cond;
7671 	}
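	/*
	 * Strict mode: a bit set in CDB byte k that is clear in
	 * oip->len_mask[k] is reserved; the sense data below points at
	 * byte k, bit j of the offending field (ILLEGAL REQUEST).
	 */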
7672 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7673 		u8 rem;
7674 		int j;
7675 
7676 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7677 			rem = ~oip->len_mask[k] & cmd[k];
7678 			if (rem) {
7679 				for (j = 7; j >= 0; --j, rem <<= 1) {
7680 					if (0x80 & rem)
7681 						break;
7682 				}
7683 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7684 				goto check_cond;
7685 			}
7686 		}
7687 	}
7688 	if (unlikely(!(F_SKIP_UA & flags) &&
7689 		     find_first_bit(devip->uas_bm,
7690 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7691 		errsts = make_ua(scp, devip);
7692 		if (errsts)
7693 			goto check_cond;
7694 	}
7695 	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7696 		     atomic_read(&devip->stopped))) {
7697 		errsts = resp_not_ready(scp, devip);
7698 		if (errsts)
7699 			goto fini;
7700 	}
7701 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7702 		goto fini;
7703 	if (unlikely(sdebug_every_nth)) {
7704 		if (fake_timeout(scp))
7705 			return 0;	/* ignore command: make trouble */
7706 	}
7707 	if (likely(oip->pfp))
7708 		pfp = oip->pfp;	/* calls a resp_* function */
7709 	else
7710 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7711 
7712 fini:
7713 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7714 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7715 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7716 					    sdebug_ndelay > 10000)) {
7717 		/*
7718 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7719 		 * for Start Stop Unit (SSU) we want at least a 1 second delay,
7720 		 * and if sdebug_jdelay > 1 a long delay of that many seconds.
7721 		 * For Synchronize Cache we want 1/20 of the SSU delay.
7722 		 */
7723 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7724 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7725 
7726 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7727 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7728 	} else
7729 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7730 				     sdebug_ndelay);
7731 check_cond:
7732 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7733 err_out:
7734 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7735 }
7736 
7737 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
7738 {
7739 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7740 
7741 	spin_lock_init(&sdsc->lock);
7742 
7743 	return 0;
7744 }
7745 
7746 
7747 static struct scsi_host_template sdebug_driver_template = {
7748 	.show_info =		scsi_debug_show_info,
7749 	.write_info =		scsi_debug_write_info,
7750 	.proc_name =		sdebug_proc_name,
7751 	.name =			"SCSI DEBUG",
7752 	.info =			scsi_debug_info,
7753 	.slave_alloc =		scsi_debug_slave_alloc,
7754 	.slave_configure =	scsi_debug_slave_configure,
7755 	.slave_destroy =	scsi_debug_slave_destroy,
7756 	.ioctl =		scsi_debug_ioctl,
7757 	.queuecommand =		scsi_debug_queuecommand,
7758 	.change_queue_depth =	sdebug_change_qdepth,
7759 	.map_queues =		sdebug_map_queues,
7760 	.mq_poll =		sdebug_blk_mq_poll,
7761 	.eh_abort_handler =	scsi_debug_abort,
7762 	.eh_device_reset_handler = scsi_debug_device_reset,
7763 	.eh_target_reset_handler = scsi_debug_target_reset,
7764 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7765 	.eh_host_reset_handler = scsi_debug_host_reset,
7766 	.can_queue =		SDEBUG_CANQUEUE,
7767 	.this_id =		7,
7768 	.sg_tablesize =		SG_MAX_SEGMENTS,
7769 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7770 	.max_sectors =		-1U,
7771 	.max_segment_size =	-1U,
7772 	.module =		THIS_MODULE,
7773 	.track_queue_depth =	1,
7774 	.cmd_size = sizeof(struct sdebug_scsi_cmd),
7775 	.init_cmd_priv = sdebug_init_cmd_priv,
7776 };
7777 
7778 static int sdebug_driver_probe(struct device *dev)
7779 {
7780 	int error = 0;
7781 	struct sdebug_host_info *sdbg_host;
7782 	struct Scsi_Host *hpnt;
7783 	int hprot;
7784 
7785 	sdbg_host = dev_to_sdebug_host(dev);
7786 
7787 	sdebug_driver_template.can_queue = sdebug_max_queue;
7788 	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7789 	if (!sdebug_clustering)
7790 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7791 
7792 	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
7793 	if (NULL == hpnt) {
7794 		pr_err("scsi_host_alloc failed\n");
7795 		error = -ENODEV;
7796 		return error;
7797 	}
7798 	if (submit_queues > nr_cpu_ids) {
7799 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7800 			my_name, submit_queues, nr_cpu_ids);
7801 		submit_queues = nr_cpu_ids;
7802 	}
7803 	/*
7804 	 * Decide whether to tell scsi subsystem that we want mq. The
7805 	 * following should give the same answer for each host.
7806 	 */
7807 	hpnt->nr_hw_queues = submit_queues;
7808 	if (sdebug_host_max_queue)
7809 		hpnt->host_tagset = 1;
7810 
7811 	/* poll queues are possible for nr_hw_queues > 1 */
7812 	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7813 		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7814 			 my_name, poll_queues, hpnt->nr_hw_queues);
7815 		poll_queues = 0;
7816 	}
7817 
7818 	/*
7819 	 * Poll queues don't need interrupts, but we need at least one I/O queue
7820 	 * left over for non-polled I/O.
7821 	 * If condition not met, trim poll_queues to 1 (just for simplicity).
7822 	 */
7823 	if (poll_queues >= submit_queues) {
7824 		if (submit_queues < 3)
7825 			pr_warn("%s: trim poll_queues to 1\n", my_name);
7826 		else
7827 			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7828 				my_name, submit_queues - 1);
7829 		poll_queues = 1;
7830 	}
7831 	if (poll_queues)
7832 		hpnt->nr_maps = 3;
7833 
7834 	sdbg_host->shost = hpnt;
7835 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7836 		hpnt->max_id = sdebug_num_tgts + 1;
7837 	else
7838 		hpnt->max_id = sdebug_num_tgts;
7839 	/* = sdebug_max_luns; */
7840 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7841 
7842 	hprot = 0;
7843 
7844 	switch (sdebug_dif) {
7845 
7846 	case T10_PI_TYPE1_PROTECTION:
7847 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7848 		if (sdebug_dix)
7849 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7850 		break;
7851 
7852 	case T10_PI_TYPE2_PROTECTION:
7853 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7854 		if (sdebug_dix)
7855 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7856 		break;
7857 
7858 	case T10_PI_TYPE3_PROTECTION:
7859 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7860 		if (sdebug_dix)
7861 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7862 		break;
7863 
7864 	default:
7865 		if (sdebug_dix)
7866 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7867 		break;
7868 	}
7869 
7870 	scsi_host_set_prot(hpnt, hprot);
7871 
7872 	if (have_dif_prot || sdebug_dix)
7873 		pr_info("host protection%s%s%s%s%s%s%s\n",
7874 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7875 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7876 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7877 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7878 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7879 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7880 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7881 
7882 	if (sdebug_guard == 1)
7883 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7884 	else
7885 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7886 
7887 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7888 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7889 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7890 		sdebug_statistics = true;
7891 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7892 	if (error) {
7893 		pr_err("scsi_add_host failed\n");
7894 		error = -ENODEV;
7895 		scsi_host_put(hpnt);
7896 	} else {
7897 		scsi_scan_host(hpnt);
7898 	}
7899 
7900 	return error;
7901 }
7902 
7903 static void sdebug_driver_remove(struct device *dev)
7904 {
7905 	struct sdebug_host_info *sdbg_host;
7906 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7907 
7908 	sdbg_host = dev_to_sdebug_host(dev);
7909 
7910 	scsi_remove_host(sdbg_host->shost);
7911 
7912 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7913 				 dev_list) {
7914 		list_del(&sdbg_devinfo->dev_list);
7915 		kfree(sdbg_devinfo->zstate);
7916 		kfree(sdbg_devinfo);
7917 	}
7918 
7919 	scsi_host_put(sdbg_host->shost);
7920 }
7921 
7922 static int pseudo_lld_bus_match(struct device *dev,
7923 				struct device_driver *dev_driver)
7924 {
7925 	return 1;
7926 }
7927 
7928 static struct bus_type pseudo_lld_bus = {
7929 	.name = "pseudo",
7930 	.match = pseudo_lld_bus_match,
7931 	.probe = sdebug_driver_probe,
7932 	.remove = sdebug_driver_remove,
7933 	.drv_groups = sdebug_drv_groups,
7934 };
7935