1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2020 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0189"	/* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200421";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
102 
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
105 
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST   1
108 #define DEF_NUM_TGTS   1
109 #define DEF_MAX_LUNS   1
110 /* With these defaults, this driver will make 1 host with 1 target
111  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
112  */
113 #define DEF_ATO 1
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT   0
117 #define DEF_DEV_SIZE_MB   8
118 #define DEF_ZBC_DEV_SIZE_MB   128
119 #define DEF_DIF 0
120 #define DEF_DIX 0
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE   0
123 #define DEF_EVERY_NTH   0
124 #define DEF_FAKE_RW	0
125 #define DEF_GUARD 0
126 #define DEF_HOST_LOCK 0
127 #define DEF_LBPU 0
128 #define DEF_LBPWS 0
129 #define DEF_LBPWS10 0
130 #define DEF_LBPRZ 1
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0   0
134 #define DEF_NUM_PARTS   0
135 #define DEF_OPTS   0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE   TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB   0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
151 #define DEF_STRICT 0
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_UUID_CTL 0
155 #define JDELAY_OVERRIDDEN -9999
156 
157 /* Default parameters for ZBC drives */
158 #define DEF_ZBC_ZONE_SIZE_MB	128
159 #define DEF_ZBC_MAX_OPEN_ZONES	8
160 #define DEF_ZBC_NR_CONV_ZONES	1
161 
162 #define SDEBUG_LUN_0_VAL 0
163 
164 /* bit mask values for sdebug_opts */
165 #define SDEBUG_OPT_NOISE		1
166 #define SDEBUG_OPT_MEDIUM_ERR		2
167 #define SDEBUG_OPT_TIMEOUT		4
168 #define SDEBUG_OPT_RECOVERED_ERR	8
169 #define SDEBUG_OPT_TRANSPORT_ERR	16
170 #define SDEBUG_OPT_DIF_ERR		32
171 #define SDEBUG_OPT_DIX_ERR		64
172 #define SDEBUG_OPT_MAC_TIMEOUT		128
173 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
174 #define SDEBUG_OPT_Q_NOISE		0x200
175 #define SDEBUG_OPT_ALL_TSF		0x400
176 #define SDEBUG_OPT_RARE_TSF		0x800
177 #define SDEBUG_OPT_N_WCE		0x1000
178 #define SDEBUG_OPT_RESET_NOISE		0x2000
179 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
180 #define SDEBUG_OPT_HOST_BUSY		0x8000
181 #define SDEBUG_OPT_CMD_ABORT		0x10000
182 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
183 			      SDEBUG_OPT_RESET_NOISE)
184 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
185 				  SDEBUG_OPT_TRANSPORT_ERR | \
186 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
187 				  SDEBUG_OPT_SHORT_TRANSFER | \
188 				  SDEBUG_OPT_HOST_BUSY | \
189 				  SDEBUG_OPT_CMD_ABORT)
190 /* When "every_nth" > 0 then modulo "every_nth" commands:
191  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
192  *   - a RECOVERED_ERROR is simulated on successful read and write
193  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
194  *   - a TRANSPORT_ERROR is simulated on successful read and write
195  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
196  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
197  *     CMD_ABORT
198  *
199  * When "every_nth" < 0 then after "- every_nth" commands the selected
200  * error will be injected. The error will be injected on every subsequent
201  * command until some other action occurs; for example, the user writing
202  * a new value (other than -1 or 1) to every_nth:
203  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
204  */
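/* For example (illustrative; uses the module parameters defined below):
 *      modprobe scsi_debug every_nth=100 opts=4
 * makes every 100th command appear to time out, since opts=4 sets
 * SDEBUG_OPT_TIMEOUT; both values can be changed later through the
 * sysfs attributes under /sys/bus/pseudo/drivers/scsi_debug/ .
 */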
205 
206 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
207  * priority order. In the subset implemented here lower numbers have higher
208  * priority. The UA numbers should be a sequence starting from 0 with
209  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
210 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
211 #define SDEBUG_UA_BUS_RESET 1
212 #define SDEBUG_UA_MODE_CHANGED 2
213 #define SDEBUG_UA_CAPACITY_CHANGED 3
214 #define SDEBUG_UA_LUNS_CHANGED 4
215 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
216 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
217 #define SDEBUG_NUM_UAS 7
218 
219 /* When SDEBUG_OPT_MEDIUM_ERR is set in sdebug_opts, a medium error is
220  * simulated at this sector on read commands: */
221 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
222 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
223 
224 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
225  * or "peripheral device" addressing (value 0) */
226 #define SAM2_LUN_ADDRESS_METHOD 0
227 
228 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
229  * (for response) per submit queue at one time. Can be reduced by max_queue
230  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
231  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
232  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
233  * but cannot exceed SDEBUG_CANQUEUE.
234  */
235 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
236 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
237 #define DEF_CMD_PER_LUN  255
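/* Example: on a 64-bit build (BITS_PER_LONG == 64), SDEBUG_CANQUEUE is
 * 3 * 64 = 192 commands per submit queue.
 */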
238 
239 #define F_D_IN			1
240 #define F_D_OUT			2
241 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
242 #define F_D_UNKN		8
243 #define F_RL_WLUN_OK		0x10
244 #define F_SKIP_UA		0x20
245 #define F_DELAY_OVERR		0x40
246 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
247 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
248 #define F_INV_OP		0x200
249 #define F_FAKE_RW		0x400
250 #define F_M_ACCESS		0x800	/* media access */
251 #define F_SSU_DELAY		0x1000
252 #define F_SYNC_DELAY		0x2000
253 
254 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
255 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
256 #define FF_SA (F_SA_HIGH | F_SA_LOW)
257 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
258 
259 #define SDEBUG_MAX_PARTS 4
260 
261 #define SDEBUG_MAX_CMD_LEN 32
262 
263 #define SDEB_XA_NOT_IN_USE XA_MARK_1
264 
265 /* Zone types (zbcr05 table 25) */
266 enum sdebug_z_type {
267 	ZBC_ZONE_TYPE_CNV	= 0x1,
268 	ZBC_ZONE_TYPE_SWR	= 0x2,
269 	ZBC_ZONE_TYPE_SWP	= 0x3,
270 };
271 
272 /* enumeration names taken from table 26, zbcr05 */
273 enum sdebug_z_cond {
274 	ZBC_NOT_WRITE_POINTER	= 0x0,
275 	ZC1_EMPTY		= 0x1,
276 	ZC2_IMPLICIT_OPEN	= 0x2,
277 	ZC3_EXPLICIT_OPEN	= 0x3,
278 	ZC4_CLOSED		= 0x4,
279 	ZC6_READ_ONLY		= 0xd,
280 	ZC5_FULL		= 0xe,
281 	ZC7_OFFLINE		= 0xf,
282 };
283 
284 struct sdeb_zone_state {	/* ZBC: per zone state */
285 	enum sdebug_z_type z_type;
286 	enum sdebug_z_cond z_cond;
287 	bool z_non_seq_resource;
288 	unsigned int z_size;
289 	sector_t z_start;
290 	sector_t z_wp;
291 };
292 
293 struct sdebug_dev_info {
294 	struct list_head dev_list;
295 	unsigned int channel;
296 	unsigned int target;
297 	u64 lun;
298 	uuid_t lu_name;
299 	struct sdebug_host_info *sdbg_host;
300 	unsigned long uas_bm[1];
301 	atomic_t num_in_q;
302 	atomic_t stopped;
303 	bool used;
304 
305 	/* For ZBC devices */
306 	enum blk_zoned_model zmodel;
307 	unsigned int zsize;
308 	unsigned int zsize_shift;
309 	unsigned int nr_zones;
310 	unsigned int nr_conv_zones;
311 	unsigned int nr_imp_open;
312 	unsigned int nr_exp_open;
313 	unsigned int nr_closed;
314 	unsigned int max_open;
315 	struct sdeb_zone_state *zstate;
316 };
317 
318 struct sdebug_host_info {
319 	struct list_head host_list;
320 	int si_idx;	/* sdeb_store_info (per host) xarray index */
321 	struct Scsi_Host *shost;
322 	struct device dev;
323 	struct list_head dev_info_list;
324 };
325 
326 /* There is an xarray of pointers to this struct's objects, one per host */
327 struct sdeb_store_info {
328 	rwlock_t macc_lck;	/* for atomic media access on this store */
329 	u8 *storep;		/* user data storage (ram) */
330 	struct t10_pi_tuple *dif_storep; /* protection info */
331 	void *map_storep;	/* provisioning map */
332 };
333 
334 #define to_sdebug_host(d)	\
335 	container_of(d, struct sdebug_host_info, dev)
336 
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338 		      SDEB_DEFER_WQ = 2};
339 
340 struct sdebug_defer {
341 	struct hrtimer hrt;
342 	struct execute_work ew;
343 	int sqa_idx;	/* index of sdebug_queue array */
344 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
345 	int issuing_cpu;
346 	bool init_hrt;
347 	bool init_wq;
348 	bool aborted;	/* true when blk_abort_request() already called */
349 	enum sdeb_defer_type defer_t;
350 };
351 
352 struct sdebug_queued_cmd {
353 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
354 	 * instance indicates this slot is in use.
355 	 */
356 	struct sdebug_defer *sd_dp;
357 	struct scsi_cmnd *a_cmnd;
358 	unsigned int inj_recovered:1;
359 	unsigned int inj_transport:1;
360 	unsigned int inj_dif:1;
361 	unsigned int inj_dix:1;
362 	unsigned int inj_short:1;
363 	unsigned int inj_host_busy:1;
364 	unsigned int inj_cmd_abort:1;
365 };
366 
367 struct sdebug_queue {
368 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
369 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
370 	spinlock_t qc_lock;
371 	atomic_t blocked;	/* to temporarily stop more being queued */
372 };
373 
374 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
375 static atomic_t sdebug_completions;  /* count of deferred completions */
376 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
377 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
378 
379 struct opcode_info_t {
380 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
381 				/* for terminating element */
382 	u8 opcode;		/* if num_attached > 0, preferred */
383 	u16 sa;			/* service action */
384 	u32 flags;		/* OR-ed set of SDEB_F_* */
385 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
386 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
387 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
388 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
389 };
390 
391 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
392 enum sdeb_opcode_index {
393 	SDEB_I_INVALID_OPCODE =	0,
394 	SDEB_I_INQUIRY = 1,
395 	SDEB_I_REPORT_LUNS = 2,
396 	SDEB_I_REQUEST_SENSE = 3,
397 	SDEB_I_TEST_UNIT_READY = 4,
398 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
399 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
400 	SDEB_I_LOG_SENSE = 7,
401 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
402 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
403 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
404 	SDEB_I_START_STOP = 11,
405 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
406 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
407 	SDEB_I_MAINT_IN = 14,
408 	SDEB_I_MAINT_OUT = 15,
409 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
410 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
411 	SDEB_I_RESERVE = 18,		/* 6, 10 */
412 	SDEB_I_RELEASE = 19,		/* 6, 10 */
413 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
414 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
415 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
416 	SDEB_I_SEND_DIAG = 23,
417 	SDEB_I_UNMAP = 24,
418 	SDEB_I_WRITE_BUFFER = 25,
419 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
420 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
421 	SDEB_I_COMP_WRITE = 28,
422 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
423 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
424 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
425 	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
426 };
427 
428 
429 static const unsigned char opcode_ind_arr[256] = {
430 /* 0x0; 0x0->0x1f: 6 byte cdbs */
431 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
432 	    0, 0, 0, 0,
433 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
434 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
435 	    SDEB_I_RELEASE,
436 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
437 	    SDEB_I_ALLOW_REMOVAL, 0,
438 /* 0x20; 0x20->0x3f: 10 byte cdbs */
439 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
440 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
441 	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
442 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
443 /* 0x40; 0x40->0x5f: 10 byte cdbs */
444 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
445 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
446 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
447 	    SDEB_I_RELEASE,
448 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
449 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
450 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
451 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
452 	0, SDEB_I_VARIABLE_LEN,
453 /* 0x80; 0x80->0x9f: 16 byte cdbs */
454 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
455 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
456 	0, 0, 0, SDEB_I_VERIFY,
457 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
458 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
459 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
460 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
461 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
462 	     SDEB_I_MAINT_OUT, 0, 0, 0,
463 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
464 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
465 	0, 0, 0, 0, 0, 0, 0, 0,
466 	0, 0, 0, 0, 0, 0, 0, 0,
467 /* 0xc0; 0xc0->0xff: vendor specific */
468 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
469 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
470 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
471 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
472 };
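/* Sketch of how the table above is used (not driver code): dispatch
 * indexes the first cdb byte directly, e.g.
 *      opcode_ind_arr[0x12] == SDEB_I_INQUIRY
 *      opcode_ind_arr[0x28] == SDEB_I_READ	(READ(10))
 * and a 0 entry for any opcode other than 0x0 (TEST UNIT READY) means
 * SDEB_I_INVALID_OPCODE.
 */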
473 
474 /*
475  * The following "response" functions return the SCSI mid-level's 4 byte
476  * tuple-in-an-int. For commands with an IMMED bit, they can OR their
477  * return value with SDEG_RES_IMMED_MASK to request faster command
478  * completion.
479  */
480 #define SDEG_RES_IMMED_MASK 0x40000000
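/* A sketch of that pattern (hypothetical fragment of a response
 * function; "immed" and "res" are illustrative names): when the IMMED
 * bit is set in the cdb, end with:
 *	return res | SDEG_RES_IMMED_MASK;
 */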
481 
482 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
500 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
501 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
502 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
503 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
504 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
505 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
506 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
507 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
508 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
509 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
510 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
511 
512 static int sdebug_do_add_host(bool mk_new_store);
513 static int sdebug_add_host_helper(int per_host_idx);
514 static void sdebug_do_remove_host(bool the_end);
515 static int sdebug_add_store(void);
516 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
517 static void sdebug_erase_all_stores(bool apart_from_first);
518 
519 /*
520  * The following are overflow arrays for cdbs that "hit" the same index in
521  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
522  * should be placed in opcode_info_arr[], the others should be placed here.
523  */
524 static const struct opcode_info_t msense_iarr[] = {
525 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
526 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
527 };
528 
529 static const struct opcode_info_t mselect_iarr[] = {
530 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
531 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
532 };
533 
534 static const struct opcode_info_t read_iarr[] = {
535 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
536 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
537 	     0, 0, 0, 0} },
538 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
539 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
540 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
541 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
542 	     0xc7, 0, 0, 0, 0} },
543 };
544 
545 static const struct opcode_info_t write_iarr[] = {
546 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
547 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
548 		   0, 0, 0, 0, 0, 0} },
549 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
550 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
551 		   0, 0, 0} },
552 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
553 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
554 		   0xbf, 0xc7, 0, 0, 0, 0} },
555 };
556 
557 static const struct opcode_info_t verify_iarr[] = {
558 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
559 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
560 		   0, 0, 0, 0, 0, 0} },
561 };
562 
563 static const struct opcode_info_t sa_in_16_iarr[] = {
564 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
565 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
566 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
567 };
568 
569 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
570 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
571 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
572 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
573 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
574 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
575 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
576 };
577 
578 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
579 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
580 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
581 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
582 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
583 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
584 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
585 };
586 
587 static const struct opcode_info_t write_same_iarr[] = {
588 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
589 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
590 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
591 };
592 
593 static const struct opcode_info_t reserve_iarr[] = {
594 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
595 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 };
597 
598 static const struct opcode_info_t release_iarr[] = {
599 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
600 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
601 };
602 
603 static const struct opcode_info_t sync_cache_iarr[] = {
604 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
605 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
607 };
608 
609 static const struct opcode_info_t pre_fetch_iarr[] = {
610 	{0, 0x90, 0, F_SYNC_DELAY | F_M_ACCESS, resp_pre_fetch, NULL,
611 	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
613 };
614 
615 static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
616 	{0, 0x94, 0x1, F_SA_LOW, resp_close_zone, NULL,
617 	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
618 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
619 	{0, 0x94, 0x2, F_SA_LOW, resp_finish_zone, NULL,
620 	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
622 	{0, 0x94, 0x4, F_SA_LOW, resp_rwp_zone, NULL,
623 	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
624 	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
625 };
626 
627 static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
628 	{0, 0x95, 0x6, F_SA_LOW | F_D_IN, NULL, NULL,
629 	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
630 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
631 };
632 
633 
634 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
635  * plus the terminating elements for logic that scans this table such as
636  * REPORT SUPPORTED OPERATION CODES. */
637 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
638 /* 0 */
639 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
640 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
642 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
643 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
644 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
645 	     0, 0} },					/* REPORT LUNS */
646 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
647 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
648 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
649 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
650 /* 5 */
651 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
652 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
653 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
654 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
655 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
656 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
657 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
658 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
659 	     0, 0, 0} },
660 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
661 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
662 	     0, 0} },
663 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
664 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
665 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
666 /* 10 */
667 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
668 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
669 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
670 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
671 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
672 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
673 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
674 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
675 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
676 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
677 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
678 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
679 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
680 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
681 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
682 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
683 				0xff, 0, 0xc7, 0, 0, 0, 0} },
684 /* 15 */
685 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
686 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
687 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
688 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
689 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
690 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
691 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
692 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
693 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
694 	     0xff, 0xff} },
695 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
696 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
697 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
698 	     0} },
699 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
700 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
701 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
702 	     0} },
703 /* 20 */
704 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
705 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
706 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
707 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
708 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
709 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
710 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
711 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
712 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
713 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
714 /* 25 */
715 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
716 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
717 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
718 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
719 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
720 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
721 		 0, 0, 0, 0, 0} },
722 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
723 	    resp_sync_cache, sync_cache_iarr,
724 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
725 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
726 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
727 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
728 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
729 	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | F_M_ACCESS,
730 	    resp_pre_fetch, pre_fetch_iarr,
731 	    {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
732 	     0, 0, 0, 0} },			/* PRE-FETCH (10) */
733 
734 /* 30 */
735 	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW,
736 	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
737 		{16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
738 		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
739 	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_D_IN,
740 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
741 		{16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
742 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
743 /* sentinel */
744 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
745 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
746 };
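/* Rough sketch of how len_mask[] is consumed when the strict parameter
 * is set (illustrative only; variable names are hypothetical):
 *	cdb_len = oip->len_mask[0];
 *	for (k = 1; k < cdb_len && k < 16; ++k)
 *		if (cmd[k] & ~oip->len_mask[k])
 *			mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, -1);
 * i.e. any cdb bit set outside the per-byte mask is reported back as an
 * invalid field in the cdb.
 */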
747 
748 static int sdebug_num_hosts;
749 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
750 static int sdebug_ato = DEF_ATO;
751 static int sdebug_cdb_len = DEF_CDB_LEN;
752 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
753 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
754 static int sdebug_dif = DEF_DIF;
755 static int sdebug_dix = DEF_DIX;
756 static int sdebug_dsense = DEF_D_SENSE;
757 static int sdebug_every_nth = DEF_EVERY_NTH;
758 static int sdebug_fake_rw = DEF_FAKE_RW;
759 static unsigned int sdebug_guard = DEF_GUARD;
760 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
761 static int sdebug_max_luns = DEF_MAX_LUNS;
762 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
763 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
764 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
765 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
766 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
767 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
768 static int sdebug_no_uld;
769 static int sdebug_num_parts = DEF_NUM_PARTS;
770 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
771 static int sdebug_opt_blks = DEF_OPT_BLKS;
772 static int sdebug_opts = DEF_OPTS;
773 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
774 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
775 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
776 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
777 static int sdebug_sector_size = DEF_SECTOR_SIZE;
778 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
779 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
780 static unsigned int sdebug_lbpu = DEF_LBPU;
781 static unsigned int sdebug_lbpws = DEF_LBPWS;
782 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
783 static unsigned int sdebug_lbprz = DEF_LBPRZ;
784 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
785 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
786 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
787 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
788 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
789 static int sdebug_uuid_ctl = DEF_UUID_CTL;
790 static bool sdebug_random = DEF_RANDOM;
791 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
792 static bool sdebug_removable = DEF_REMOVABLE;
793 static bool sdebug_clustering;
794 static bool sdebug_host_lock = DEF_HOST_LOCK;
795 static bool sdebug_strict = DEF_STRICT;
796 static bool sdebug_any_injecting_opt;
797 static bool sdebug_verbose;
798 static bool have_dif_prot;
799 static bool write_since_sync;
800 static bool sdebug_statistics = DEF_STATISTICS;
801 static bool sdebug_wp;
802 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
803 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
804 static char *sdeb_zbc_model_s;
805 
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity;	/* in sectors */
808 
809 /* legacy BIOS (CHS) geometry; the kernel may eventually drop these but
810  * some mode sense pages may still need them */
811 static int sdebug_heads;		/* heads per disk */
812 static int sdebug_cylinders_per;	/* cylinders per surface */
813 static int sdebug_sectors_per;		/* sectors per cylinder */
814 
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_SPINLOCK(sdebug_host_list_lock);
817 
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1;		/* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */
823 
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
833 
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_size_mb;
837 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
838 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
839 
840 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
841 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
842 
843 static DEFINE_RWLOCK(atomic_rw);
844 static DEFINE_RWLOCK(atomic_rw2);
845 
846 static rwlock_t *ramdisk_lck_a[2];
847 
848 static char sdebug_proc_name[] = MY_NAME;
849 static const char *my_name = MY_NAME;
850 
851 static struct bus_type pseudo_lld_bus;
852 
853 static struct device_driver sdebug_driverfs_driver = {
854 	.name 		= sdebug_proc_name,
855 	.bus		= &pseudo_lld_bus,
856 };
857 
858 static const int check_condition_result =
859 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
860 
861 static const int illegal_condition_result =
862 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
863 
864 static const int device_qfull_result =
865 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
866 
867 static const int condition_met_result = SAM_STAT_CONDITION_MET;
868 
869 
870 /* Only do the extra work involved in logical block provisioning if one or
871  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
872  * real reads and writes (i.e. not skipping them for speed).
873  */
874 static inline bool scsi_debug_lbp(void)
875 {
876 	return 0 == sdebug_fake_rw &&
877 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
878 }
879 
880 static void *lba2fake_store(struct sdeb_store_info *sip,
881 			    unsigned long long lba)
882 {
883 	struct sdeb_store_info *lsip = sip;
884 
885 	lba = do_div(lba, sdebug_store_sectors);
886 	if (!sip || !sip->storep) {
887 		WARN_ON_ONCE(true);
888 		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
889 	}
890 	return lsip->storep + lba * sdebug_sector_size;
891 }
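/* Note: since lba is reduced modulo sdebug_store_sectors above, LBAs
 * beyond the end of the backing ram wrap onto it. For example, with an
 * 8 MiB store of 512 byte sectors (16384 sectors), lba 16384 maps back
 * to offset 0; this wrap is what lets virtual_gb advertise a capacity
 * larger than the store.
 */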
892 
893 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
894 				      sector_t sector)
895 {
896 	sector = sector_div(sector, sdebug_store_sectors);
897 
898 	return sip->dif_storep + sector;
899 }
900 
901 static void sdebug_max_tgts_luns(void)
902 {
903 	struct sdebug_host_info *sdbg_host;
904 	struct Scsi_Host *hpnt;
905 
906 	spin_lock(&sdebug_host_list_lock);
907 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
908 		hpnt = sdbg_host->shost;
909 		if ((hpnt->this_id >= 0) &&
910 		    (sdebug_num_tgts > hpnt->this_id))
911 			hpnt->max_id = sdebug_num_tgts + 1;
912 		else
913 			hpnt->max_id = sdebug_num_tgts;
914 		/* sdebug_max_luns; */
915 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
916 	}
917 	spin_unlock(&sdebug_host_list_lock);
918 }
919 
920 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
921 
922 /* Set in_bit to -1 to indicate no bit position of invalid field */
923 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
924 				 enum sdeb_cmd_data c_d,
925 				 int in_byte, int in_bit)
926 {
927 	unsigned char *sbuff;
928 	u8 sks[4];
929 	int sl, asc;
930 
931 	sbuff = scp->sense_buffer;
932 	if (!sbuff) {
933 		sdev_printk(KERN_ERR, scp->device,
934 			    "%s: sense_buffer is NULL\n", __func__);
935 		return;
936 	}
937 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
938 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
939 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
940 	memset(sks, 0, sizeof(sks));
941 	sks[0] = 0x80;
942 	if (c_d)
943 		sks[0] |= 0x40;
944 	if (in_bit >= 0) {
945 		sks[0] |= 0x8;
946 		sks[0] |= 0x7 & in_bit;
947 	}
948 	put_unaligned_be16(in_byte, sks + 1);
949 	if (sdebug_dsense) {
950 		sl = sbuff[7] + 8;
951 		sbuff[7] = sl;
952 		sbuff[sl] = 0x2;
953 		sbuff[sl + 1] = 0x6;
954 		memcpy(sbuff + sl + 4, sks, 3);
955 	} else
956 		memcpy(sbuff + 15, sks, 3);
957 	if (sdebug_verbose)
958 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
959 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
960 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
961 }
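/* Example call (hypothetical values): flag bit 4 of cdb byte 1 as the
 * offending field:
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
 * This yields ILLEGAL REQUEST with asc INVALID_FIELD_IN_CDB (0x24) and
 * a sense-key specific descriptor pointing at that byte and bit.
 */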
962 
963 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
964 {
965 	unsigned char *sbuff;
966 
967 	sbuff = scp->sense_buffer;
968 	if (!sbuff) {
969 		sdev_printk(KERN_ERR, scp->device,
970 			    "%s: sense_buffer is NULL\n", __func__);
971 		return;
972 	}
973 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
974 
975 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
976 
977 	if (sdebug_verbose)
978 		sdev_printk(KERN_INFO, scp->device,
979 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
980 			    my_name, key, asc, asq);
981 }
982 
983 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
984 {
985 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
986 }
987 
988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
989 			    void __user *arg)
990 {
991 	if (sdebug_verbose) {
992 		if (0x1261 == cmd)
993 			sdev_printk(KERN_INFO, dev,
994 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
995 		else if (0x5331 == cmd)
996 			sdev_printk(KERN_INFO, dev,
997 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
998 				    __func__);
999 		else
1000 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001 				    __func__, cmd);
1002 	}
1003 	return -EINVAL;
1004 	/* return -ENOTTY; // correct return but upsets fdisk */
1005 }
1006 
1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009 	switch (sdebug_cdb_len) {
1010 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1011 		sdev->use_10_for_rw = false;
1012 		sdev->use_16_for_rw = false;
1013 		sdev->use_10_for_ms = false;
1014 		break;
1015 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1016 		sdev->use_10_for_rw = true;
1017 		sdev->use_16_for_rw = false;
1018 		sdev->use_10_for_ms = false;
1019 		break;
1020 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1021 		sdev->use_10_for_rw = true;
1022 		sdev->use_16_for_rw = false;
1023 		sdev->use_10_for_ms = true;
1024 		break;
1025 	case 16:
1026 		sdev->use_10_for_rw = false;
1027 		sdev->use_16_for_rw = true;
1028 		sdev->use_10_for_ms = true;
1029 		break;
1030 	case 32: /* No knobs to suggest this so same as 16 for now */
1031 		sdev->use_10_for_rw = false;
1032 		sdev->use_16_for_rw = true;
1033 		sdev->use_10_for_ms = true;
1034 		break;
1035 	default:
1036 		pr_warn("unexpected cdb_len=%d, force to 10\n",
1037 			sdebug_cdb_len);
1038 		sdev->use_10_for_rw = true;
1039 		sdev->use_16_for_rw = false;
1040 		sdev->use_10_for_ms = false;
1041 		sdebug_cdb_len = 10;
1042 		break;
1043 	}
1044 }
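/* For example, loading with cdb_len=16 (or writing 16 to the cdb_len
 * sysfs attribute, if exposed on this kernel) makes sd issue
 * READ(16)/WRITE(16) and 10 byte MODE SENSE/SELECT to these devices.
 */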
1045 
1046 static void all_config_cdb_len(void)
1047 {
1048 	struct sdebug_host_info *sdbg_host;
1049 	struct Scsi_Host *shost;
1050 	struct scsi_device *sdev;
1051 
1052 	spin_lock(&sdebug_host_list_lock);
1053 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 		shost = sdbg_host->shost;
1055 		shost_for_each_device(sdev, shost) {
1056 			config_cdb_len(sdev);
1057 		}
1058 	}
1059 	spin_unlock(&sdebug_host_list_lock);
1060 }
1061 
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064 	struct sdebug_host_info *sdhp;
1065 	struct sdebug_dev_info *dp;
1066 
1067 	spin_lock(&sdebug_host_list_lock);
1068 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 			if ((devip->sdbg_host == dp->sdbg_host) &&
1071 			    (devip->target == dp->target))
1072 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 		}
1074 	}
1075 	spin_unlock(&sdebug_host_list_lock);
1076 }
1077 
1078 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1079 {
1080 	int k;
1081 
1082 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1083 	if (k != SDEBUG_NUM_UAS) {
1084 		const char *cp = NULL;
1085 
1086 		switch (k) {
1087 		case SDEBUG_UA_POR:
1088 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1089 					POWER_ON_RESET_ASCQ);
1090 			if (sdebug_verbose)
1091 				cp = "power on reset";
1092 			break;
1093 		case SDEBUG_UA_BUS_RESET:
1094 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1095 					BUS_RESET_ASCQ);
1096 			if (sdebug_verbose)
1097 				cp = "bus reset";
1098 			break;
1099 		case SDEBUG_UA_MODE_CHANGED:
1100 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1101 					MODE_CHANGED_ASCQ);
1102 			if (sdebug_verbose)
1103 				cp = "mode parameters changed";
1104 			break;
1105 		case SDEBUG_UA_CAPACITY_CHANGED:
1106 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1107 					CAPACITY_CHANGED_ASCQ);
1108 			if (sdebug_verbose)
1109 				cp = "capacity data changed";
1110 			break;
1111 		case SDEBUG_UA_MICROCODE_CHANGED:
1112 			mk_sense_buffer(scp, UNIT_ATTENTION,
1113 					TARGET_CHANGED_ASC,
1114 					MICROCODE_CHANGED_ASCQ);
1115 			if (sdebug_verbose)
1116 				cp = "microcode has been changed";
1117 			break;
1118 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1119 			mk_sense_buffer(scp, UNIT_ATTENTION,
1120 					TARGET_CHANGED_ASC,
1121 					MICROCODE_CHANGED_WO_RESET_ASCQ);
1122 			if (sdebug_verbose)
1123 				cp = "microcode has been changed without reset";
1124 			break;
1125 		case SDEBUG_UA_LUNS_CHANGED:
1126 			/*
1127 			 * SPC-3 behavior is to report a UNIT ATTENTION with
1128 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1129 			 * on the target, until a REPORT LUNS command is
1130 			 * received.  SPC-4 behavior is to report it only once.
1131 			 * NOTE:  sdebug_scsi_level does not use the same
1132 			 * values as struct scsi_device->scsi_level.
1133 			 */
1134 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
1135 				clear_luns_changed_on_target(devip);
1136 			mk_sense_buffer(scp, UNIT_ATTENTION,
1137 					TARGET_CHANGED_ASC,
1138 					LUNS_CHANGED_ASCQ);
1139 			if (sdebug_verbose)
1140 				cp = "reported luns data has changed";
1141 			break;
1142 		default:
1143 			pr_warn("unexpected unit attention code=%d\n", k);
1144 			if (sdebug_verbose)
1145 				cp = "unknown";
1146 			break;
1147 		}
1148 		clear_bit(k, devip->uas_bm);
1149 		if (sdebug_verbose)
1150 			sdev_printk(KERN_INFO, scp->device,
1151 				   "%s reports: Unit attention: %s\n",
1152 				   my_name, cp);
1153 		return check_condition_result;
1154 	}
1155 	return 0;
1156 }
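/* In practice (assuming SDEBUG_UA_POR is set in uas_bm when a device is
 * created, as elsewhere in this driver): the first command to a new
 * logical unit returns CHECK CONDITION with sense key UNIT ATTENTION
 * and asc/ascq 0x29/0x0 (power on/reset), after which the bit is
 * cleared and later commands proceed normally.
 */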
1157 
1158 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1159 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1160 				int arr_len)
1161 {
1162 	int act_len;
1163 	struct scsi_data_buffer *sdb = &scp->sdb;
1164 
1165 	if (!sdb->length)
1166 		return 0;
1167 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1168 		return DID_ERROR << 16;
1169 
1170 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1171 				      arr, arr_len);
1172 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1173 
1174 	return 0;
1175 }
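/* Typical caller pattern (sketch; "arr", "n" and "alloc_len" are
 * illustrative): a response function builds its page in a local array
 * with n valid bytes, takes alloc_len from the cdb, then:
 *	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, n));
 * leaving resid to reflect any shortfall.
 */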
1176 
1177 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1178  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1179  * calls, not required to write in ascending offset order. Assumes resid
1180  * set to scsi_bufflen() prior to any calls.
1181  */
1182 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1183 				  int arr_len, unsigned int off_dst)
1184 {
1185 	unsigned int act_len, n;
1186 	struct scsi_data_buffer *sdb = &scp->sdb;
1187 	off_t skip = off_dst;
1188 
1189 	if (sdb->length <= off_dst)
1190 		return 0;
1191 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1192 		return DID_ERROR << 16;
1193 
1194 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1195 				       arr, arr_len, skip);
1196 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1197 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1198 		 scsi_get_resid(scp));
1199 	n = scsi_bufflen(scp) - (off_dst + act_len);
1200 	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1201 	return 0;
1202 }
1203 
1204 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1205  * 'arr' or -1 if error.
1206  */
1207 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1208 			       int arr_len)
1209 {
1210 	if (!scsi_bufflen(scp))
1211 		return 0;
1212 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1213 		return -1;
1214 
1215 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1216 }
1217 
1218 
1219 static char sdebug_inq_vendor_id[9] = "Linux   ";
1220 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1221 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1222 /* Use some locally assigned NAAs for SAS addresses. */
1223 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1224 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1225 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1226 
1227 /* Device identification VPD page. Returns number of bytes placed in arr */
1228 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1229 			  int target_dev_id, int dev_id_num,
1230 			  const char *dev_id_str, int dev_id_str_len,
1231 			  const uuid_t *lu_name)
1232 {
1233 	int num, port_a;
1234 	char b[32];
1235 
1236 	port_a = target_dev_id + 1;
1237 	/* T10 vendor identifier field format (faked) */
1238 	arr[0] = 0x2;	/* ASCII */
1239 	arr[1] = 0x1;
1240 	arr[2] = 0x0;
1241 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1242 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1243 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1244 	num = 8 + 16 + dev_id_str_len;
1245 	arr[3] = num;
1246 	num += 4;
1247 	if (dev_id_num >= 0) {
1248 		if (sdebug_uuid_ctl) {
1249 			/* Locally assigned UUID */
1250 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1251 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1252 			arr[num++] = 0x0;
1253 			arr[num++] = 0x12;
1254 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1255 			arr[num++] = 0x0;
1256 			memcpy(arr + num, lu_name, 16);
1257 			num += 16;
1258 		} else {
1259 			/* NAA-3, Logical unit identifier (binary) */
1260 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1261 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1262 			arr[num++] = 0x0;
1263 			arr[num++] = 0x8;
1264 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1265 			num += 8;
1266 		}
1267 		/* Target relative port number */
1268 		arr[num++] = 0x61;	/* proto=sas, binary */
1269 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1270 		arr[num++] = 0x0;	/* reserved */
1271 		arr[num++] = 0x4;	/* length */
1272 		arr[num++] = 0x0;	/* reserved */
1273 		arr[num++] = 0x0;	/* reserved */
1274 		arr[num++] = 0x0;
1275 		arr[num++] = 0x1;	/* relative port A */
1276 	}
1277 	/* NAA-3, Target port identifier */
1278 	arr[num++] = 0x61;	/* proto=sas, binary */
1279 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1280 	arr[num++] = 0x0;
1281 	arr[num++] = 0x8;
1282 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1283 	num += 8;
1284 	/* NAA-3, Target port group identifier */
1285 	arr[num++] = 0x61;	/* proto=sas, binary */
1286 	arr[num++] = 0x95;	/* piv=1, target port group id */
1287 	arr[num++] = 0x0;
1288 	arr[num++] = 0x4;
1289 	arr[num++] = 0;
1290 	arr[num++] = 0;
1291 	put_unaligned_be16(port_group_id, arr + num);
1292 	num += 2;
1293 	/* NAA-3, Target device identifier */
1294 	arr[num++] = 0x61;	/* proto=sas, binary */
1295 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1296 	arr[num++] = 0x0;
1297 	arr[num++] = 0x8;
1298 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1299 	num += 8;
1300 	/* SCSI name string: Target device identifier */
1301 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1302 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1303 	arr[num++] = 0x0;
1304 	arr[num++] = 24;
1305 	memcpy(arr + num, "naa.32222220", 12);
1306 	num += 12;
1307 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1308 	memcpy(arr + num, b, 8);
1309 	num += 8;
1310 	memset(arr + num, 0, 4);
1311 	num += 4;
1312 	return num;
1313 }
1314 
1315 static unsigned char vpd84_data[] = {
1316 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1317     0x22,0x22,0x22,0x0,0xbb,0x1,
1318     0x22,0x22,0x22,0x0,0xbb,0x2,
1319 };
1320 
1321 /*  Software interface identification VPD page */
1322 static int inquiry_vpd_84(unsigned char *arr)
1323 {
1324 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1325 	return sizeof(vpd84_data);
1326 }
1327 
1328 /* Management network addresses VPD page */
1329 static int inquiry_vpd_85(unsigned char *arr)
1330 {
1331 	int num = 0;
1332 	const char *na1 = "https://www.kernel.org/config";
1333 	const char *na2 = "http://www.kernel.org/log";
1334 	int plen, olen;
1335 
1336 	arr[num++] = 0x1;	/* lu, storage config */
1337 	arr[num++] = 0x0;	/* reserved */
1338 	arr[num++] = 0x0;
1339 	olen = strlen(na1);
1340 	plen = olen + 1;
1341 	if (plen % 4)
1342 		plen = ((plen / 4) + 1) * 4;
1343 	arr[num++] = plen;	/* length, null terminated, padded */
1344 	memcpy(arr + num, na1, olen);
1345 	memset(arr + num + olen, 0, plen - olen);
1346 	num += plen;
1347 
1348 	arr[num++] = 0x4;	/* lu, logging */
1349 	arr[num++] = 0x0;	/* reserved */
1350 	arr[num++] = 0x0;
1351 	olen = strlen(na2);
1352 	plen = olen + 1;
1353 	if (plen % 4)
1354 		plen = ((plen / 4) + 1) * 4;
1355 	arr[num++] = plen;	/* length, null terminated, padded */
1356 	memcpy(arr + num, na2, olen);
1357 	memset(arr + num + olen, 0, plen - olen);
1358 	num += plen;
1359 
1360 	return num;
1361 }
1362 
1363 /* SCSI ports VPD page */
1364 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1365 {
1366 	int num = 0;
1367 	int port_a, port_b;
1368 
1369 	port_a = target_dev_id + 1;
1370 	port_b = port_a + 1;
1371 	arr[num++] = 0x0;	/* reserved */
1372 	arr[num++] = 0x0;	/* reserved */
1373 	arr[num++] = 0x0;
1374 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1375 	memset(arr + num, 0, 6);
1376 	num += 6;
1377 	arr[num++] = 0x0;
1378 	arr[num++] = 12;	/* length tp descriptor */
1379 	/* naa-5 target port identifier (A) */
1380 	arr[num++] = 0x61;	/* proto=sas, binary */
1381 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1382 	arr[num++] = 0x0;	/* reserved */
1383 	arr[num++] = 0x8;	/* length */
1384 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1385 	num += 8;
1386 	arr[num++] = 0x0;	/* reserved */
1387 	arr[num++] = 0x0;	/* reserved */
1388 	arr[num++] = 0x0;
1389 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1390 	memset(arr + num, 0, 6);
1391 	num += 6;
1392 	arr[num++] = 0x0;
1393 	arr[num++] = 12;	/* length tp descriptor */
1394 	/* naa-5 target port identifier (B) */
1395 	arr[num++] = 0x61;	/* proto=sas, binary */
1396 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1397 	arr[num++] = 0x0;	/* reserved */
1398 	arr[num++] = 0x8;	/* length */
1399 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1400 	num += 8;
1401 
1402 	return num;
1403 }
1404 
1405 
1406 static unsigned char vpd89_data[] = {
1407 /* from 4th byte */ 0,0,0,0,
1408 'l','i','n','u','x',' ',' ',' ',
1409 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1410 '1','2','3','4',
1411 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1412 0xec,0,0,0,
1413 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1414 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1415 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1416 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1417 0x53,0x41,
1418 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1419 0x20,0x20,
1420 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1421 0x10,0x80,
1422 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1423 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1424 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1426 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1427 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1428 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1433 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1434 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1435 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1444 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1445 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1446 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1447 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1448 };
1449 
1450 /* ATA Information VPD page */
1451 static int inquiry_vpd_89(unsigned char *arr)
1452 {
1453 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1454 	return sizeof(vpd89_data);
1455 }
1456 
1457 
1458 static unsigned char vpdb0_data[] = {
1459 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1460 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1461 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1462 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1463 };
1464 
1465 /* Block limits VPD page (SBC-3) */
1466 static int inquiry_vpd_b0(unsigned char *arr)
1467 {
1468 	unsigned int gran;
1469 
1470 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1471 
1472 	/* Optimal transfer length granularity */
1473 	if (sdebug_opt_xferlen_exp != 0 &&
1474 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1475 		gran = 1 << sdebug_opt_xferlen_exp;
1476 	else
1477 		gran = 1 << sdebug_physblk_exp;
1478 	put_unaligned_be16(gran, arr + 2);
1479 
1480 	/* Maximum Transfer Length */
1481 	if (sdebug_store_sectors > 0x400)
1482 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1483 
1484 	/* Optimal Transfer Length */
1485 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1486 
1487 	if (sdebug_lbpu) {
1488 		/* Maximum Unmap LBA Count */
1489 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1490 
1491 		/* Maximum Unmap Block Descriptor Count */
1492 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1493 	}
1494 
1495 	/* Unmap Granularity Alignment */
1496 	if (sdebug_unmap_alignment) {
1497 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1498 		arr[28] |= 0x80; /* UGAVALID */
1499 	}
1500 
1501 	/* Optimal Unmap Granularity */
1502 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1503 
1504 	/* Maximum WRITE SAME Length */
1505 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1506 
1507 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1510 }
1511 
1512 /* Block device characteristics VPD page (SBC-3) */
1513 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1514 {
1515 	memset(arr, 0, 0x3c);
1516 	arr[0] = 0;
1517 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1518 	arr[2] = 0;
1519 	arr[3] = 5;	/* less than 1.8" */
1520 	if (devip->zmodel == BLK_ZONED_HA)
1521 		arr[4] = 1 << 4;	/* zoned field = 01b */
1522 
1523 	return 0x3c;
1524 }
1525 
1526 /* Logical block provisioning VPD page (SBC-4) */
1527 static int inquiry_vpd_b2(unsigned char *arr)
1528 {
1529 	memset(arr, 0, 0x4);
1530 	arr[0] = 0;			/* threshold exponent */
1531 	if (sdebug_lbpu)
1532 		arr[1] = 1 << 7;
1533 	if (sdebug_lbpws)
1534 		arr[1] |= 1 << 6;
1535 	if (sdebug_lbpws10)
1536 		arr[1] |= 1 << 5;
1537 	if (sdebug_lbprz && scsi_debug_lbp())
1538 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1539 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1540 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1541 	/* threshold_percentage=0 */
1542 	return 0x4;
1543 }
1544 
1545 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1546 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1547 {
1548 	memset(arr, 0, 0x3c);
1549 	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1550 	/*
1551 	 * Set Optimal number of open sequential write preferred zones and
1552 	 * Optimal number of non-sequentially written sequential write
1553 	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1554 	 * fields set to zero, apart from Max. number of open swrz_s field.
1555 	 */
1556 	put_unaligned_be32(0xffffffff, &arr[4]);
1557 	put_unaligned_be32(0xffffffff, &arr[8]);
1558 	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1559 		put_unaligned_be32(devip->max_open, &arr[12]);
1560 	else
1561 		put_unaligned_be32(0xffffffff, &arr[12]);
1562 	return 0x3c;
1563 }
1564 
1565 #define SDEBUG_LONG_INQ_SZ 96
1566 #define SDEBUG_MAX_INQ_ARR_SZ 584
1567 
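/*
 * Respond to the INQUIRY command. With the EVPD bit set, cmd[2] selects a
 * Vital Product Data page (0x0, 0x80, 0x83 ... 0xb6) built by the
 * inquiry_vpd_*() helpers above; otherwise a standard 96 byte INQUIRY
 * response is returned, with version descriptors chosen by device type.
 */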
1568 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1569 {
1570 	unsigned char pq_pdt;
1571 	unsigned char *arr;
1572 	unsigned char *cmd = scp->cmnd;
1573 	int alloc_len, n, ret;
1574 	bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1575 
1576 	alloc_len = get_unaligned_be16(cmd + 3);
1577 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1578 	if (!arr)
1579 		return DID_REQUEUE << 16;
1580 	is_disk = (sdebug_ptype == TYPE_DISK);
1581 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1582 	is_disk_zbc = (is_disk || is_zbc);
1583 	have_wlun = scsi_is_wlun(scp->device->lun);
1584 	if (have_wlun)
1585 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1586 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1587 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1588 	else
1589 		pq_pdt = (sdebug_ptype & 0x1f);
1590 	arr[0] = pq_pdt;
1591 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1592 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1593 		kfree(arr);
1594 		return check_condition_result;
1595 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1596 		int lu_id_num, port_group_id, target_dev_id, len;
1597 		char lu_id_str[6];
1598 		int host_no = devip->sdbg_host->shost->host_no;
1599 
1600 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1601 		    (devip->channel & 0x7f);
1602 		if (sdebug_vpd_use_hostno == 0)
1603 			host_no = 0;
1604 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1605 			    (devip->target * 1000) + devip->lun);
1606 		target_dev_id = ((host_no + 1) * 2000) +
1607 				 (devip->target * 1000) - 3;
1608 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1609 		if (0 == cmd[2]) { /* supported vital product data pages */
1610 			arr[1] = cmd[2];	/*sanity */
1611 			n = 4;
1612 			arr[n++] = 0x0;   /* this page */
1613 			arr[n++] = 0x80;  /* unit serial number */
1614 			arr[n++] = 0x83;  /* device identification */
1615 			arr[n++] = 0x84;  /* software interface ident. */
1616 			arr[n++] = 0x85;  /* management network addresses */
1617 			arr[n++] = 0x86;  /* extended inquiry */
1618 			arr[n++] = 0x87;  /* mode page policy */
1619 			arr[n++] = 0x88;  /* SCSI ports */
1620 			if (is_disk_zbc) {	  /* SBC or ZBC */
1621 				arr[n++] = 0x89;  /* ATA information */
1622 				arr[n++] = 0xb0;  /* Block limits */
1623 				arr[n++] = 0xb1;  /* Block characteristics */
1624 				if (is_disk)
1625 					arr[n++] = 0xb2;  /* LB Provisioning */
1626 				if (is_zbc)
1627 					arr[n++] = 0xb6;  /* ZB dev. char. */
1628 			}
1629 			arr[3] = n - 4;	  /* number of supported VPD pages */
1630 		} else if (0x80 == cmd[2]) { /* unit serial number */
1631 			arr[1] = cmd[2];	/*sanity */
1632 			arr[3] = len;
1633 			memcpy(&arr[4], lu_id_str, len);
1634 		} else if (0x83 == cmd[2]) { /* device identification */
1635 			arr[1] = cmd[2];	/*sanity */
1636 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1637 						target_dev_id, lu_id_num,
1638 						lu_id_str, len,
1639 						&devip->lu_name);
1640 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1641 			arr[1] = cmd[2];	/*sanity */
1642 			arr[3] = inquiry_vpd_84(&arr[4]);
1643 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1644 			arr[1] = cmd[2];	/*sanity */
1645 			arr[3] = inquiry_vpd_85(&arr[4]);
1646 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1647 			arr[1] = cmd[2];	/*sanity */
1648 			arr[3] = 0x3c;	/* number of following entries */
1649 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1650 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1651 			else if (have_dif_prot)
1652 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1653 			else
1654 				arr[4] = 0x0;   /* no protection stuff */
1655 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1656 		} else if (0x87 == cmd[2]) { /* mode page policy */
1657 			arr[1] = cmd[2];	/*sanity */
1658 			arr[3] = 0x8;	/* number of following entries */
1659 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1660 			arr[6] = 0x80;	/* mlus, shared */
1661 			arr[8] = 0x18;	 /* protocol specific lu */
1662 			arr[10] = 0x82;	 /* mlus, per initiator port */
1663 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1664 			arr[1] = cmd[2];	/*sanity */
1665 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1666 		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1667 			arr[1] = cmd[2];        /*sanity */
1668 			n = inquiry_vpd_89(&arr[4]);
1669 			put_unaligned_be16(n, arr + 2);
1670 		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1671 			arr[1] = cmd[2];        /*sanity */
1672 			arr[3] = inquiry_vpd_b0(&arr[4]);
1673 		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1674 			arr[1] = cmd[2];        /*sanity */
1675 			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1676 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1677 			arr[1] = cmd[2];        /*sanity */
1678 			arr[3] = inquiry_vpd_b2(&arr[4]);
1679 		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1680 			arr[1] = cmd[2];        /*sanity */
1681 			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1682 		} else {
1683 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1684 			kfree(arr);
1685 			return check_condition_result;
1686 		}
1687 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1688 		ret = fill_from_dev_buffer(scp, arr,
1689 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1690 		kfree(arr);
1691 		return ret;
1692 	}
1693 	/* drops through here for a standard inquiry */
1694 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1695 	arr[2] = sdebug_scsi_level;
1696 	arr[3] = 2;    /* response_data_format==2 */
1697 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1698 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1699 	if (sdebug_vpd_use_hostno == 0)
1700 		arr[5] |= 0x10; /* claim: implicit TPGS */
1701 	arr[6] = 0x10; /* claim: MultiP */
1702 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1703 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1704 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1705 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1706 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1707 	/* Use Vendor Specific area to place driver date in ASCII hex */
1708 	memcpy(&arr[36], sdebug_version_date, 8);
1709 	/* version descriptors (2 bytes each) follow */
1710 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1711 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1712 	n = 62;
1713 	if (is_disk) {		/* SBC-4 no version claimed */
1714 		put_unaligned_be16(0x600, arr + n);
1715 		n += 2;
1716 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1717 		put_unaligned_be16(0x525, arr + n);
1718 		n += 2;
1719 	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
1720 		put_unaligned_be16(0x624, arr + n);
1721 		n += 2;
1722 	}
1723 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1724 	ret = fill_from_dev_buffer(scp, arr,
1725 			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1726 	kfree(arr);
1727 	return ret;
1728 }
1729 
1730 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1731 				   0, 0, 0x0, 0x0};
1732 
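/*
 * Respond to the REQUEST SENSE command. If the Informational Exceptions
 * mode page has TEST=1 and MRIE=6, fabricate a THRESHOLD EXCEEDED sense;
 * otherwise return the stored sense data, converting between fixed (0x70)
 * and descriptor (0x72) format according to the DESC bit in cmd[1].
 */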
1733 static int resp_requests(struct scsi_cmnd *scp,
1734 			 struct sdebug_dev_info *devip)
1735 {
1736 	unsigned char *sbuff;
1737 	unsigned char *cmd = scp->cmnd;
1738 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1739 	bool dsense;
1740 	int len = 18;
1741 
1742 	memset(arr, 0, sizeof(arr));
1743 	dsense = !!(cmd[1] & 1);
1744 	sbuff = scp->sense_buffer;
1745 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1746 		if (dsense) {
1747 			arr[0] = 0x72;
1748 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1749 			arr[2] = THRESHOLD_EXCEEDED;
1750 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1751 			len = 8;
1752 		} else {
1753 			arr[0] = 0x70;
1754 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1755 			arr[7] = 0xa;	/* 18 byte sense buffer */
1756 			arr[12] = THRESHOLD_EXCEEDED;
1757 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1758 		}
1759 	} else {
1760 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1761 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1762 			;	/* have sense and formats match */
1763 		else if (arr[0] <= 0x70) {
1764 			if (dsense) {
1765 				memset(arr, 0, 8);
1766 				arr[0] = 0x72;
1767 				len = 8;
1768 			} else {
1769 				memset(arr, 0, 18);
1770 				arr[0] = 0x70;
1771 				arr[7] = 0xa;
1772 			}
1773 		} else if (dsense) {
1774 			memset(arr, 0, 8);
1775 			arr[0] = 0x72;
1776 			arr[1] = sbuff[2];     /* sense key */
1777 			arr[2] = sbuff[12];    /* asc */
1778 			arr[3] = sbuff[13];    /* ascq */
1779 			len = 8;
1780 		} else {
1781 			memset(arr, 0, 18);
1782 			arr[0] = 0x70;
1783 			arr[2] = sbuff[1];	/* sense key */
1784 			arr[7] = 0xa;
1785 			arr[12] = sbuff[2];	/* asc */
1786 			arr[13] = sbuff[3];	/* ascq */
1787 		}
1788 
1789 	}
1790 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1791 	return fill_from_dev_buffer(scp, arr, len);
1792 }
1793 
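/*
 * Respond to the START STOP UNIT command. Only POWER CONDITION 0 is
 * accepted; the START bit toggles devip->stopped, and the response is
 * immediate when the state is unchanged or the IMMED bit is set.
 */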
1794 static int resp_start_stop(struct scsi_cmnd *scp,
1795 			   struct sdebug_dev_info *devip)
1796 {
1797 	unsigned char *cmd = scp->cmnd;
1798 	int power_cond, stop;
1799 	bool changing;
1800 
1801 	power_cond = (cmd[4] & 0xf0) >> 4;
1802 	if (power_cond) {
1803 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1804 		return check_condition_result;
1805 	}
1806 	stop = !(cmd[4] & 1);
1807 	changing = atomic_read(&devip->stopped) == !stop;
1808 	atomic_xchg(&devip->stopped, stop);
1809 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1810 		return SDEG_RES_IMMED_MASK;
1811 	else
1812 		return 0;
1813 }
1814 
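/*
 * Capacity (in sectors) exposed to the initiator. The virtual_gb module
 * parameter lets the reported capacity exceed the backing store
 * (sdebug_store_sectors); accesses beyond the store wrap around it.
 */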
1815 static sector_t get_sdebug_capacity(void)
1816 {
1817 	static const unsigned int gibibyte = 1073741824;
1818 
1819 	if (sdebug_virtual_gb > 0)
1820 		return (sector_t)sdebug_virtual_gb *
1821 			(gibibyte / sdebug_sector_size);
1822 	else
1823 		return sdebug_store_sectors;
1824 }
1825 
1826 #define SDEBUG_READCAP_ARR_SZ 8
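/*
 * Respond to the READ CAPACITY(10) command. A capacity that does not fit
 * in 32 bits is reported as 0xffffffff, telling the initiator to issue
 * READ CAPACITY(16) instead.
 */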
1827 static int resp_readcap(struct scsi_cmnd *scp,
1828 			struct sdebug_dev_info *devip)
1829 {
1830 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1831 	unsigned int capac;
1832 
1833 	/* following just in case virtual_gb changed */
1834 	sdebug_capacity = get_sdebug_capacity();
1835 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1836 	if (sdebug_capacity < 0xffffffff) {
1837 		capac = (unsigned int)sdebug_capacity - 1;
1838 		put_unaligned_be32(capac, arr + 0);
1839 	} else
1840 		put_unaligned_be32(0xffffffff, arr + 0);
1841 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1842 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1843 }
1844 
1845 #define SDEBUG_READCAP16_ARR_SZ 32
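/*
 * Respond to the READ CAPACITY(16) command: 64 bit last LBA plus the
 * protection (PROT_EN/P_TYPE), provisioning (LBPME/LBPRZ), physical
 * block exponent and lowest aligned LBA fields.
 */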
1846 static int resp_readcap16(struct scsi_cmnd *scp,
1847 			  struct sdebug_dev_info *devip)
1848 {
1849 	unsigned char *cmd = scp->cmnd;
1850 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1851 	int alloc_len;
1852 
1853 	alloc_len = get_unaligned_be32(cmd + 10);
1854 	/* following just in case virtual_gb changed */
1855 	sdebug_capacity = get_sdebug_capacity();
1856 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1857 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1858 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1859 	arr[13] = sdebug_physblk_exp & 0xf;
1860 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1861 
1862 	if (scsi_debug_lbp()) {
1863 		arr[14] |= 0x80; /* LBPME */
1864 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1865 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1866 		 * in the wider field maps to 0 in this field.
1867 		 */
1868 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1869 			arr[14] |= 0x40;
1870 	}
1871 
1872 	arr[15] = sdebug_lowest_aligned & 0xff;
1873 
1874 	if (have_dif_prot) {
1875 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1876 		arr[12] |= 1; /* PROT_EN */
1877 	}
1878 
1879 	return fill_from_dev_buffer(scp, arr,
1880 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1881 }
1882 
1883 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1884 
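/*
 * Respond to the REPORT TARGET PORT GROUPS command: two port groups with
 * one relative port each, the group holding port B reported unavailable
 * (matching the SCSI ports VPD page above).
 */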
1885 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1886 			      struct sdebug_dev_info *devip)
1887 {
1888 	unsigned char *cmd = scp->cmnd;
1889 	unsigned char *arr;
1890 	int host_no = devip->sdbg_host->shost->host_no;
1891 	int n, ret, alen, rlen;
1892 	int port_group_a, port_group_b, port_a, port_b;
1893 
1894 	alen = get_unaligned_be32(cmd + 6);
1895 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1896 	if (!arr)
1897 		return DID_REQUEUE << 16;
1898 	/*
1899 	 * EVPD page 0x88 states we have two ports, one
1900 	 * real and a fake port with no device connected.
1901 	 * So we create two port groups with one port each
1902 	 * and set the group with port B to unavailable.
1903 	 */
1904 	port_a = 0x1; /* relative port A */
1905 	port_b = 0x2; /* relative port B */
1906 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1907 			(devip->channel & 0x7f);
1908 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1909 			(devip->channel & 0x7f) + 0x80;
1910 
1911 	/*
1912 	 * The asymmetric access state is cycled according to the host_id.
1913 	 */
1914 	n = 4;
1915 	if (sdebug_vpd_use_hostno == 0) {
1916 		arr[n++] = host_no % 3; /* Asymm access state */
1917 		arr[n++] = 0x0F; /* claim: all states are supported */
1918 	} else {
1919 		arr[n++] = 0x0; /* Active/Optimized path */
1920 		arr[n++] = 0x01; /* only support active/optimized paths */
1921 	}
1922 	put_unaligned_be16(port_group_a, arr + n);
1923 	n += 2;
1924 	arr[n++] = 0;    /* Reserved */
1925 	arr[n++] = 0;    /* Status code */
1926 	arr[n++] = 0;    /* Vendor unique */
1927 	arr[n++] = 0x1;  /* One port per group */
1928 	arr[n++] = 0;    /* Reserved */
1929 	arr[n++] = 0;    /* Reserved */
1930 	put_unaligned_be16(port_a, arr + n);
1931 	n += 2;
1932 	arr[n++] = 3;    /* Port unavailable */
1933 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1934 	put_unaligned_be16(port_group_b, arr + n);
1935 	n += 2;
1936 	arr[n++] = 0;    /* Reserved */
1937 	arr[n++] = 0;    /* Status code */
1938 	arr[n++] = 0;    /* Vendor unique */
1939 	arr[n++] = 0x1;  /* One port per group */
1940 	arr[n++] = 0;    /* Reserved */
1941 	arr[n++] = 0;    /* Reserved */
1942 	put_unaligned_be16(port_b, arr + n);
1943 	n += 2;
1944 
1945 	rlen = n - 4;
1946 	put_unaligned_be32(rlen, arr + 0);
1947 
1948 	/*
1949 	 * Return the smallest value of either
1950 	 * - The allocated length
1951 	 * - The constructed command length
1952 	 * - The maximum array size
1953 	 */
1954 	rlen = min_t(int, alen, n);
1955 	ret = fill_from_dev_buffer(scp, arr,
1956 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1957 	kfree(arr);
1958 	return ret;
1959 }
1960 
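/*
 * Respond to the REPORT SUPPORTED OPERATION CODES command. For
 * reporting_opts 0 every entry of opcode_info_arr (and its attached
 * variants) is listed, 8 or 20 bytes per descriptor depending on RCTD;
 * options 1, 2 and 3 report one opcode (plus service action) together
 * with its CDB usage bit mask.
 */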
1961 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1962 			     struct sdebug_dev_info *devip)
1963 {
1964 	bool rctd;
1965 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1966 	u16 req_sa, u;
1967 	u32 alloc_len, a_len;
1968 	int k, offset, len, errsts, count, bump, na;
1969 	const struct opcode_info_t *oip;
1970 	const struct opcode_info_t *r_oip;
1971 	u8 *arr;
1972 	u8 *cmd = scp->cmnd;
1973 
1974 	rctd = !!(cmd[2] & 0x80);
1975 	reporting_opts = cmd[2] & 0x7;
1976 	req_opcode = cmd[3];
1977 	req_sa = get_unaligned_be16(cmd + 4);
1978 	alloc_len = get_unaligned_be32(cmd + 6);
1979 	if (alloc_len < 4 || alloc_len > 0xffff) {
1980 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1981 		return check_condition_result;
1982 	}
1983 	if (alloc_len > 8192)
1984 		a_len = 8192;
1985 	else
1986 		a_len = alloc_len;
1987 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1988 	if (NULL == arr) {
1989 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1990 				INSUFF_RES_ASCQ);
1991 		return check_condition_result;
1992 	}
1993 	switch (reporting_opts) {
1994 	case 0:	/* all commands */
1995 		/* count number of commands */
1996 		for (count = 0, oip = opcode_info_arr;
1997 		     oip->num_attached != 0xff; ++oip) {
1998 			if (F_INV_OP & oip->flags)
1999 				continue;
2000 			count += (oip->num_attached + 1);
2001 		}
2002 		bump = rctd ? 20 : 8;
2003 		put_unaligned_be32(count * bump, arr);
2004 		for (offset = 4, oip = opcode_info_arr;
2005 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
2006 			if (F_INV_OP & oip->flags)
2007 				continue;
2008 			na = oip->num_attached;
2009 			arr[offset] = oip->opcode;
2010 			put_unaligned_be16(oip->sa, arr + offset + 2);
2011 			if (rctd)
2012 				arr[offset + 5] |= 0x2;
2013 			if (FF_SA & oip->flags)
2014 				arr[offset + 5] |= 0x1;
2015 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2016 			if (rctd)
2017 				put_unaligned_be16(0xa, arr + offset + 8);
2018 			r_oip = oip;
2019 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2020 				if (F_INV_OP & oip->flags)
2021 					continue;
2022 				offset += bump;
2023 				arr[offset] = oip->opcode;
2024 				put_unaligned_be16(oip->sa, arr + offset + 2);
2025 				if (rctd)
2026 					arr[offset + 5] |= 0x2;
2027 				if (FF_SA & oip->flags)
2028 					arr[offset + 5] |= 0x1;
2029 				put_unaligned_be16(oip->len_mask[0],
2030 						   arr + offset + 6);
2031 				if (rctd)
2032 					put_unaligned_be16(0xa,
2033 							   arr + offset + 8);
2034 			}
2035 			oip = r_oip;
2036 			offset += bump;
2037 		}
2038 		break;
2039 	case 1:	/* one command: opcode only */
2040 	case 2:	/* one command: opcode plus service action */
2041 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
2042 		sdeb_i = opcode_ind_arr[req_opcode];
2043 		oip = &opcode_info_arr[sdeb_i];
2044 		if (F_INV_OP & oip->flags) {
2045 			supp = 1;
2046 			offset = 4;
2047 		} else {
2048 			if (1 == reporting_opts) {
2049 				if (FF_SA & oip->flags) {
2050 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2051 							     2, 2);
2052 					kfree(arr);
2053 					return check_condition_result;
2054 				}
2055 				req_sa = 0;
2056 			} else if (2 == reporting_opts &&
2057 				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
2058 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2059 				kfree(arr);
2060 				return check_condition_result;
2061 			}
2062 			if (0 == (FF_SA & oip->flags) &&
2063 			    req_opcode == oip->opcode)
2064 				supp = 3;
2065 			else if (0 == (FF_SA & oip->flags)) {
2066 				na = oip->num_attached;
2067 				for (k = 0, oip = oip->arrp; k < na;
2068 				     ++k, ++oip) {
2069 					if (req_opcode == oip->opcode)
2070 						break;
2071 				}
2072 				supp = (k >= na) ? 1 : 3;
2073 			} else if (req_sa != oip->sa) {
2074 				na = oip->num_attached;
2075 				for (k = 0, oip = oip->arrp; k < na;
2076 				     ++k, ++oip) {
2077 					if (req_sa == oip->sa)
2078 						break;
2079 				}
2080 				supp = (k >= na) ? 1 : 3;
2081 			} else
2082 				supp = 3;
2083 			if (3 == supp) {
2084 				u = oip->len_mask[0];
2085 				put_unaligned_be16(u, arr + 2);
2086 				arr[4] = oip->opcode;
2087 				for (k = 1; k < u; ++k)
2088 					arr[4 + k] = (k < 16) ?
2089 						 oip->len_mask[k] : 0xff;
2090 				offset = 4 + u;
2091 			} else
2092 				offset = 4;
2093 		}
2094 		arr[1] = (rctd ? 0x80 : 0) | supp;
2095 		if (rctd) {
2096 			put_unaligned_be16(0xa, arr + offset);
2097 			offset += 12;
2098 		}
2099 		break;
2100 	default:
2101 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2102 		kfree(arr);
2103 		return check_condition_result;
2104 	}
2105 	offset = (offset < a_len) ? offset : a_len;
2106 	len = (offset < alloc_len) ? offset : alloc_len;
2107 	errsts = fill_from_dev_buffer(scp, arr, len);
2108 	kfree(arr);
2109 	return errsts;
2110 }
2111 
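/*
 * Respond to the REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS command:
 * claims support for abort task, abort task set, LU reset and I_T nexus
 * reset.
 */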
2112 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2113 			  struct sdebug_dev_info *devip)
2114 {
2115 	bool repd;
2116 	u32 alloc_len, len;
2117 	u8 arr[16];
2118 	u8 *cmd = scp->cmnd;
2119 
2120 	memset(arr, 0, sizeof(arr));
2121 	repd = !!(cmd[2] & 0x80);
2122 	alloc_len = get_unaligned_be32(cmd + 6);
2123 	if (alloc_len < 4) {
2124 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2125 		return check_condition_result;
2126 	}
2127 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
2128 	arr[1] = 0x1;		/* ITNRS */
2129 	if (repd) {
2130 		arr[3] = 0xc;
2131 		len = 16;
2132 	} else
2133 		len = 4;
2134 
2135 	len = (len < alloc_len) ? len : alloc_len;
2136 	return fill_from_dev_buffer(scp, arr, len);
2137 }
2138 
2139 /* <<Following mode page info copied from ST318451LW>> */
2140 
2141 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2142 {	/* Read-Write Error Recovery page for mode_sense */
2143 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2144 					5, 0, 0xff, 0xff};
2145 
2146 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2147 	if (1 == pcontrol)
2148 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2149 	return sizeof(err_recov_pg);
2150 }
2151 
2152 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2153 { 	/* Disconnect-Reconnect page for mode_sense */
2154 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2155 					 0, 0, 0, 0, 0, 0, 0, 0};
2156 
2157 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2158 	if (1 == pcontrol)
2159 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2160 	return sizeof(disconnect_pg);
2161 }
2162 
2163 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2164 {       /* Format device page for mode_sense */
2165 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2166 				     0, 0, 0, 0, 0, 0, 0, 0,
2167 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2168 
2169 	memcpy(p, format_pg, sizeof(format_pg));
2170 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2171 	put_unaligned_be16(sdebug_sector_size, p + 12);
2172 	if (sdebug_removable)
2173 		p[20] |= 0x20; /* should agree with INQUIRY */
2174 	if (1 == pcontrol)
2175 		memset(p + 2, 0, sizeof(format_pg) - 2);
2176 	return sizeof(format_pg);
2177 }
2178 
2179 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2180 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2181 				     0, 0, 0, 0};
2182 
2183 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2184 { 	/* Caching page for mode_sense */
2185 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2186 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2187 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2188 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2189 
2190 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2191 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2192 	memcpy(p, caching_pg, sizeof(caching_pg));
2193 	if (1 == pcontrol)
2194 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2195 	else if (2 == pcontrol)
2196 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2197 	return sizeof(caching_pg);
2198 }
2199 
2200 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2201 				    0, 0, 0x2, 0x4b};
2202 
2203 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2204 { 	/* Control mode page for mode_sense */
2205 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2206 					0, 0, 0, 0};
2207 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2208 				     0, 0, 0x2, 0x4b};
2209 
2210 	if (sdebug_dsense)
2211 		ctrl_m_pg[2] |= 0x4;
2212 	else
2213 		ctrl_m_pg[2] &= ~0x4;
2214 
2215 	if (sdebug_ato)
2216 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2217 
2218 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2219 	if (1 == pcontrol)
2220 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2221 	else if (2 == pcontrol)
2222 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2223 	return sizeof(ctrl_m_pg);
2224 }
2225 
2226 
2227 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2228 {	/* Informational Exceptions control mode page for mode_sense */
2229 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2230 				       0, 0, 0x0, 0x0};
2231 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2232 				      0, 0, 0x0, 0x0};
2233 
2234 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2235 	if (1 == pcontrol)
2236 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2237 	else if (2 == pcontrol)
2238 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2239 	return sizeof(iec_m_pg);
2240 }
2241 
2242 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2243 {	/* SAS SSP mode page - short format for mode_sense */
2244 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2245 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2246 
2247 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2248 	if (1 == pcontrol)
2249 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2250 	return sizeof(sas_sf_m_pg);
2251 }
2252 
2253 
2254 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2255 			      int target_dev_id)
2256 {	/* SAS phy control and discover mode page for mode_sense */
2257 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2258 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2259 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2260 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2261 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2262 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2263 		    0, 0, 0, 0, 0, 0, 0, 0,
2264 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2265 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2266 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2267 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2268 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2269 		    0, 0, 0, 0, 0, 0, 0, 0,
2270 		};
2271 	int port_a, port_b;
2272 
2273 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2274 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2275 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2276 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2277 	port_a = target_dev_id + 1;
2278 	port_b = port_a + 1;
2279 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2280 	put_unaligned_be32(port_a, p + 20);
2281 	put_unaligned_be32(port_b, p + 48 + 20);
2282 	if (1 == pcontrol)
2283 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2284 	return sizeof(sas_pcd_m_pg);
2285 }
2286 
2287 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2288 {	/* SAS SSP shared protocol specific port mode subpage */
2289 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2290 		    0, 0, 0, 0, 0, 0, 0, 0,
2291 		};
2292 
2293 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2294 	if (1 == pcontrol)
2295 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2296 	return sizeof(sas_sha_m_pg);
2297 }
2298 
2299 #define SDEBUG_MAX_MSENSE_SZ 256
2300 
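/*
 * Respond to the MODE SENSE(6) and (10) commands: optional block
 * descriptor(s) followed by the requested mode page(s). pcontrol selects
 * current (0), changeable (1) or default (2) values; saved values (3)
 * are not supported.
 */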
2301 static int resp_mode_sense(struct scsi_cmnd *scp,
2302 			   struct sdebug_dev_info *devip)
2303 {
2304 	int pcontrol, pcode, subpcode, bd_len;
2305 	unsigned char dev_spec;
2306 	int alloc_len, offset, len, target_dev_id;
2307 	int target = scp->device->id;
2308 	unsigned char *ap;
2309 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2310 	unsigned char *cmd = scp->cmnd;
2311 	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2312 
2313 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2314 	pcontrol = (cmd[2] & 0xc0) >> 6;
2315 	pcode = cmd[2] & 0x3f;
2316 	subpcode = cmd[3];
2317 	msense_6 = (MODE_SENSE == cmd[0]);
2318 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2319 	is_disk = (sdebug_ptype == TYPE_DISK);
2320 	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2321 	if ((is_disk || is_zbc) && !dbd)
2322 		bd_len = llbaa ? 16 : 8;
2323 	else
2324 		bd_len = 0;
2325 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2326 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2327 	if (0x3 == pcontrol) {  /* Saving values not supported */
2328 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2329 		return check_condition_result;
2330 	}
2331 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2332 			(devip->target * 1000) - 3;
2333 	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2334 	if (is_disk || is_zbc) {
2335 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2336 		if (sdebug_wp)
2337 			dev_spec |= 0x80;
2338 	} else
2339 		dev_spec = 0x0;
2340 	if (msense_6) {
2341 		arr[2] = dev_spec;
2342 		arr[3] = bd_len;
2343 		offset = 4;
2344 	} else {
2345 		arr[3] = dev_spec;
2346 		if (16 == bd_len)
2347 			arr[4] = 0x1;	/* set LONGLBA bit */
2348 		arr[7] = bd_len;	/* assume 255 or less */
2349 		offset = 8;
2350 	}
2351 	ap = arr + offset;
2352 	if ((bd_len > 0) && (!sdebug_capacity))
2353 		sdebug_capacity = get_sdebug_capacity();
2354 
2355 	if (8 == bd_len) {
2356 		if (sdebug_capacity > 0xfffffffe)
2357 			put_unaligned_be32(0xffffffff, ap + 0);
2358 		else
2359 			put_unaligned_be32(sdebug_capacity, ap + 0);
2360 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2361 		offset += bd_len;
2362 		ap = arr + offset;
2363 	} else if (16 == bd_len) {
2364 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2365 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2366 		offset += bd_len;
2367 		ap = arr + offset;
2368 	}
2369 
2370 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2371 		/* TODO: Control Extension page */
2372 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2373 		return check_condition_result;
2374 	}
2375 	bad_pcode = false;
2376 
2377 	switch (pcode) {
2378 	case 0x1:	/* Read-Write error recovery page, direct access */
2379 		len = resp_err_recov_pg(ap, pcontrol, target);
2380 		offset += len;
2381 		break;
2382 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2383 		len = resp_disconnect_pg(ap, pcontrol, target);
2384 		offset += len;
2385 		break;
2386 	case 0x3:       /* Format device page, direct access */
2387 		if (is_disk) {
2388 			len = resp_format_pg(ap, pcontrol, target);
2389 			offset += len;
2390 		} else
2391 			bad_pcode = true;
2392 		break;
2393 	case 0x8:	/* Caching page, direct access */
2394 		if (is_disk || is_zbc) {
2395 			len = resp_caching_pg(ap, pcontrol, target);
2396 			offset += len;
2397 		} else
2398 			bad_pcode = true;
2399 		break;
2400 	case 0xa:	/* Control Mode page, all devices */
2401 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2402 		offset += len;
2403 		break;
2404 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2405 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2406 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2407 			return check_condition_result;
2408 		}
2409 		len = 0;
2410 		if ((0x0 == subpcode) || (0xff == subpcode))
2411 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2412 		if ((0x1 == subpcode) || (0xff == subpcode))
2413 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2414 						  target_dev_id);
2415 		if ((0x2 == subpcode) || (0xff == subpcode))
2416 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2417 		offset += len;
2418 		break;
2419 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2420 		len = resp_iec_m_pg(ap, pcontrol, target);
2421 		offset += len;
2422 		break;
2423 	case 0x3f:	/* Read all Mode pages */
2424 		if ((0 == subpcode) || (0xff == subpcode)) {
2425 			len = resp_err_recov_pg(ap, pcontrol, target);
2426 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2427 			if (is_disk) {
2428 				len += resp_format_pg(ap + len, pcontrol,
2429 						      target);
2430 				len += resp_caching_pg(ap + len, pcontrol,
2431 						       target);
2432 			} else if (is_zbc) {
2433 				len += resp_caching_pg(ap + len, pcontrol,
2434 						       target);
2435 			}
2436 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2437 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2438 			if (0xff == subpcode) {
2439 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2440 						  target, target_dev_id);
2441 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2442 			}
2443 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2444 			offset += len;
2445 		} else {
2446 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2447 			return check_condition_result;
2448 		}
2449 		break;
2450 	default:
2451 		bad_pcode = true;
2452 		break;
2453 	}
2454 	if (bad_pcode) {
2455 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2456 		return check_condition_result;
2457 	}
2458 	if (msense_6)
2459 		arr[0] = offset - 1;
2460 	else
2461 		put_unaligned_be16((offset - 2), arr + 0);
2462 	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2463 }
2464 
2465 #define SDEBUG_MAX_MSELECT_SZ 512
2466 
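/*
 * Respond to the MODE SELECT(6) and (10) commands. Only the Caching,
 * Control and Informational Exceptions mode pages may be changed; a
 * successful change raises a MODE PARAMETERS CHANGED unit attention.
 */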
2467 static int resp_mode_select(struct scsi_cmnd *scp,
2468 			    struct sdebug_dev_info *devip)
2469 {
2470 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2471 	int param_len, res, mpage;
2472 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2473 	unsigned char *cmd = scp->cmnd;
2474 	int mselect6 = (MODE_SELECT == cmd[0]);
2475 
2476 	memset(arr, 0, sizeof(arr));
2477 	pf = cmd[1] & 0x10;
2478 	sp = cmd[1] & 0x1;
2479 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2480 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2481 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2482 		return check_condition_result;
2483 	}
2484 	res = fetch_to_dev_buffer(scp, arr, param_len);
2485 	if (-1 == res)
2486 		return DID_ERROR << 16;
2487 	else if (sdebug_verbose && (res < param_len))
2488 		sdev_printk(KERN_INFO, scp->device,
2489 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2490 			    __func__, param_len, res);
2491 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2492 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2493 	if (md_len > 2) {
2494 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2495 		return check_condition_result;
2496 	}
2497 	off = bd_len + (mselect6 ? 4 : 8);
2498 	mpage = arr[off] & 0x3f;
2499 	ps = !!(arr[off] & 0x80);
2500 	if (ps) {
2501 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2502 		return check_condition_result;
2503 	}
2504 	spf = !!(arr[off] & 0x40);
2505 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2506 		       (arr[off + 1] + 2);
2507 	if ((pg_len + off) > param_len) {
2508 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2509 				PARAMETER_LIST_LENGTH_ERR, 0);
2510 		return check_condition_result;
2511 	}
2512 	switch (mpage) {
2513 	case 0x8:      /* Caching Mode page */
2514 		if (caching_pg[1] == arr[off + 1]) {
2515 			memcpy(caching_pg + 2, arr + off + 2,
2516 			       sizeof(caching_pg) - 2);
2517 			goto set_mode_changed_ua;
2518 		}
2519 		break;
2520 	case 0xa:      /* Control Mode page */
2521 		if (ctrl_m_pg[1] == arr[off + 1]) {
2522 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2523 			       sizeof(ctrl_m_pg) - 2);
2524 			if (ctrl_m_pg[4] & 0x8)
2525 				sdebug_wp = true;
2526 			else
2527 				sdebug_wp = false;
2528 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2529 			goto set_mode_changed_ua;
2530 		}
2531 		break;
2532 	case 0x1c:      /* Informational Exceptions Mode page */
2533 		if (iec_m_pg[1] == arr[off + 1]) {
2534 			memcpy(iec_m_pg + 2, arr + off + 2,
2535 			       sizeof(iec_m_pg) - 2);
2536 			goto set_mode_changed_ua;
2537 		}
2538 		break;
2539 	default:
2540 		break;
2541 	}
2542 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2543 	return check_condition_result;
2544 set_mode_changed_ua:
2545 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2546 	return 0;
2547 }
2548 
2549 static int resp_temp_l_pg(unsigned char *arr)
2550 {
2551 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2552 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2553 		};
2554 
2555 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2556 	return sizeof(temp_l_pg);
2557 }
2558 
2559 static int resp_ie_l_pg(unsigned char *arr)
2560 {
2561 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2562 		};
2563 
2564 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2565 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2566 		arr[4] = THRESHOLD_EXCEEDED;
2567 		arr[5] = 0xff;
2568 	}
2569 	return sizeof(ie_l_pg);
2570 }
2571 
2572 #define SDEBUG_MAX_LSENSE_SZ 512
2573 
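/*
 * Respond to the LOG SENSE command. Supports the Temperature (0xd) and
 * Informational Exceptions (0x2f) log pages plus the supported
 * pages/subpages lists; the PPC and SP bits must be zero.
 */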
2574 static int resp_log_sense(struct scsi_cmnd *scp,
2575 			  struct sdebug_dev_info *devip)
2576 {
2577 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2578 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2579 	unsigned char *cmd = scp->cmnd;
2580 
2581 	memset(arr, 0, sizeof(arr));
2582 	ppc = cmd[1] & 0x2;
2583 	sp = cmd[1] & 0x1;
2584 	if (ppc || sp) {
2585 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2586 		return check_condition_result;
2587 	}
2588 	pcode = cmd[2] & 0x3f;
2589 	subpcode = cmd[3] & 0xff;
2590 	alloc_len = get_unaligned_be16(cmd + 7);
2591 	arr[0] = pcode;
2592 	if (0 == subpcode) {
2593 		switch (pcode) {
2594 		case 0x0:	/* Supported log pages log page */
2595 			n = 4;
2596 			arr[n++] = 0x0;		/* this page */
2597 			arr[n++] = 0xd;		/* Temperature */
2598 			arr[n++] = 0x2f;	/* Informational exceptions */
2599 			arr[3] = n - 4;
2600 			break;
2601 		case 0xd:	/* Temperature log page */
2602 			arr[3] = resp_temp_l_pg(arr + 4);
2603 			break;
2604 		case 0x2f:	/* Informational exceptions log page */
2605 			arr[3] = resp_ie_l_pg(arr + 4);
2606 			break;
2607 		default:
2608 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2609 			return check_condition_result;
2610 		}
2611 	} else if (0xff == subpcode) {
2612 		arr[0] |= 0x40;
2613 		arr[1] = subpcode;
2614 		switch (pcode) {
2615 		case 0x0:	/* Supported log pages and subpages log page */
2616 			n = 4;
2617 			arr[n++] = 0x0;
2618 			arr[n++] = 0x0;		/* 0,0 page */
2619 			arr[n++] = 0x0;
2620 			arr[n++] = 0xff;	/* this page */
2621 			arr[n++] = 0xd;
2622 			arr[n++] = 0x0;		/* Temperature */
2623 			arr[n++] = 0x2f;
2624 			arr[n++] = 0x0;	/* Informational exceptions */
2625 			arr[3] = n - 4;
2626 			break;
2627 		case 0xd:	/* Temperature subpages */
2628 			n = 4;
2629 			arr[n++] = 0xd;
2630 			arr[n++] = 0x0;		/* Temperature */
2631 			arr[3] = n - 4;
2632 			break;
2633 		case 0x2f:	/* Informational exceptions subpages */
2634 			n = 4;
2635 			arr[n++] = 0x2f;
2636 			arr[n++] = 0x0;		/* Informational exceptions */
2637 			arr[3] = n - 4;
2638 			break;
2639 		default:
2640 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2641 			return check_condition_result;
2642 		}
2643 	} else {
2644 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2645 		return check_condition_result;
2646 	}
2647 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2648 	return fill_from_dev_buffer(scp, arr,
2649 		    min_t(int, len, SDEBUG_MAX_LSENSE_SZ));
2650 }
2651 
2652 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2653 {
2654 	return devip->nr_zones != 0;
2655 }
2656 
2657 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2658 					unsigned long long lba)
2659 {
2660 	unsigned int zno;
2661 
2662 	if (devip->zsize_shift)
2663 		zno = lba >> devip->zsize_shift;
2664 	else
2665 		zno = lba / devip->zsize;
2666 	return &devip->zstate[zno];
2667 }
2668 
2669 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2670 {
2671 	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2672 }
2673 
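/*
 * Take an open zone to the closed condition (or back to empty if nothing
 * was written), keeping the implicit/explicit open and closed zone
 * counters consistent. Conventional zones have no condition to change.
 */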
2674 static void zbc_close_zone(struct sdebug_dev_info *devip,
2675 			   struct sdeb_zone_state *zsp)
2676 {
2677 	enum sdebug_z_cond zc;
2678 
2679 	if (zbc_zone_is_conv(zsp))
2680 		return;
2681 
2682 	zc = zsp->z_cond;
2683 	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2684 		return;
2685 
2686 	if (zc == ZC2_IMPLICIT_OPEN)
2687 		devip->nr_imp_open--;
2688 	else
2689 		devip->nr_exp_open--;
2690 
2691 	if (zsp->z_wp == zsp->z_start) {
2692 		zsp->z_cond = ZC1_EMPTY;
2693 	} else {
2694 		zsp->z_cond = ZC4_CLOSED;
2695 		devip->nr_closed++;
2696 	}
2697 }
2698 
2699 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2700 {
2701 	struct sdeb_zone_state *zsp = &devip->zstate[0];
2702 	unsigned int i;
2703 
2704 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
2705 		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2706 			zbc_close_zone(devip, zsp);
2707 			return;
2708 		}
2709 	}
2710 }
2711 
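/*
 * Open a zone, either implicitly (a write landed in it) or explicitly
 * (e.g. OPEN ZONE). If that would exceed the max_open limit, close one
 * implicitly open zone first to make room.
 */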
2712 static void zbc_open_zone(struct sdebug_dev_info *devip,
2713 			  struct sdeb_zone_state *zsp, bool explicit)
2714 {
2715 	enum sdebug_z_cond zc;
2716 
2717 	if (zbc_zone_is_conv(zsp))
2718 		return;
2719 
2720 	zc = zsp->z_cond;
2721 	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2722 	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
2723 		return;
2724 
2725 	/* Close an implicit open zone if necessary */
2726 	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2727 		zbc_close_zone(devip, zsp);
2728 	else if (devip->max_open &&
2729 		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2730 		zbc_close_imp_open_zone(devip);
2731 
2732 	if (zsp->z_cond == ZC4_CLOSED)
2733 		devip->nr_closed--;
2734 	if (explicit) {
2735 		zsp->z_cond = ZC3_EXPLICIT_OPEN;
2736 		devip->nr_exp_open++;
2737 	} else {
2738 		zsp->z_cond = ZC2_IMPLICIT_OPEN;
2739 		devip->nr_imp_open++;
2740 	}
2741 }
2742 
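/*
 * Advance the write pointer after writing num blocks at lba. Sequential
 * write required zones get a plain increment; for other zone types the
 * write may span zones or land behind the WP, in which case the zone is
 * flagged as a non-sequential-write resource.
 */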
2743 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2744 		       unsigned long long lba, unsigned int num)
2745 {
2746 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2747 	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2748 
2749 	if (zbc_zone_is_conv(zsp))
2750 		return;
2751 
2752 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2753 		zsp->z_wp += num;
2754 		if (zsp->z_wp >= zend)
2755 			zsp->z_cond = ZC5_FULL;
2756 		return;
2757 	}
2758 
2759 	while (num) {
2760 		if (lba != zsp->z_wp)
2761 			zsp->z_non_seq_resource = true;
2762 
2763 		end = lba + num;
2764 		if (end >= zend) {
2765 			n = zend - lba;
2766 			zsp->z_wp = zend;
2767 		} else if (end > zsp->z_wp) {
2768 			n = num;
2769 			zsp->z_wp = end;
2770 		} else {
2771 			n = num;
2772 		}
2773 		if (zsp->z_wp >= zend)
2774 			zsp->z_cond = ZC5_FULL;
2775 
2776 		num -= n;
2777 		lba += n;
2778 		if (num) {
2779 			zsp++;
2780 			zend = zsp->z_start + zsp->z_size;
2781 		}
2782 	}
2783 }
2784 
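/*
 * Enforce the ZBC access rules: host-managed reads must not cross a
 * conventional/sequential zone type boundary, and sequential write
 * required zones only accept writes that start at the write pointer and
 * stay inside one zone. Writing an empty or closed zone implicitly
 * opens it.
 */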
2785 static int check_zbc_access_params(struct scsi_cmnd *scp,
2786 			unsigned long long lba, unsigned int num, bool write)
2787 {
2788 	struct scsi_device *sdp = scp->device;
2789 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2790 	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2791 	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2792 
2793 	if (!write) {
2794 		if (devip->zmodel == BLK_ZONED_HA)
2795 			return 0;
2796 		/* For host-managed, reads cannot cross zone types boundaries */
2797 		if (zsp_end != zsp &&
2798 		    zbc_zone_is_conv(zsp) &&
2799 		    !zbc_zone_is_conv(zsp_end)) {
2800 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2801 					LBA_OUT_OF_RANGE,
2802 					READ_INVDATA_ASCQ);
2803 			return check_condition_result;
2804 		}
2805 		return 0;
2806 	}
2807 
2808 	/* No restrictions for writes within conventional zones */
2809 	if (zbc_zone_is_conv(zsp)) {
2810 		if (!zbc_zone_is_conv(zsp_end)) {
2811 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2812 					LBA_OUT_OF_RANGE,
2813 					WRITE_BOUNDARY_ASCQ);
2814 			return check_condition_result;
2815 		}
2816 		return 0;
2817 	}
2818 
2819 	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2820 		/* Writes cannot cross sequential zone boundaries */
2821 		if (zsp_end != zsp) {
2822 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2823 					LBA_OUT_OF_RANGE,
2824 					WRITE_BOUNDARY_ASCQ);
2825 			return check_condition_result;
2826 		}
2827 		/* Cannot write full zones */
2828 		if (zsp->z_cond == ZC5_FULL) {
2829 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2830 					INVALID_FIELD_IN_CDB, 0);
2831 			return check_condition_result;
2832 		}
2833 		/* Writes must be aligned to the zone WP */
2834 		if (lba != zsp->z_wp) {
2835 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
2836 					LBA_OUT_OF_RANGE,
2837 					UNALIGNED_WRITE_ASCQ);
2838 			return check_condition_result;
2839 		}
2840 	}
2841 
2842 	/* Handle implicit open of closed and empty zones */
2843 	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2844 		if (devip->max_open &&
2845 		    devip->nr_exp_open >= devip->max_open) {
2846 			mk_sense_buffer(scp, DATA_PROTECT,
2847 					INSUFF_RES_ASC,
2848 					INSUFF_ZONE_ASCQ);
2849 			return check_condition_result;
2850 		}
2851 		zbc_open_zone(devip, zsp, false);
2852 	}
2853 
2854 	return 0;
2855 }
2856 
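/*
 * Checks common to all media access commands: LBA range against the
 * capacity, transfer length against the store size, software write
 * protect, and (for zoned devices) the ZBC rules above.
 */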
2857 static inline int check_device_access_params
2858 			(struct scsi_cmnd *scp, unsigned long long lba,
2859 			 unsigned int num, bool write)
2860 {
2861 	struct scsi_device *sdp = scp->device;
2862 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2863 
2864 	if (lba + num > sdebug_capacity) {
2865 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2866 		return check_condition_result;
2867 	}
2868 	/* transfer length excessive (tie in to block limits VPD page) */
2869 	if (num > sdebug_store_sectors) {
2870 		/* needs work to find which cdb byte 'num' comes from */
2871 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2872 		return check_condition_result;
2873 	}
2874 	if (write && unlikely(sdebug_wp)) {
2875 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2876 		return check_condition_result;
2877 	}
2878 	if (sdebug_dev_is_zoned(devip))
2879 		return check_zbc_access_params(scp, lba, num, write);
2880 
2881 	return 0;
2882 }
2883 
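/* Map a device to its backing store; NULL when fake_rw bypasses the store */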
2884 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2885 {
2886 	return sdebug_fake_rw ?
2887 			NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2888 }
2889 
2890 /* Returns number of bytes copied or -1 if error. */
2891 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2892 			    u32 sg_skip, u64 lba, u32 num, bool do_write)
2893 {
2894 	int ret;
2895 	u64 block, rest = 0;
2896 	enum dma_data_direction dir;
2897 	struct scsi_data_buffer *sdb = &scp->sdb;
2898 	u8 *fsp;
2899 
2900 	if (do_write) {
2901 		dir = DMA_TO_DEVICE;
2902 		write_since_sync = true;
2903 	} else {
2904 		dir = DMA_FROM_DEVICE;
2905 	}
2906 
2907 	if (!sdb->length || !sip)
2908 		return 0;
2909 	if (scp->sc_data_direction != dir)
2910 		return -1;
2911 	fsp = sip->storep;
2912 
2913 	block = do_div(lba, sdebug_store_sectors);
2914 	if (block + num > sdebug_store_sectors)
2915 		rest = block + num - sdebug_store_sectors;
2916 
2917 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2918 		   fsp + (block * sdebug_sector_size),
2919 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2920 	if (ret != (num - rest) * sdebug_sector_size)
2921 		return ret;
2922 
2923 	if (rest) {
2924 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2925 			    fsp, rest * sdebug_sector_size,
2926 			    sg_skip + ((num - rest) * sdebug_sector_size),
2927 			    do_write);
2928 	}
2929 
2930 	return ret;
2931 }
2932 
2933 /* Returns number of bytes copied or -1 if error. */
2934 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2935 {
2936 	struct scsi_data_buffer *sdb = &scp->sdb;
2937 
2938 	if (!sdb->length)
2939 		return 0;
2940 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2941 		return -1;
2942 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2943 			      num * sdebug_sector_size, 0, true);
2944 }
2945 
2946 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2947  * arr into sip->storep+lba and return true. If comparison fails then
2948  * return false. */
2949 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2950 			      const u8 *arr, bool compare_only)
2951 {
2952 	bool res;
2953 	u64 block, rest = 0;
2954 	u32 store_blks = sdebug_store_sectors;
2955 	u32 lb_size = sdebug_sector_size;
2956 	u8 *fsp = sip->storep;
2957 
2958 	block = do_div(lba, store_blks);
2959 	if (block + num > store_blks)
2960 		rest = block + num - store_blks;
2961 
2962 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2963 	if (!res)
2964 		return res;
2965 	if (rest)
2966 		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
2967 			      rest * lb_size);
2968 	if (!res)
2969 		return res;
2970 	if (compare_only)
2971 		return true;
2972 	arr += num * lb_size;
2973 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2974 	if (rest)
2975 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2976 	return res;
2977 }
2978 
2979 static __be16 dif_compute_csum(const void *buf, int len)
2980 {
2981 	__be16 csum;
2982 
2983 	if (sdebug_guard)
2984 		csum = (__force __be16)ip_compute_csum(buf, len);
2985 	else
2986 		csum = cpu_to_be16(crc_t10dif(buf, len));
2987 
2988 	return csum;
2989 }
2990 
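/*
 * Verify one sector's protection information tuple: the guard tag (CRC
 * or IP checksum, depending on the guard module parameter) and, for type
 * 1/2 protection, the reference tag. Returns 0 when clean, else the ASCQ
 * to pair with ASC 0x10 (1: guard check, 3: reference tag check).
 */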
2991 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2992 		      sector_t sector, u32 ei_lba)
2993 {
2994 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2995 
2996 	if (sdt->guard_tag != csum) {
2997 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2998 			(unsigned long)sector,
2999 			be16_to_cpu(sdt->guard_tag),
3000 			be16_to_cpu(csum));
3001 		return 0x01;
3002 	}
3003 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3004 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3005 		pr_err("REF check failed on sector %lu\n",
3006 			(unsigned long)sector);
3007 		return 0x03;
3008 	}
3009 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3010 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
3011 		pr_err("REF check failed on sector %lu\n",
3012 			(unsigned long)sector);
3013 		return 0x03;
3014 	}
3015 	return 0;
3016 }
3017 
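/*
 * Copy protection information between dif_storep and the command's
 * protection scatter-gather list, wrapping around the end of the store
 * when the sector range does.
 */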
3018 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3019 			  unsigned int sectors, bool read)
3020 {
3021 	size_t resid;
3022 	void *paddr;
3023 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3024 						scp->device->hostdata);
3025 	struct t10_pi_tuple *dif_storep = sip->dif_storep;
3026 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
3027 	struct sg_mapping_iter miter;
3028 
3029 	/* Bytes of protection data to copy into sgl */
3030 	resid = sectors * sizeof(*dif_storep);
3031 
3032 	sg_miter_start(&miter, scsi_prot_sglist(scp),
3033 		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3034 		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3035 
3036 	while (sg_miter_next(&miter) && resid > 0) {
3037 		size_t len = min_t(size_t, miter.length, resid);
3038 		void *start = dif_store(sip, sector);
3039 		size_t rest = 0;
3040 
3041 		if (dif_store_end < start + len)
3042 			rest = start + len - dif_store_end;
3043 
3044 		paddr = miter.addr;
3045 
3046 		if (read)
3047 			memcpy(paddr, start, len - rest);
3048 		else
3049 			memcpy(start, paddr, len - rest);
3050 
3051 		if (rest) {
3052 			if (read)
3053 				memcpy(paddr + len - rest, dif_storep, rest);
3054 			else
3055 				memcpy(dif_storep, paddr + len - rest, rest);
3056 		}
3057 
3058 		sector += len / sizeof(*dif_storep);
3059 		resid -= len;
3060 	}
3061 	sg_miter_stop(&miter);
3062 }
3063 
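/*
 * Verify the stored PI for each sector of a read, then export it to the
 * protection sgl. An application tag of 0xffff escapes checking for
 * that sector.
 */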
3064 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3065 			    unsigned int sectors, u32 ei_lba)
3066 {
3067 	unsigned int i;
3068 	sector_t sector;
3069 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3070 						scp->device->hostdata);
3071 	struct t10_pi_tuple *sdt;
3072 
3073 	for (i = 0; i < sectors; i++, ei_lba++) {
3074 		int ret;
3075 
3076 		sector = start_sec + i;
3077 		sdt = dif_store(sip, sector);
3078 
3079 		if (sdt->app_tag == cpu_to_be16(0xffff))
3080 			continue;
3081 
3082 		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
3083 				 ei_lba);
3084 		if (ret) {
3085 			dif_errors++;
3086 			return ret;
3087 		}
3088 	}
3089 
3090 	dif_copy_prot(scp, start_sec, sectors, true);
3091 	dix_reads++;
3092 
3093 	return 0;
3094 }
3095 
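/*
 * Service the READ(6/10/12/16/32) family: decode the CDB, apply any
 * injected errors, run the device/ZBC access checks, optionally verify
 * DIF/DIX protection data, then copy from the (fake) store into the
 * data-in buffer.
 */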
3096 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3097 {
3098 	bool check_prot;
3099 	u32 num;
3100 	u32 ei_lba;
3101 	int ret;
3102 	u64 lba;
3103 	struct sdeb_store_info *sip = devip2sip(devip);
3104 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3105 	u8 *cmd = scp->cmnd;
3106 	struct sdebug_queued_cmd *sqcp;
3107 
3108 	switch (cmd[0]) {
3109 	case READ_16:
3110 		ei_lba = 0;
3111 		lba = get_unaligned_be64(cmd + 2);
3112 		num = get_unaligned_be32(cmd + 10);
3113 		check_prot = true;
3114 		break;
3115 	case READ_10:
3116 		ei_lba = 0;
3117 		lba = get_unaligned_be32(cmd + 2);
3118 		num = get_unaligned_be16(cmd + 7);
3119 		check_prot = true;
3120 		break;
3121 	case READ_6:
3122 		ei_lba = 0;
3123 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3124 		      (u32)(cmd[1] & 0x1f) << 16;
3125 		num = (0 == cmd[4]) ? 256 : cmd[4];
3126 		check_prot = true;
3127 		break;
3128 	case READ_12:
3129 		ei_lba = 0;
3130 		lba = get_unaligned_be32(cmd + 2);
3131 		num = get_unaligned_be32(cmd + 6);
3132 		check_prot = true;
3133 		break;
3134 	case XDWRITEREAD_10:
3135 		ei_lba = 0;
3136 		lba = get_unaligned_be32(cmd + 2);
3137 		num = get_unaligned_be16(cmd + 7);
3138 		check_prot = false;
3139 		break;
3140 	default:	/* assume READ(32) */
3141 		lba = get_unaligned_be64(cmd + 12);
3142 		ei_lba = get_unaligned_be32(cmd + 20);
3143 		num = get_unaligned_be32(cmd + 28);
3144 		check_prot = false;
3145 		break;
3146 	}
3147 	if (unlikely(have_dif_prot && check_prot)) {
3148 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3149 		    (cmd[1] & 0xe0)) {
3150 			mk_sense_invalid_opcode(scp);
3151 			return check_condition_result;
3152 		}
3153 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3154 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3155 		    (cmd[1] & 0xe0) == 0)
3156 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3157 				    "to DIF device\n");
3158 	}
3159 	if (unlikely(sdebug_any_injecting_opt)) {
3160 		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
3161 
3162 		if (sqcp) {
3163 			if (sqcp->inj_short)
3164 				num /= 2;
3165 		}
3166 	} else
3167 		sqcp = NULL;
3168 
3169 	ret = check_device_access_params(scp, lba, num, false);
3170 	if (ret)
3171 		return ret;
3172 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3173 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3174 		     ((lba + num) > sdebug_medium_error_start))) {
3175 		/* claim unrecoverable read error */
3176 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3177 		/* set info field and valid bit for fixed descriptor */
3178 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3179 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
3180 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
3181 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3182 			put_unaligned_be32(ret, scp->sense_buffer + 3);
3183 		}
3184 		scsi_set_resid(scp, scsi_bufflen(scp));
3185 		return check_condition_result;
3186 	}
3187 
3188 	read_lock(macc_lckp);
3189 
3190 	/* DIX + T10 DIF */
3191 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3192 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
3193 
3194 		if (prot_ret) {
3195 			read_unlock(macc_lckp);
3196 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
3197 			return illegal_condition_result;
3198 		}
3199 	}
3200 
3201 	ret = do_device_access(sip, scp, 0, lba, num, false);
3202 	read_unlock(macc_lckp);
3203 	if (unlikely(ret == -1))
3204 		return DID_ERROR << 16;
3205 
3206 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3207 
3208 	if (unlikely(sqcp)) {
3209 		if (sqcp->inj_recovered) {
3210 			mk_sense_buffer(scp, RECOVERED_ERROR,
3211 					THRESHOLD_EXCEEDED, 0);
3212 			return check_condition_result;
3213 		} else if (sqcp->inj_transport) {
3214 			mk_sense_buffer(scp, ABORTED_COMMAND,
3215 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3216 			return check_condition_result;
3217 		} else if (sqcp->inj_dif) {
3218 			/* Logical block guard check failed */
3219 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3220 			return illegal_condition_result;
3221 		} else if (sqcp->inj_dix) {
3222 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3223 			return illegal_condition_result;
3224 		}
3225 	}
3226 	return 0;
3227 }
3228 
3229 static void dump_sector(unsigned char *buf, int len)
3230 {
3231 	int i, j, n;
3232 
3233 	pr_err(">>> Sector Dump <<<\n");
3234 	for (i = 0 ; i < len ; i += 16) {
3235 		char b[128];
3236 
3237 		for (j = 0, n = 0; j < 16; j++) {
3238 			unsigned char c = buf[i+j];
3239 
3240 			if (c >= 0x20 && c < 0x7e)
3241 				n += scnprintf(b + n, sizeof(b) - n,
3242 					       " %c ", buf[i+j]);
3243 			else
3244 				n += scnprintf(b + n, sizeof(b) - n,
3245 					       "%02x ", buf[i+j]);
3246 		}
3247 		pr_err("%04d: %s\n", i, b);
3248 	}
3249 }
3250 
3251 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3252 			     unsigned int sectors, u32 ei_lba)
3253 {
3254 	int ret;
3255 	struct t10_pi_tuple *sdt;
3256 	void *daddr;
3257 	sector_t sector = start_sec;
3258 	int ppage_offset;
3259 	int dpage_offset;
3260 	struct sg_mapping_iter diter;
3261 	struct sg_mapping_iter piter;
3262 
3263 	BUG_ON(scsi_sg_count(SCpnt) == 0);
3264 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3265 
3266 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3267 			scsi_prot_sg_count(SCpnt),
3268 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3269 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3270 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3271 
3272 	/* For each protection page */
3273 	while (sg_miter_next(&piter)) {
3274 		dpage_offset = 0;
3275 		if (WARN_ON(!sg_miter_next(&diter))) {
3276 			ret = 0x01;
3277 			goto out;
3278 		}
3279 
3280 		for (ppage_offset = 0; ppage_offset < piter.length;
3281 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
3282 			/* If we're at the end of the current
3283 			 * data page, advance to the next one.
3284 			 */
3285 			if (dpage_offset >= diter.length) {
3286 				if (WARN_ON(!sg_miter_next(&diter))) {
3287 					ret = 0x01;
3288 					goto out;
3289 				}
3290 				dpage_offset = 0;
3291 			}
3292 
3293 			sdt = piter.addr + ppage_offset;
3294 			daddr = diter.addr + dpage_offset;
3295 
3296 			ret = dif_verify(sdt, daddr, sector, ei_lba);
3297 			if (ret) {
3298 				dump_sector(daddr, sdebug_sector_size);
3299 				goto out;
3300 			}
3301 
3302 			sector++;
3303 			ei_lba++;
3304 			dpage_offset += sdebug_sector_size;
3305 		}
3306 		diter.consumed = dpage_offset;
3307 		sg_miter_stop(&diter);
3308 	}
3309 	sg_miter_stop(&piter);
3310 
3311 	dif_copy_prot(SCpnt, start_sec, sectors, false);
3312 	dix_writes++;
3313 
3314 	return 0;
3315 
3316 out:
3317 	dif_errors++;
3318 	sg_miter_stop(&diter);
3319 	sg_miter_stop(&piter);
3320 	return ret;
3321 }
3322 
3323 static unsigned long lba_to_map_index(sector_t lba)
3324 {
3325 	if (sdebug_unmap_alignment)
3326 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3327 	sector_div(lba, sdebug_unmap_granularity);
3328 	return lba;
3329 }
3330 
3331 static sector_t map_index_to_lba(unsigned long index)
3332 {
3333 	sector_t lba = index * sdebug_unmap_granularity;
3334 
3335 	if (sdebug_unmap_alignment)
3336 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3337 	return lba;
3338 }
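/*
 * Worked example (hypothetical values): with sdebug_unmap_granularity=8 and
 * sdebug_unmap_alignment=2, map index i covers LBAs [i*8 - 6, (i+1)*8 - 6),
 * so lba_to_map_index(10) = (10 + 6) / 8 = 2 and
 * map_index_to_lba(2) = 16 - 6 = 10.
 */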
3339 
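/*
 * Returns whether @lba is mapped, setting *num to the number of blocks,
 * starting at @lba, that share that same mapped (or unmapped) state,
 * clamped to the end of the store.
 */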
3340 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3341 			      unsigned int *num)
3342 {
3343 	sector_t end;
3344 	unsigned int mapped;
3345 	unsigned long index;
3346 	unsigned long next;
3347 
3348 	index = lba_to_map_index(lba);
3349 	mapped = test_bit(index, sip->map_storep);
3350 
3351 	if (mapped)
3352 		next = find_next_zero_bit(sip->map_storep, map_size, index);
3353 	else
3354 		next = find_next_bit(sip->map_storep, map_size, index);
3355 
3356 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3357 	*num = end - lba;
3358 	return mapped;
3359 }
3360 
3361 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3362 		       unsigned int len)
3363 {
3364 	sector_t end = lba + len;
3365 
3366 	while (lba < end) {
3367 		unsigned long index = lba_to_map_index(lba);
3368 
3369 		if (index < map_size)
3370 			set_bit(index, sip->map_storep);
3371 
3372 		lba = map_index_to_lba(index + 1);
3373 	}
3374 }
3375 
3376 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3377 			 unsigned int len)
3378 {
3379 	sector_t end = lba + len;
3380 	u8 *fsp = sip->storep;
3381 
3382 	while (lba < end) {
3383 		unsigned long index = lba_to_map_index(lba);
3384 
3385 		if (lba == map_index_to_lba(index) &&
3386 		    lba + sdebug_unmap_granularity <= end &&
3387 		    index < map_size) {
3388 			clear_bit(index, sip->map_storep);
3389 			if (sdebug_lbprz) {  /* LBPRZ=1: read 0s; LBPRZ=2: read 0xffs */
3390 				memset(fsp + lba * sdebug_sector_size,
3391 				       (sdebug_lbprz & 1) ? 0 : 0xff,
3392 				       sdebug_sector_size *
3393 				       sdebug_unmap_granularity);
3394 			}
3395 			if (sip->dif_storep) {
3396 				memset(sip->dif_storep + lba, 0xff,
3397 				       sizeof(*sip->dif_storep) *
3398 				       sdebug_unmap_granularity);
3399 			}
3400 		}
3401 		lba = map_index_to_lba(index + 1);
3402 	}
3403 }
3404 
3405 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3406 {
3407 	bool check_prot;
3408 	u32 num;
3409 	u32 ei_lba;
3410 	int ret;
3411 	u64 lba;
3412 	struct sdeb_store_info *sip = devip2sip(devip);
3413 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3414 	u8 *cmd = scp->cmnd;
3415 
3416 	switch (cmd[0]) {
3417 	case WRITE_16:
3418 		ei_lba = 0;
3419 		lba = get_unaligned_be64(cmd + 2);
3420 		num = get_unaligned_be32(cmd + 10);
3421 		check_prot = true;
3422 		break;
3423 	case WRITE_10:
3424 		ei_lba = 0;
3425 		lba = get_unaligned_be32(cmd + 2);
3426 		num = get_unaligned_be16(cmd + 7);
3427 		check_prot = true;
3428 		break;
3429 	case WRITE_6:
3430 		ei_lba = 0;
3431 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3432 		      (u32)(cmd[1] & 0x1f) << 16;
3433 		num = (0 == cmd[4]) ? 256 : cmd[4];
3434 		check_prot = true;
3435 		break;
3436 	case WRITE_12:
3437 		ei_lba = 0;
3438 		lba = get_unaligned_be32(cmd + 2);
3439 		num = get_unaligned_be32(cmd + 6);
3440 		check_prot = true;
3441 		break;
3442 	case 0x53:	/* XDWRITEREAD(10) */
3443 		ei_lba = 0;
3444 		lba = get_unaligned_be32(cmd + 2);
3445 		num = get_unaligned_be16(cmd + 7);
3446 		check_prot = false;
3447 		break;
3448 	default:	/* assume WRITE(32) */
3449 		lba = get_unaligned_be64(cmd + 12);
3450 		ei_lba = get_unaligned_be32(cmd + 20);
3451 		num = get_unaligned_be32(cmd + 28);
3452 		check_prot = false;
3453 		break;
3454 	}
3455 	if (unlikely(have_dif_prot && check_prot)) {
3456 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3457 		    (cmd[1] & 0xe0)) {
3458 			mk_sense_invalid_opcode(scp);
3459 			return check_condition_result;
3460 		}
3461 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3462 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3463 		    (cmd[1] & 0xe0) == 0)
3464 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3465 				    "to DIF device\n");
3466 	}
3467 
3468 	write_lock(macc_lckp);
3469 	ret = check_device_access_params(scp, lba, num, true);
3470 	if (ret) {
3471 		write_unlock(macc_lckp);
3472 		return ret;
3473 	}
3474 
3475 	/* DIX + T10 DIF */
3476 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3477 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3478 
3479 		if (prot_ret) {
3480 			write_unlock(macc_lckp);
3481 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3482 			return illegal_condition_result;
3483 		}
3484 	}
3485 
3486 	ret = do_device_access(sip, scp, 0, lba, num, true);
3487 	if (unlikely(scsi_debug_lbp()))
3488 		map_region(sip, lba, num);
3489 	/* If ZBC zone then bump its write pointer */
3490 	if (sdebug_dev_is_zoned(devip))
3491 		zbc_inc_wp(devip, lba, num);
3492 	write_unlock(macc_lckp);
3493 	if (unlikely(-1 == ret))
3494 		return DID_ERROR << 16;
3495 	else if (unlikely(sdebug_verbose &&
3496 			  (ret < (num * sdebug_sector_size))))
3497 		sdev_printk(KERN_INFO, scp->device,
3498 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3499 			    my_name, num * sdebug_sector_size, ret);
3500 
3501 	if (unlikely(sdebug_any_injecting_opt)) {
3502 		struct sdebug_queued_cmd *sqcp =
3503 				(struct sdebug_queued_cmd *)scp->host_scribble;
3504 
3505 		if (sqcp) {
3506 			if (sqcp->inj_recovered) {
3507 				mk_sense_buffer(scp, RECOVERED_ERROR,
3508 						THRESHOLD_EXCEEDED, 0);
3509 				return check_condition_result;
3510 			} else if (sqcp->inj_dif) {
3511 				/* Logical block guard check failed */
3512 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3513 				return illegal_condition_result;
3514 			} else if (sqcp->inj_dix) {
3515 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3516 				return illegal_condition_result;
3517 			}
3518 		}
3519 	}
3520 	return 0;
3521 }
3522 
3523 /*
3524  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3525  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3526  */
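/*
 * Data-out layout, as parsed below: a 32 byte parameter list header, then
 * num_lrd 32 byte LBA range descriptors (be64 LBA at offset 0, be32 number
 * of blocks at offset 8 and, for the 32 byte cdb variant, a be32 expected
 * initial reference tag at offset 12), all padded out to lbdof logical
 * blocks; the blocks to be written follow from byte offset lbdof_blen.
 */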
3527 static int resp_write_scat(struct scsi_cmnd *scp,
3528 			   struct sdebug_dev_info *devip)
3529 {
3530 	u8 *cmd = scp->cmnd;
3531 	u8 *lrdp = NULL;
3532 	u8 *up;
3533 	struct sdeb_store_info *sip = devip2sip(devip);
3534 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3535 	u8 wrprotect;
3536 	u16 lbdof, num_lrd, k;
3537 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3538 	u32 lb_size = sdebug_sector_size;
3539 	u32 ei_lba;
3540 	u64 lba;
3541 	int ret, res;
3542 	bool is_16;
3543 	static const u32 lrd_size = 32; /* + parameter list header size */
3544 
3545 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3546 		is_16 = false;
3547 		wrprotect = (cmd[10] >> 5) & 0x7;
3548 		lbdof = get_unaligned_be16(cmd + 12);
3549 		num_lrd = get_unaligned_be16(cmd + 16);
3550 		bt_len = get_unaligned_be32(cmd + 28);
3551 	} else {        /* that leaves WRITE SCATTERED(16) */
3552 		is_16 = true;
3553 		wrprotect = (cmd[2] >> 5) & 0x7;
3554 		lbdof = get_unaligned_be16(cmd + 4);
3555 		num_lrd = get_unaligned_be16(cmd + 8);
3556 		bt_len = get_unaligned_be32(cmd + 10);
3557 		if (unlikely(have_dif_prot)) {
3558 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3559 			    wrprotect) {
3560 				mk_sense_invalid_opcode(scp);
3561 				return illegal_condition_result;
3562 			}
3563 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3564 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3565 			     wrprotect == 0)
3566 				sdev_printk(KERN_ERR, scp->device,
3567 					    "Unprotected WR to DIF device\n");
3568 		}
3569 	}
3570 	if ((num_lrd == 0) || (bt_len == 0))
3571 		return 0;       /* T10 says these do-nothings are not errors */
3572 	if (lbdof == 0) {
3573 		if (sdebug_verbose)
3574 			sdev_printk(KERN_INFO, scp->device,
3575 				"%s: %s: LB Data Offset field bad\n",
3576 				my_name, __func__);
3577 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3578 		return illegal_condition_result;
3579 	}
3580 	lbdof_blen = lbdof * lb_size;
3581 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3582 		if (sdebug_verbose)
3583 			sdev_printk(KERN_INFO, scp->device,
3584 				"%s: %s: LBA range descriptors don't fit\n",
3585 				my_name, __func__);
3586 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3587 		return illegal_condition_result;
3588 	}
3589 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3590 	if (lrdp == NULL)
3591 		return SCSI_MLQUEUE_HOST_BUSY;
3592 	if (sdebug_verbose)
3593 		sdev_printk(KERN_INFO, scp->device,
3594 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3595 			my_name, __func__, lbdof_blen);
3596 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3597 	if (res == -1) {
3598 		ret = DID_ERROR << 16;
3599 		goto err_out;
3600 	}
3601 
3602 	write_lock(macc_lckp);
3603 	sg_off = lbdof_blen;
3604 	/* Spec says the Buffer Transfer Length field is the number of LBs in dout */
3605 	cum_lb = 0;
3606 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3607 		lba = get_unaligned_be64(up + 0);
3608 		num = get_unaligned_be32(up + 8);
3609 		if (sdebug_verbose)
3610 			sdev_printk(KERN_INFO, scp->device,
3611 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3612 				my_name, __func__, k, lba, num, sg_off);
3613 		if (num == 0)
3614 			continue;
3615 		ret = check_device_access_params(scp, lba, num, true);
3616 		if (ret)
3617 			goto err_out_unlock;
3618 		num_by = num * lb_size;
3619 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3620 
3621 		if ((cum_lb + num) > bt_len) {
3622 			if (sdebug_verbose)
3623 				sdev_printk(KERN_INFO, scp->device,
3624 				    "%s: %s: sum of blocks > data provided\n",
3625 				    my_name, __func__);
3626 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3627 					0);
3628 			ret = illegal_condition_result;
3629 			goto err_out_unlock;
3630 		}
3631 
3632 		/* DIX + T10 DIF */
3633 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3634 			int prot_ret = prot_verify_write(scp, lba, num,
3635 							 ei_lba);
3636 
3637 			if (prot_ret) {
3638 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3639 						prot_ret);
3640 				ret = illegal_condition_result;
3641 				goto err_out_unlock;
3642 			}
3643 		}
3644 
3645 		ret = do_device_access(sip, scp, sg_off, lba, num, true);
3646 		/* If ZBC zone then bump its write pointer */
3647 		if (sdebug_dev_is_zoned(devip))
3648 			zbc_inc_wp(devip, lba, num);
3649 		if (unlikely(scsi_debug_lbp()))
3650 			map_region(sip, lba, num);
3651 		if (unlikely(-1 == ret)) {
3652 			ret = DID_ERROR << 16;
3653 			goto err_out_unlock;
3654 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3655 			sdev_printk(KERN_INFO, scp->device,
3656 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3657 			    my_name, num_by, ret);
3658 
3659 		if (unlikely(sdebug_any_injecting_opt)) {
3660 			struct sdebug_queued_cmd *sqcp =
3661 				(struct sdebug_queued_cmd *)scp->host_scribble;
3662 
3663 			if (sqcp) {
3664 				if (sqcp->inj_recovered) {
3665 					mk_sense_buffer(scp, RECOVERED_ERROR,
3666 							THRESHOLD_EXCEEDED, 0);
3667 					ret = illegal_condition_result;
3668 					goto err_out_unlock;
3669 				} else if (sqcp->inj_dif) {
3670 					/* Logical block guard check failed */
3671 					mk_sense_buffer(scp, ABORTED_COMMAND,
3672 							0x10, 1);
3673 					ret = illegal_condition_result;
3674 					goto err_out_unlock;
3675 				} else if (sqcp->inj_dix) {
3676 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3677 							0x10, 1);
3678 					ret = illegal_condition_result;
3679 					goto err_out_unlock;
3680 				}
3681 			}
3682 		}
3683 		sg_off += num_by;
3684 		cum_lb += num;
3685 	}
3686 	ret = 0;
3687 err_out_unlock:
3688 	write_unlock(macc_lckp);
3689 err_out:
3690 	kfree(lrdp);
3691 	return ret;
3692 }
3693 
3694 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3695 			   u32 ei_lba, bool unmap, bool ndob)
3696 {
3697 	struct scsi_device *sdp = scp->device;
3698 	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3699 	unsigned long long i;
3700 	u64 block, lbaa;
3701 	u32 lb_size = sdebug_sector_size;
3702 	int ret;
3703 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3704 						scp->device->hostdata);
3705 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3706 	u8 *fs1p;
3707 	u8 *fsp;
3708 
3709 	write_lock(macc_lckp);
3710 
3711 	ret = check_device_access_params(scp, lba, num, true);
3712 	if (ret) {
3713 		write_unlock(macc_lckp);
3714 		return ret;
3715 	}
3716 
3717 	if (unmap && scsi_debug_lbp()) {
3718 		unmap_region(sip, lba, num);
3719 		goto out;
3720 	}
3721 	lbaa = lba;
3722 	block = do_div(lbaa, sdebug_store_sectors);
3723 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3724 	fsp = sip->storep;
3725 	fs1p = fsp + (block * lb_size);
3726 	if (ndob) {
3727 		memset(fs1p, 0, lb_size);
3728 		ret = 0;
3729 	} else
3730 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3731 
3732 	if (-1 == ret) {
3733 		write_unlock(macc_lckp);
3734 		return DID_ERROR << 16;
3735 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3736 		sdev_printk(KERN_INFO, scp->device,
3737 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3738 			    my_name, "write same", lb_size, ret);
3739 
3740 	/* Copy first sector to remaining blocks */
3741 	for (i = 1 ; i < num ; i++) {
3742 		lbaa = lba + i;
3743 		block = do_div(lbaa, sdebug_store_sectors);
3744 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3745 	}
3746 	if (scsi_debug_lbp())
3747 		map_region(sip, lba, num);
3748 	/* If ZBC zone then bump its write pointer */
3749 	if (sdebug_dev_is_zoned(devip))
3750 		zbc_inc_wp(devip, lba, num);
3751 out:
3752 	write_unlock(macc_lckp);
3753 
3754 	return 0;
3755 }
3756 
3757 static int resp_write_same_10(struct scsi_cmnd *scp,
3758 			      struct sdebug_dev_info *devip)
3759 {
3760 	u8 *cmd = scp->cmnd;
3761 	u32 lba;
3762 	u16 num;
3763 	u32 ei_lba = 0;
3764 	bool unmap = false;
3765 
3766 	if (cmd[1] & 0x8) {
3767 		if (sdebug_lbpws10 == 0) {
3768 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3769 			return check_condition_result;
3770 		} else
3771 			unmap = true;
3772 	}
3773 	lba = get_unaligned_be32(cmd + 2);
3774 	num = get_unaligned_be16(cmd + 7);
3775 	if (num > sdebug_write_same_length) {
3776 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3777 		return check_condition_result;
3778 	}
3779 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3780 }
3781 
3782 static int resp_write_same_16(struct scsi_cmnd *scp,
3783 			      struct sdebug_dev_info *devip)
3784 {
3785 	u8 *cmd = scp->cmnd;
3786 	u64 lba;
3787 	u32 num;
3788 	u32 ei_lba = 0;
3789 	bool unmap = false;
3790 	bool ndob = false;
3791 
3792 	if (cmd[1] & 0x8) {	/* UNMAP */
3793 		if (sdebug_lbpws == 0) {
3794 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3795 			return check_condition_result;
3796 		} else
3797 			unmap = true;
3798 	}
3799 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3800 		ndob = true;
3801 	lba = get_unaligned_be64(cmd + 2);
3802 	num = get_unaligned_be32(cmd + 10);
3803 	if (num > sdebug_write_same_length) {
3804 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3805 		return check_condition_result;
3806 	}
3807 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3808 }
3809 
3810 /* Note the mode field is in the same position as the (lower) service action
3811  * field. For the Report supported operation codes command, SPC-4 suggests
3812  * each mode of this command should be reported separately; left for future work. */
3813 static int resp_write_buffer(struct scsi_cmnd *scp,
3814 			     struct sdebug_dev_info *devip)
3815 {
3816 	u8 *cmd = scp->cmnd;
3817 	struct scsi_device *sdp = scp->device;
3818 	struct sdebug_dev_info *dp;
3819 	u8 mode;
3820 
3821 	mode = cmd[1] & 0x1f;
3822 	switch (mode) {
3823 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3824 		/* set UAs on this device only */
3825 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3826 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3827 		break;
3828 	case 0x5:	/* download MC, save and ACT */
3829 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3830 		break;
3831 	case 0x6:	/* download MC with offsets and ACT */
3832 		/* set UAs on most devices (LUs) in this target */
3833 		list_for_each_entry(dp,
3834 				    &devip->sdbg_host->dev_info_list,
3835 				    dev_list)
3836 			if (dp->target == sdp->id) {
3837 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3838 				if (devip != dp)
3839 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3840 						dp->uas_bm);
3841 			}
3842 		break;
3843 	case 0x7:	/* download MC with offsets, save, and ACT */
3844 		/* set UA on all devices (LUs) in this target */
3845 		list_for_each_entry(dp,
3846 				    &devip->sdbg_host->dev_info_list,
3847 				    dev_list)
3848 			if (dp->target == sdp->id)
3849 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3850 					dp->uas_bm);
3851 		break;
3852 	default:
3853 		/* do nothing for this command for other mode values */
3854 		break;
3855 	}
3856 	return 0;
3857 }
3858 
3859 static int resp_comp_write(struct scsi_cmnd *scp,
3860 			   struct sdebug_dev_info *devip)
3861 {
3862 	u8 *cmd = scp->cmnd;
3863 	u8 *arr;
3864 	struct sdeb_store_info *sip = devip2sip(devip);
3865 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3866 	u64 lba;
3867 	u32 dnum;
3868 	u32 lb_size = sdebug_sector_size;
3869 	u8 num;
3870 	int ret;
3871 	int retval = 0;
3872 
3873 	lba = get_unaligned_be64(cmd + 2);
3874 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3875 	if (0 == num)
3876 		return 0;	/* degenerate case, not an error */
3877 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3878 	    (cmd[1] & 0xe0)) {
3879 		mk_sense_invalid_opcode(scp);
3880 		return check_condition_result;
3881 	}
3882 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3883 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3884 	    (cmd[1] & 0xe0) == 0)
3885 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3886 			    "to DIF device\n");
3887 	ret = check_device_access_params(scp, lba, num, false);
3888 	if (ret)
3889 		return ret;
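	/*
	 * COMPARE AND WRITE's data-out buffer carries 2 * num blocks: num
	 * blocks of verify data followed by the num blocks to be written
	 * if the comparison succeeds.
	 */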
3890 	dnum = 2 * num;
3891 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3892 	if (NULL == arr) {
3893 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3894 				INSUFF_RES_ASCQ);
3895 		return check_condition_result;
3896 	}
3897 
3898 	write_lock(macc_lckp);
3899 
3900 	ret = do_dout_fetch(scp, dnum, arr);
3901 	if (ret == -1) {
3902 		retval = DID_ERROR << 16;
3903 		goto cleanup;
3904 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3905 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3906 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3907 			    dnum * lb_size, ret);
3908 	if (!comp_write_worker(sip, lba, num, arr, false)) {
3909 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3910 		retval = check_condition_result;
3911 		goto cleanup;
3912 	}
3913 	if (scsi_debug_lbp())
3914 		map_region(sip, lba, num);
3915 cleanup:
3916 	write_unlock(macc_lckp);
3917 	kfree(arr);
3918 	return retval;
3919 }
3920 
3921 struct unmap_block_desc {
3922 	__be64	lba;
3923 	__be32	blocks;
3924 	__be32	__reserved;
3925 };
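/*
 * UNMAP parameter list layout, as checked below: a be16 data length at byte
 * 0 (payload_len - 2), a be16 block descriptor data length at byte 2
 * (descriptors * 16), 4 reserved bytes, then the 16 byte descriptors above
 * starting at byte 8.
 */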
3926 
3927 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3928 {
3929 	unsigned char *buf;
3930 	struct unmap_block_desc *desc;
3931 	struct sdeb_store_info *sip = devip2sip(devip);
3932 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3933 	unsigned int i, payload_len, descriptors;
3934 	int ret;
3935 
3936 	if (!scsi_debug_lbp())
3937 		return 0;	/* fib and say it's done */
3938 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3939 	BUG_ON(scsi_bufflen(scp) != payload_len);
3940 
3941 	descriptors = (payload_len - 8) / 16;
3942 	if (descriptors > sdebug_unmap_max_desc) {
3943 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3944 		return check_condition_result;
3945 	}
3946 
3947 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3948 	if (!buf) {
3949 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3950 				INSUFF_RES_ASCQ);
3951 		return check_condition_result;
3952 	}
3953 
3954 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3955 
3956 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3957 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3958 
3959 	desc = (void *)&buf[8];
3960 
3961 	write_lock(macc_lckp);
3962 
3963 	for (i = 0 ; i < descriptors ; i++) {
3964 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3965 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3966 
3967 		ret = check_device_access_params(scp, lba, num, true);
3968 		if (ret)
3969 			goto out;
3970 
3971 		unmap_region(sip, lba, num);
3972 	}
3973 
3974 	ret = 0;
3975 
3976 out:
3977 	write_unlock(macc_lckp);
3978 	kfree(buf);
3979 
3980 	return ret;
3981 }
3982 
3983 #define SDEBUG_GET_LBA_STATUS_LEN 32
3984 
3985 static int resp_get_lba_status(struct scsi_cmnd *scp,
3986 			       struct sdebug_dev_info *devip)
3987 {
3988 	u8 *cmd = scp->cmnd;
3989 	struct sdeb_store_info *sip = devip2sip(devip);
3990 	u64 lba;
3991 	u32 alloc_len, mapped, num;
3992 	int ret;
3993 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3994 
3995 	lba = get_unaligned_be64(cmd + 2);
3996 	alloc_len = get_unaligned_be32(cmd + 10);
3997 
3998 	if (alloc_len < 24)
3999 		return 0;
4000 
4001 	ret = check_device_access_params(scp, lba, 1, false);
4002 	if (ret)
4003 		return ret;
4004 
4005 	if (scsi_debug_lbp())
4006 		mapped = map_state(sip, lba, &num);
4007 	else {
4008 		mapped = 1;
4009 		/* following just in case virtual_gb changed */
4010 		sdebug_capacity = get_sdebug_capacity();
4011 		if (sdebug_capacity - lba <= 0xffffffff)
4012 			num = sdebug_capacity - lba;
4013 		else
4014 			num = 0xffffffff;
4015 	}
4016 
4017 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4018 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
4019 	put_unaligned_be64(lba, arr + 8);	/* LBA */
4020 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
4021 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
4022 
4023 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4024 }
4025 
4026 static int resp_sync_cache(struct scsi_cmnd *scp,
4027 			   struct sdebug_dev_info *devip)
4028 {
4029 	int res = 0;
4030 	u64 lba;
4031 	u32 num_blocks;
4032 	u8 *cmd = scp->cmnd;
4033 
4034 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
4035 		lba = get_unaligned_be32(cmd + 2);
4036 		num_blocks = get_unaligned_be16(cmd + 7);
4037 	} else {				/* SYNCHRONIZE_CACHE(16) */
4038 		lba = get_unaligned_be64(cmd + 2);
4039 		num_blocks = get_unaligned_be32(cmd + 10);
4040 	}
4041 	if (lba + num_blocks > sdebug_capacity) {
4042 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4043 		return check_condition_result;
4044 	}
4045 	if (!write_since_sync || cmd[1] & 0x2)
4046 		res = SDEG_RES_IMMED_MASK;
4047 	else		/* delay if write_since_sync and IMMED clear */
4048 		write_since_sync = false;
4049 	return res;
4050 }
4051 
4052 /*
4053  * Assuming the LBA+num_blocks range is within bounds, this function
4054  * returns CONDITION MET if the specified blocks will (or already do) fit
4055  * in the cache, and GOOD status otherwise. It models a disk with a big
4056  * cache so it always yields CONDITION MET, and it actually tries to bring
4057  * the corresponding range of main memory into the CPU cache(s).
4058  */
4059 static int resp_pre_fetch(struct scsi_cmnd *scp,
4060 			  struct sdebug_dev_info *devip)
4061 {
4062 	int res = 0;
4063 	u64 lba;
4064 	u64 block, rest = 0;
4065 	u32 nblks;
4066 	u8 *cmd = scp->cmnd;
4067 	struct sdeb_store_info *sip = devip2sip(devip);
4068 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4069 	u8 *fsp = sip ? sip->storep : NULL;
4070 
4071 	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
4072 		lba = get_unaligned_be32(cmd + 2);
4073 		nblks = get_unaligned_be16(cmd + 7);
4074 	} else {			/* PRE-FETCH(16) */
4075 		lba = get_unaligned_be64(cmd + 2);
4076 		nblks = get_unaligned_be32(cmd + 10);
4077 	}
4078 	if (lba + nblks > sdebug_capacity) {
4079 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4080 		return check_condition_result;
4081 	}
4082 	if (!fsp)
4083 		goto fini;
4084 	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
4085 	block = do_div(lba, sdebug_store_sectors);
4086 	if (block + nblks > sdebug_store_sectors)
4087 		rest = block + nblks - sdebug_store_sectors;
4088 
4089 	/* Try to bring the PRE-FETCH range into CPU's cache */
4090 	read_lock(macc_lckp);
4091 	prefetch_range(fsp + (sdebug_sector_size * block),
4092 		       (nblks - rest) * sdebug_sector_size);
4093 	if (rest)
4094 		prefetch_range(fsp, rest * sdebug_sector_size);
4095 	read_unlock(macc_lckp);
4096 fini:
4097 	if (cmd[1] & 0x2)
4098 		res = SDEG_RES_IMMED_MASK;
4099 	return res | condition_met_result;
4100 }
4101 
4102 #define RL_BUCKET_ELEMS 8
4103 
4104 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4105  * (W-LUN), the normal Linux scanning logic does not associate it with a
4106  * device (e.g. /dev/sg7). The following magic will make that association:
4107  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4108  * where <n> is a host number. If there are multiple targets in a host then
4109  * the above will associate a W-LUN to each target. To only get a W-LUN
4110  * for target 2, then use "echo '- 2 49409' > scan" .
4111  */
4112 static int resp_report_luns(struct scsi_cmnd *scp,
4113 			    struct sdebug_dev_info *devip)
4114 {
4115 	unsigned char *cmd = scp->cmnd;
4116 	unsigned int alloc_len;
4117 	unsigned char select_report;
4118 	u64 lun;
4119 	struct scsi_lun *lun_p;
4120 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4121 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
4122 	unsigned int wlun_cnt;	/* report luns W-LUN count */
4123 	unsigned int tlun_cnt;	/* total LUN count */
4124 	unsigned int rlen;	/* response length (in bytes) */
4125 	int k, j, n, res;
4126 	unsigned int off_rsp = 0;
4127 	const int sz_lun = sizeof(struct scsi_lun);
4128 
4129 	clear_luns_changed_on_target(devip);
4130 
4131 	select_report = cmd[2];
4132 	alloc_len = get_unaligned_be32(cmd + 6);
4133 
4134 	if (alloc_len < 4) {
4135 		pr_err("alloc len too small %d\n", alloc_len);
4136 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4137 		return check_condition_result;
4138 	}
4139 
4140 	switch (select_report) {
4141 	case 0:		/* all LUNs apart from W-LUNs */
4142 		lun_cnt = sdebug_max_luns;
4143 		wlun_cnt = 0;
4144 		break;
4145 	case 1:		/* only W-LUNs */
4146 		lun_cnt = 0;
4147 		wlun_cnt = 1;
4148 		break;
4149 	case 2:		/* all LUNs */
4150 		lun_cnt = sdebug_max_luns;
4151 		wlun_cnt = 1;
4152 		break;
4153 	case 0x10:	/* only administrative LUs */
4154 	case 0x11:	/* see SPC-5 */
4155 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
4156 	default:
4157 		pr_debug("select report invalid %d\n", select_report);
4158 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4159 		return check_condition_result;
4160 	}
4161 
4162 	if (sdebug_no_lun_0 && (lun_cnt > 0))
4163 		--lun_cnt;
4164 
4165 	tlun_cnt = lun_cnt + wlun_cnt;
4166 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
4167 	scsi_set_resid(scp, scsi_bufflen(scp));
4168 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4169 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4170 
4171 	/* loops rely on sizeof response header same as sizeof lun (both 8) */
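	/* so the first 8 byte slot of the first bucket holds the header and
	 * that bucket carries at most RL_BUCKET_ELEMS - 1 LUNs; subsequent
	 * buckets carry RL_BUCKET_ELEMS LUNs each.
	 */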
4172 	lun = sdebug_no_lun_0 ? 1 : 0;
4173 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4174 		memset(arr, 0, sizeof(arr));
4175 		lun_p = (struct scsi_lun *)&arr[0];
4176 		if (k == 0) {
4177 			put_unaligned_be32(rlen, &arr[0]);
4178 			++lun_p;
4179 			j = 1;
4180 		}
4181 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4182 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4183 				break;
4184 			int_to_scsilun(lun++, lun_p);
4185 		}
4186 		if (j < RL_BUCKET_ELEMS)
4187 			break;
4188 		n = j * sz_lun;
4189 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4190 		if (res)
4191 			return res;
4192 		off_rsp += n;
4193 	}
4194 	if (wlun_cnt) {
4195 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4196 		++j;
4197 	}
4198 	if (j > 0)
4199 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4200 	return res;
4201 }
4202 
4203 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4204 {
4205 	bool is_bytchk3 = false;
4206 	u8 bytchk;
4207 	int ret, j;
4208 	u32 vnum, a_num, off;
4209 	const u32 lb_size = sdebug_sector_size;
4210 	u64 lba;
4211 	u8 *arr;
4212 	u8 *cmd = scp->cmnd;
4213 	struct sdeb_store_info *sip = devip2sip(devip);
4214 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4215 
4216 	bytchk = (cmd[1] >> 1) & 0x3;
4217 	if (bytchk == 0) {
4218 		return 0;	/* always claim internal verify okay */
4219 	} else if (bytchk == 2) {
4220 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4221 		return check_condition_result;
4222 	} else if (bytchk == 3) {
4223 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
4224 	}
4225 	switch (cmd[0]) {
4226 	case VERIFY_16:
4227 		lba = get_unaligned_be64(cmd + 2);
4228 		vnum = get_unaligned_be32(cmd + 10);
4229 		break;
4230 	case VERIFY:		/* is VERIFY(10) */
4231 		lba = get_unaligned_be32(cmd + 2);
4232 		vnum = get_unaligned_be16(cmd + 7);
4233 		break;
4234 	default:
4235 		mk_sense_invalid_opcode(scp);
4236 		return check_condition_result;
4237 	}
4238 	a_num = is_bytchk3 ? 1 : vnum;
4239 	/* Treat following check like one for read (i.e. no write) access */
4240 	ret = check_device_access_params(scp, lba, a_num, false);
4241 	if (ret)
4242 		return ret;
4243 
4244 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4245 	if (!arr) {
4246 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4247 				INSUFF_RES_ASCQ);
4248 		return check_condition_result;
4249 	}
4250 	/* Not changing store, so only need read access */
4251 	read_lock(macc_lckp);
4252 
4253 	ret = do_dout_fetch(scp, a_num, arr);
4254 	if (ret == -1) {
4255 		ret = DID_ERROR << 16;
4256 		goto cleanup;
4257 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4258 		sdev_printk(KERN_INFO, scp->device,
4259 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4260 			    my_name, __func__, a_num * lb_size, ret);
4261 	}
4262 	if (is_bytchk3) {
4263 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4264 			memcpy(arr + off, arr, lb_size);
4265 	}
4266 	ret = 0;
4267 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4268 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4269 		ret = check_condition_result;
4270 		goto cleanup;
4271 	}
4272 cleanup:
4273 	read_unlock(macc_lckp);
4274 	kfree(arr);
4275 	return ret;
4276 }
4277 
4278 #define RZONES_DESC_HD 64
4279 
4280 /* Report zones depending on start LBA nad reporting options */
4281 static int resp_report_zones(struct scsi_cmnd *scp,
4282 			     struct sdebug_dev_info *devip)
4283 {
4284 	unsigned int i, max_zones, rep_max_zones, nrz = 0;
4285 	int ret = 0;
4286 	u32 alloc_len, rep_opts, rep_len;
4287 	bool partial;
4288 	u64 lba, zs_lba;
4289 	u8 *arr = NULL, *desc;
4290 	u8 *cmd = scp->cmnd;
4291 	struct sdeb_zone_state *zsp;
4292 	struct sdeb_store_info *sip = devip2sip(devip);
4293 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4294 
4295 	if (!sdebug_dev_is_zoned(devip)) {
4296 		mk_sense_invalid_opcode(scp);
4297 		return check_condition_result;
4298 	}
4299 	zs_lba = get_unaligned_be64(cmd + 2);
4300 	alloc_len = get_unaligned_be32(cmd + 10);
4301 	rep_opts = cmd[14] & 0x3f;
4302 	partial = cmd[14] & 0x80;
4303 
4304 	if (zs_lba >= sdebug_capacity) {
4305 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4306 		return check_condition_result;
4307 	}
4308 	if (alloc_len < 64)	/* room for at least the 64 byte header? */
4309 		return 0;	/* also keeps (alloc_len - 64) from wrapping */
4310 	max_zones = devip->nr_zones - zs_lba / devip->zsize;
4311 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4312 			    max_zones);
4313 	arr = kzalloc(alloc_len, GFP_ATOMIC);
4314 	if (!arr) {
4315 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4316 				INSUFF_RES_ASCQ);
4317 		return check_condition_result;
4318 	}
4319 
4320 	read_lock(macc_lckp);
4321 
4322 	desc = arr + 64;
4323 	for (i = 0; i < max_zones; i++) {
4324 		lba = zs_lba + devip->zsize * i;
4325 		if (lba >= sdebug_capacity)
4326 			break;
4327 		zsp = zbc_zone(devip, lba);
4328 		switch (rep_opts) {
4329 		case 0x00:
4330 			/* All zones */
4331 			break;
4332 		case 0x01:
4333 			/* Empty zones */
4334 			if (zsp->z_cond != ZC1_EMPTY)
4335 				continue;
4336 			break;
4337 		case 0x02:
4338 			/* Implicit open zones */
4339 			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4340 				continue;
4341 			break;
4342 		case 0x03:
4343 			/* Explicit open zones */
4344 			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4345 				continue;
4346 			break;
4347 		case 0x04:
4348 			/* Closed zones */
4349 			if (zsp->z_cond != ZC4_CLOSED)
4350 				continue;
4351 			break;
4352 		case 0x05:
4353 			/* Full zones */
4354 			if (zsp->z_cond != ZC5_FULL)
4355 				continue;
4356 			break;
4357 		case 0x06:
4358 		case 0x07:
4359 		case 0x10:
4360 			/*
4361 			 * Read-only, offline and reset-WP-recommended
4362 			 * zones are not emulated: no zones to report.
4363 			 */
4364 			continue;
4365 		case 0x11:
4366 			/* non-seq-resource set */
4367 			if (!zsp->z_non_seq_resource)
4368 				continue;
4369 			break;
4370 		case 0x3f:
4371 			/* Not write pointer (conventional) zones */
4372 			if (!zbc_zone_is_conv(zsp))
4373 				continue;
4374 			break;
4375 		default:
4376 			mk_sense_buffer(scp, ILLEGAL_REQUEST,
4377 					INVALID_FIELD_IN_CDB, 0);
4378 			ret = check_condition_result;
4379 			goto fini;
4380 		}
4381 
4382 		if (nrz < rep_max_zones) {
4383 			/* Fill zone descriptor */
4384 			desc[0] = zsp->z_type;
4385 			desc[1] = zsp->z_cond << 4;
4386 			if (zsp->z_non_seq_resource)
4387 				desc[1] |= 1 << 1;
4388 			put_unaligned_be64((u64)zsp->z_size, desc + 8);
4389 			put_unaligned_be64((u64)zsp->z_start, desc + 16);
4390 			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4391 			desc += 64;
4392 		}
4393 
4394 		if (partial && nrz >= rep_max_zones)
4395 			break;
4396 
4397 		nrz++;
4398 	}
4399 
4400 	/* Report header */
4401 	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4402 	put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4403 
4404 	rep_len = (unsigned long)desc - (unsigned long)arr;
4405 	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4406 
4407 fini:
4408 	read_unlock(macc_lckp);
4409 	kfree(arr);
4410 	return ret;
4411 }
4412 
4413 /* Logic transplanted from tcmu-runner, file_zbc.c */
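/*
 * Zone condition transitions handled below (a sketch): EMPTY zones open
 * implicitly on a write or explicitly via OPEN ZONE; open zones may be
 * CLOSED, FINISHed to FULL, or have their write pointer RESET back to
 * EMPTY; conventional zones have no write pointer and reject all of
 * these zone operations.
 */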
4414 static void zbc_open_all(struct sdebug_dev_info *devip)
4415 {
4416 	struct sdeb_zone_state *zsp = &devip->zstate[0];
4417 	unsigned int i;
4418 
4419 	for (i = 0; i < devip->nr_zones; i++, zsp++) {
4420 		if (zsp->z_cond == ZC4_CLOSED)
4421 			zbc_open_zone(devip, &devip->zstate[i], true);
4422 	}
4423 }
4424 
4425 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4426 {
4427 	int res = 0;
4428 	u64 z_id;
4429 	enum sdebug_z_cond zc;
4430 	u8 *cmd = scp->cmnd;
4431 	struct sdeb_zone_state *zsp;
4432 	bool all = cmd[14] & 0x01;
4433 	struct sdeb_store_info *sip = devip2sip(devip);
4434 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4435 
4436 	if (!sdebug_dev_is_zoned(devip)) {
4437 		mk_sense_invalid_opcode(scp);
4438 		return check_condition_result;
4439 	}
4440 
4441 	write_lock(macc_lckp);
4442 
4443 	if (all) {
4444 		/* Check if all closed zones can be opened */
4445 		if (devip->max_open &&
4446 		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4447 			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4448 					INSUFF_ZONE_ASCQ);
4449 			res = check_condition_result;
4450 			goto fini;
4451 		}
4452 		/* Open all closed zones */
4453 		zbc_open_all(devip);
4454 		goto fini;
4455 	}
4456 
4457 	/* Open the specified zone */
4458 	z_id = get_unaligned_be64(cmd + 2);
4459 	if (z_id >= sdebug_capacity) {
4460 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4461 		res = check_condition_result;
4462 		goto fini;
4463 	}
4464 
4465 	zsp = zbc_zone(devip, z_id);
4466 	if (z_id != zsp->z_start) {
4467 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4468 		res = check_condition_result;
4469 		goto fini;
4470 	}
4471 	if (zbc_zone_is_conv(zsp)) {
4472 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4473 		res = check_condition_result;
4474 		goto fini;
4475 	}
4476 
4477 	zc = zsp->z_cond;
4478 	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4479 		goto fini;
4480 
4481 	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4482 		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4483 				INSUFF_ZONE_ASCQ);
4484 		res = check_condition_result;
4485 		goto fini;
4486 	}
4487 
4488 	if (zc == ZC2_IMPLICIT_OPEN)
4489 		zbc_close_zone(devip, zsp);
4490 	zbc_open_zone(devip, zsp, true);
4491 fini:
4492 	write_unlock(macc_lckp);
4493 	return res;
4494 }
4495 
4496 static void zbc_close_all(struct sdebug_dev_info *devip)
4497 {
4498 	unsigned int i;
4499 
4500 	for (i = 0; i < devip->nr_zones; i++)
4501 		zbc_close_zone(devip, &devip->zstate[i]);
4502 }
4503 
4504 static int resp_close_zone(struct scsi_cmnd *scp,
4505 			   struct sdebug_dev_info *devip)
4506 {
4507 	int res = 0;
4508 	u64 z_id;
4509 	u8 *cmd = scp->cmnd;
4510 	struct sdeb_zone_state *zsp;
4511 	bool all = cmd[14] & 0x01;
4512 	struct sdeb_store_info *sip = devip2sip(devip);
4513 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4514 
4515 	if (!sdebug_dev_is_zoned(devip)) {
4516 		mk_sense_invalid_opcode(scp);
4517 		return check_condition_result;
4518 	}
4519 
4520 	write_lock(macc_lckp);
4521 
4522 	if (all) {
4523 		zbc_close_all(devip);
4524 		goto fini;
4525 	}
4526 
4527 	/* Close specified zone */
4528 	z_id = get_unaligned_be64(cmd + 2);
4529 	if (z_id >= sdebug_capacity) {
4530 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4531 		res = check_condition_result;
4532 		goto fini;
4533 	}
4534 
4535 	zsp = zbc_zone(devip, z_id);
4536 	if (z_id != zsp->z_start) {
4537 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4538 		res = check_condition_result;
4539 		goto fini;
4540 	}
4541 	if (zbc_zone_is_conv(zsp)) {
4542 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4543 		res = check_condition_result;
4544 		goto fini;
4545 	}
4546 
4547 	zbc_close_zone(devip, zsp);
4548 fini:
4549 	write_unlock(macc_lckp);
4550 	return res;
4551 }
4552 
4553 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4554 			    struct sdeb_zone_state *zsp, bool empty)
4555 {
4556 	enum sdebug_z_cond zc = zsp->z_cond;
4557 
4558 	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4559 	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4560 		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4561 			zbc_close_zone(devip, zsp);
4562 		if (zsp->z_cond == ZC4_CLOSED)
4563 			devip->nr_closed--;
4564 		zsp->z_wp = zsp->z_start + zsp->z_size;
4565 		zsp->z_cond = ZC5_FULL;
4566 	}
4567 }
4568 
4569 static void zbc_finish_all(struct sdebug_dev_info *devip)
4570 {
4571 	unsigned int i;
4572 
4573 	for (i = 0; i < devip->nr_zones; i++)
4574 		zbc_finish_zone(devip, &devip->zstate[i], false);
4575 }
4576 
4577 static int resp_finish_zone(struct scsi_cmnd *scp,
4578 			    struct sdebug_dev_info *devip)
4579 {
4580 	struct sdeb_zone_state *zsp;
4581 	int res = 0;
4582 	u64 z_id;
4583 	u8 *cmd = scp->cmnd;
4584 	bool all = cmd[14] & 0x01;
4585 	struct sdeb_store_info *sip = devip2sip(devip);
4586 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4587 
4588 	if (!sdebug_dev_is_zoned(devip)) {
4589 		mk_sense_invalid_opcode(scp);
4590 		return check_condition_result;
4591 	}
4592 
4593 	write_lock(macc_lckp);
4594 
4595 	if (all) {
4596 		zbc_finish_all(devip);
4597 		goto fini;
4598 	}
4599 
4600 	/* Finish the specified zone */
4601 	z_id = get_unaligned_be64(cmd + 2);
4602 	if (z_id >= sdebug_capacity) {
4603 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4604 		res = check_condition_result;
4605 		goto fini;
4606 	}
4607 
4608 	zsp = zbc_zone(devip, z_id);
4609 	if (z_id != zsp->z_start) {
4610 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4611 		res = check_condition_result;
4612 		goto fini;
4613 	}
4614 	if (zbc_zone_is_conv(zsp)) {
4615 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4616 		res = check_condition_result;
4617 		goto fini;
4618 	}
4619 
4620 	zbc_finish_zone(devip, zsp, true);
4621 fini:
4622 	write_unlock(macc_lckp);
4623 	return res;
4624 }
4625 
4626 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4627 			 struct sdeb_zone_state *zsp)
4628 {
4629 	enum sdebug_z_cond zc;
4630 
4631 	if (zbc_zone_is_conv(zsp))
4632 		return;
4633 
4634 	zc = zsp->z_cond;
4635 	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4636 		zbc_close_zone(devip, zsp);
4637 
4638 	if (zsp->z_cond == ZC4_CLOSED)
4639 		devip->nr_closed--;
4640 
4641 	zsp->z_non_seq_resource = false;
4642 	zsp->z_wp = zsp->z_start;
4643 	zsp->z_cond = ZC1_EMPTY;
4644 }
4645 
4646 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4647 {
4648 	unsigned int i;
4649 
4650 	for (i = 0; i < devip->nr_zones; i++)
4651 		zbc_rwp_zone(devip, &devip->zstate[i]);
4652 }
4653 
4654 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4655 {
4656 	struct sdeb_zone_state *zsp;
4657 	int res = 0;
4658 	u64 z_id;
4659 	u8 *cmd = scp->cmnd;
4660 	bool all = cmd[14] & 0x01;
4661 	struct sdeb_store_info *sip = devip2sip(devip);
4662 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4663 
4664 	if (!sdebug_dev_is_zoned(devip)) {
4665 		mk_sense_invalid_opcode(scp);
4666 		return check_condition_result;
4667 	}
4668 
4669 	write_lock(macc_lckp);
4670 
4671 	if (all) {
4672 		zbc_rwp_all(devip);
4673 		goto fini;
4674 	}
4675 
4676 	z_id = get_unaligned_be64(cmd + 2);
4677 	if (z_id >= sdebug_capacity) {
4678 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4679 		res = check_condition_result;
4680 		goto fini;
4681 	}
4682 
4683 	zsp = zbc_zone(devip, z_id);
4684 	if (z_id != zsp->z_start) {
4685 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4686 		res = check_condition_result;
4687 		goto fini;
4688 	}
4689 	if (zbc_zone_is_conv(zsp)) {
4690 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4691 		res = check_condition_result;
4692 		goto fini;
4693 	}
4694 
4695 	zbc_rwp_zone(devip, zsp);
4696 fini:
4697 	write_unlock(macc_lckp);
4698 	return res;
4699 }
4700 
4701 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4702 {
4703 	u32 tag = blk_mq_unique_tag(cmnd->request);
4704 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
4705 
4706 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4707 	if (WARN_ON_ONCE(hwq >= submit_queues))
4708 		hwq = 0;
4709 	return sdebug_q_arr + hwq;
4710 }
4711 
4712 /* Queued (deferred) command completions converge here. */
4713 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4714 {
4715 	bool aborted = sd_dp->aborted;
4716 	int qc_idx;
4717 	int retiring = 0;
4718 	unsigned long iflags;
4719 	struct sdebug_queue *sqp;
4720 	struct sdebug_queued_cmd *sqcp;
4721 	struct scsi_cmnd *scp;
4722 	struct sdebug_dev_info *devip;
4723 
4724 	sd_dp->defer_t = SDEB_DEFER_NONE;
4725 	if (unlikely(aborted))
4726 		sd_dp->aborted = false;
4727 	qc_idx = sd_dp->qc_idx;
4728 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
4729 	if (sdebug_statistics) {
4730 		atomic_inc(&sdebug_completions);
4731 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4732 			atomic_inc(&sdebug_miss_cpus);
4733 	}
4734 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4735 		pr_err("wild qc_idx=%d\n", qc_idx);
4736 		return;
4737 	}
4738 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4739 	sqcp = &sqp->qc_arr[qc_idx];
4740 	scp = sqcp->a_cmnd;
4741 	if (unlikely(scp == NULL)) {
4742 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4743 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
4744 		       sd_dp->sqa_idx, qc_idx);
4745 		return;
4746 	}
4747 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
4748 	if (likely(devip))
4749 		atomic_dec(&devip->num_in_q);
4750 	else
4751 		pr_err("devip=NULL\n");
4752 	if (unlikely(atomic_read(&retired_max_queue) > 0))
4753 		retiring = 1;
4754 
4755 	sqcp->a_cmnd = NULL;
4756 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4757 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4758 		pr_err("Unexpected completion\n");
4759 		return;
4760 	}
4761 
4762 	if (unlikely(retiring)) {	/* user has reduced max_queue */
4763 		int k, retval;
4764 
4765 		retval = atomic_read(&retired_max_queue);
4766 		if (qc_idx >= retval) {
4767 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4768 			pr_err("index %d too large\n", retval);
4769 			return;
4770 		}
4771 		k = find_last_bit(sqp->in_use_bm, retval);
4772 		if ((k < sdebug_max_queue) || (k == retval))
4773 			atomic_set(&retired_max_queue, 0);
4774 		else
4775 			atomic_set(&retired_max_queue, k + 1);
4776 	}
4777 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4778 	if (unlikely(aborted)) {
4779 		if (sdebug_verbose)
4780 			pr_info("bypassing scsi_done() due to aborted cmd\n");
4781 		return;
4782 	}
4783 	scp->scsi_done(scp); /* callback to mid level */
4784 }
4785 
4786 /* When high resolution timer goes off this function is called. */
4787 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4788 {
4789 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4790 						  hrt);
4791 	sdebug_q_cmd_complete(sd_dp);
4792 	return HRTIMER_NORESTART;
4793 }
4794 
4795 /* When work queue schedules work, it calls this function. */
4796 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4797 {
4798 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4799 						  ew.work);
4800 	sdebug_q_cmd_complete(sd_dp);
4801 }
4802 
4803 static bool got_shared_uuid;
4804 static uuid_t shared_uuid;
4805 
4806 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4807 {
4808 	struct sdeb_zone_state *zsp;
4809 	sector_t capacity = get_sdebug_capacity();
4810 	sector_t zstart = 0;
4811 	unsigned int i;
4812 
4813 	/*
4814 	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4815 	 * a zone size allowing for at least 4 zones on the device. Otherwise,
4816 	 * use the specified zone size checking that at least 2 zones can be
4817 	 * created for the device.
4818 	 */
4819 	if (!sdeb_zbc_zone_size_mb) {
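	/*
	 * For example (assuming DEF_ZBC_ZONE_SIZE_MB is 128 and 512 byte
	 * sectors), the default zone size works out to 128 * 2^20 / 512 =
	 * 262144 blocks, halved while fewer than 4 zones would fit.
	 */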
4820 		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4821 			>> ilog2(sdebug_sector_size);
4822 		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4823 			devip->zsize >>= 1;
4824 		if (devip->zsize < 2) {
4825 			pr_err("Device capacity too small\n");
4826 			return -EINVAL;
4827 		}
4828 	} else {
4829 		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4830 			>> ilog2(sdebug_sector_size);
4831 		if (devip->zsize >= capacity) {
4832 			pr_err("Zone size too large for device capacity\n");
4833 			return -EINVAL;
4834 		}
4835 	}
4836 	if (!is_power_of_2(devip->zsize))
4837 		return -EINVAL;	/* zsize_shift below needs a power of 2 */
4838 	devip->zsize_shift = ilog2(devip->zsize);
4839 	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4840 
4841 	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4842 		pr_err("Number of conventional zones too large\n");
4843 		return -EINVAL;
4844 	}
4845 	devip->nr_conv_zones = sdeb_zbc_nr_conv;
4846 
4847 	if (devip->zmodel == BLK_ZONED_HM) {
4848 		/* zbc_max_open_zones can be 0, meaning "not reported" */
4849 		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4850 			devip->max_open = (devip->nr_zones - 1) / 2;
4851 		else
4852 			devip->max_open = sdeb_zbc_max_open;
4853 	}
4854 
4855 	devip->zstate = kcalloc(devip->nr_zones,
4856 				sizeof(struct sdeb_zone_state), GFP_KERNEL);
4857 	if (!devip->zstate)
4858 		return -ENOMEM;
4859 
4860 	for (i = 0; i < devip->nr_zones; i++) {
4861 		zsp = &devip->zstate[i];
4862 
4863 		zsp->z_start = zstart;
4864 
4865 		if (i < devip->nr_conv_zones) {
4866 			zsp->z_type = ZBC_ZONE_TYPE_CNV;
4867 			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4868 			zsp->z_wp = (sector_t)-1;
4869 		} else {
4870 			if (devip->zmodel == BLK_ZONED_HM)
4871 				zsp->z_type = ZBC_ZONE_TYPE_SWR;
4872 			else
4873 				zsp->z_type = ZBC_ZONE_TYPE_SWP;
4874 			zsp->z_cond = ZC1_EMPTY;
4875 			zsp->z_wp = zsp->z_start;
4876 		}
4877 
4878 		if (zsp->z_start + devip->zsize < capacity)
4879 			zsp->z_size = devip->zsize;
4880 		else
4881 			zsp->z_size = capacity - zsp->z_start;
4882 
4883 		zstart += zsp->z_size;
4884 	}
4885 
4886 	return 0;
4887 }
4888 
4889 static struct sdebug_dev_info *sdebug_device_create(
4890 			struct sdebug_host_info *sdbg_host, gfp_t flags)
4891 {
4892 	struct sdebug_dev_info *devip;
4893 
4894 	devip = kzalloc(sizeof(*devip), flags);
4895 	if (devip) {
4896 		if (sdebug_uuid_ctl == 1)
4897 			uuid_gen(&devip->lu_name);
4898 		else if (sdebug_uuid_ctl == 2) {
4899 			if (got_shared_uuid)
4900 				devip->lu_name = shared_uuid;
4901 			else {
4902 				uuid_gen(&shared_uuid);
4903 				got_shared_uuid = true;
4904 				devip->lu_name = shared_uuid;
4905 			}
4906 		}
4907 		devip->sdbg_host = sdbg_host;
4908 		if (sdeb_zbc_in_use) {
4909 			devip->zmodel = sdeb_zbc_model;
4910 			if (sdebug_device_create_zones(devip)) {
4911 				kfree(devip);
4912 				return NULL;
4913 			}
4914 		} else {
4915 			devip->zmodel = BLK_ZONED_NONE;
4916 		}
4918 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4919 	}
4920 	return devip;
4921 }
4922 
4923 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4924 {
4925 	struct sdebug_host_info *sdbg_host;
4926 	struct sdebug_dev_info *open_devip = NULL;
4927 	struct sdebug_dev_info *devip;
4928 
4929 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4930 	if (!sdbg_host) {
4931 		pr_err("Host info NULL\n");
4932 		return NULL;
4933 	}
4934 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4935 		if ((devip->used) && (devip->channel == sdev->channel) &&
4936 		    (devip->target == sdev->id) &&
4937 		    (devip->lun == sdev->lun))
4938 			return devip;
4939 		else {
4940 			if ((!devip->used) && (!open_devip))
4941 				open_devip = devip;
4942 		}
4943 	}
4944 	if (!open_devip) { /* try and make a new one */
4945 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4946 		if (!open_devip) {
4947 			pr_err("out of memory at line %d\n", __LINE__);
4948 			return NULL;
4949 		}
4950 	}
4951 
4952 	open_devip->channel = sdev->channel;
4953 	open_devip->target = sdev->id;
4954 	open_devip->lun = sdev->lun;
4955 	open_devip->sdbg_host = sdbg_host;
4956 	atomic_set(&open_devip->num_in_q, 0);
4957 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4958 	open_devip->used = true;
4959 	return open_devip;
4960 }
4961 
4962 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
4963 {
4964 	if (sdebug_verbose)
4965 		pr_info("slave_alloc <%u %u %u %llu>\n",
4966 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4967 	return 0;
4968 }
4969 
4970 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4971 {
4972 	struct sdebug_dev_info *devip =
4973 			(struct sdebug_dev_info *)sdp->hostdata;
4974 
4975 	if (sdebug_verbose)
4976 		pr_info("slave_configure <%u %u %u %llu>\n",
4977 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4978 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4979 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4980 	if (devip == NULL) {
4981 		devip = find_build_dev_info(sdp);
4982 		if (devip == NULL)
4983 			return 1;  /* no resources, will be marked offline */
4984 	}
4985 	sdp->hostdata = devip;
4986 	if (sdebug_no_uld)
4987 		sdp->no_uld_attach = 1;
4988 	config_cdb_len(sdp);
4989 	return 0;
4990 }
4991 
4992 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4993 {
4994 	struct sdebug_dev_info *devip =
4995 		(struct sdebug_dev_info *)sdp->hostdata;
4996 
4997 	if (sdebug_verbose)
4998 		pr_info("slave_destroy <%u %u %u %llu>\n",
4999 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5000 	if (devip) {
5001 		/* make this slot available for re-use */
5002 		devip->used = false;
5003 		sdp->hostdata = NULL;
5004 	}
5005 }
5006 
5007 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5008 			   enum sdeb_defer_type defer_t)
5009 {
5010 	if (!sd_dp)
5011 		return;
5012 	if (defer_t == SDEB_DEFER_HRT)
5013 		hrtimer_cancel(&sd_dp->hrt);
5014 	else if (defer_t == SDEB_DEFER_WQ)
5015 		cancel_work_sync(&sd_dp->ew.work);
5016 }
5017 
/* If @cmnd is found, deletes its timer or work queue and returns true; else
   returns false */
5020 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5021 {
5022 	unsigned long iflags;
5023 	int j, k, qmax, r_qmax;
5024 	enum sdeb_defer_type l_defer_t;
5025 	struct sdebug_queue *sqp;
5026 	struct sdebug_queued_cmd *sqcp;
5027 	struct sdebug_dev_info *devip;
5028 	struct sdebug_defer *sd_dp;
5029 
5030 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5031 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5032 		qmax = sdebug_max_queue;
5033 		r_qmax = atomic_read(&retired_max_queue);
5034 		if (r_qmax > qmax)
5035 			qmax = r_qmax;
5036 		for (k = 0; k < qmax; ++k) {
5037 			if (test_bit(k, sqp->in_use_bm)) {
5038 				sqcp = &sqp->qc_arr[k];
5039 				if (cmnd != sqcp->a_cmnd)
5040 					continue;
5041 				/* found */
5042 				devip = (struct sdebug_dev_info *)
5043 						cmnd->device->hostdata;
5044 				if (devip)
5045 					atomic_dec(&devip->num_in_q);
5046 				sqcp->a_cmnd = NULL;
5047 				sd_dp = sqcp->sd_dp;
5048 				if (sd_dp) {
5049 					l_defer_t = sd_dp->defer_t;
5050 					sd_dp->defer_t = SDEB_DEFER_NONE;
5051 				} else
5052 					l_defer_t = SDEB_DEFER_NONE;
5053 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5054 				stop_qc_helper(sd_dp, l_defer_t);
5055 				clear_bit(k, sqp->in_use_bm);
5056 				return true;
5057 			}
5058 		}
5059 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5060 	}
5061 	return false;
5062 }
5063 
5064 /* Deletes (stops) timers or work queues of all queued commands */
5065 static void stop_all_queued(void)
5066 {
5067 	unsigned long iflags;
5068 	int j, k;
5069 	enum sdeb_defer_type l_defer_t;
5070 	struct sdebug_queue *sqp;
5071 	struct sdebug_queued_cmd *sqcp;
5072 	struct sdebug_dev_info *devip;
5073 	struct sdebug_defer *sd_dp;
5074 
5075 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5076 		spin_lock_irqsave(&sqp->qc_lock, iflags);
5077 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5078 			if (test_bit(k, sqp->in_use_bm)) {
5079 				sqcp = &sqp->qc_arr[k];
5080 				if (sqcp->a_cmnd == NULL)
5081 					continue;
5082 				devip = (struct sdebug_dev_info *)
5083 					sqcp->a_cmnd->device->hostdata;
5084 				if (devip)
5085 					atomic_dec(&devip->num_in_q);
5086 				sqcp->a_cmnd = NULL;
5087 				sd_dp = sqcp->sd_dp;
5088 				if (sd_dp) {
5089 					l_defer_t = sd_dp->defer_t;
5090 					sd_dp->defer_t = SDEB_DEFER_NONE;
5091 				} else
5092 					l_defer_t = SDEB_DEFER_NONE;
5093 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5094 				stop_qc_helper(sd_dp, l_defer_t);
5095 				clear_bit(k, sqp->in_use_bm);
5096 				spin_lock_irqsave(&sqp->qc_lock, iflags);
5097 			}
5098 		}
5099 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5100 	}
5101 }
5102 
5103 /* Free queued command memory on heap */
5104 static void free_all_queued(void)
5105 {
5106 	int j, k;
5107 	struct sdebug_queue *sqp;
5108 	struct sdebug_queued_cmd *sqcp;
5109 
5110 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5111 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5112 			sqcp = &sqp->qc_arr[k];
5113 			kfree(sqcp->sd_dp);
5114 			sqcp->sd_dp = NULL;
5115 		}
5116 	}
5117 }
5118 
5119 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5120 {
5121 	bool ok;
5122 
5123 	++num_aborts;
5124 	if (SCpnt) {
5125 		ok = stop_queued_cmnd(SCpnt);
5126 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5127 			sdev_printk(KERN_INFO, SCpnt->device,
5128 				    "%s: command%s found\n", __func__,
5129 				    ok ? "" : " not");
5130 	}
5131 	return SUCCESS;
5132 }
5133 
5134 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5135 {
5136 	++num_dev_resets;
5137 	if (SCpnt && SCpnt->device) {
5138 		struct scsi_device *sdp = SCpnt->device;
5139 		struct sdebug_dev_info *devip =
5140 				(struct sdebug_dev_info *)sdp->hostdata;
5141 
5142 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5143 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5144 		if (devip)
5145 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
5146 	}
5147 	return SUCCESS;
5148 }
5149 
5150 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5151 {
5152 	struct sdebug_host_info *sdbg_host;
5153 	struct sdebug_dev_info *devip;
5154 	struct scsi_device *sdp;
5155 	struct Scsi_Host *hp;
5156 	int k = 0;
5157 
5158 	++num_target_resets;
5159 	if (!SCpnt)
5160 		goto lie;
5161 	sdp = SCpnt->device;
5162 	if (!sdp)
5163 		goto lie;
5164 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5165 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5166 	hp = sdp->host;
5167 	if (!hp)
5168 		goto lie;
5169 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5170 	if (sdbg_host) {
5171 		list_for_each_entry(devip,
5172 				    &sdbg_host->dev_info_list,
5173 				    dev_list)
5174 			if (devip->target == sdp->id) {
5175 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5176 				++k;
5177 			}
5178 	}
5179 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5180 		sdev_printk(KERN_INFO, sdp,
5181 			    "%s: %d device(s) found in target\n", __func__, k);
5182 lie:
5183 	return SUCCESS;
5184 }
5185 
5186 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5187 {
5188 	struct sdebug_host_info *sdbg_host;
5189 	struct sdebug_dev_info *devip;
5190 	struct scsi_device *sdp;
5191 	struct Scsi_Host *hp;
5192 	int k = 0;
5193 
5194 	++num_bus_resets;
5195 	if (!(SCpnt && SCpnt->device))
5196 		goto lie;
5197 	sdp = SCpnt->device;
5198 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5199 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5200 	hp = sdp->host;
5201 	if (hp) {
5202 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5203 		if (sdbg_host) {
5204 			list_for_each_entry(devip,
5205 					    &sdbg_host->dev_info_list,
5206 					    dev_list) {
5207 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5208 				++k;
5209 			}
5210 		}
5211 	}
5212 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5213 		sdev_printk(KERN_INFO, sdp,
5214 			    "%s: %d device(s) found in host\n", __func__, k);
5215 lie:
5216 	return SUCCESS;
5217 }
5218 
5219 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5220 {
5221 	struct sdebug_host_info *sdbg_host;
5222 	struct sdebug_dev_info *devip;
5223 	int k = 0;
5224 
5225 	++num_host_resets;
5226 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5227 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5228 	spin_lock(&sdebug_host_list_lock);
5229 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5230 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
5231 				    dev_list) {
5232 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5233 			++k;
5234 		}
5235 	}
5236 	spin_unlock(&sdebug_host_list_lock);
5237 	stop_all_queued();
5238 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5239 		sdev_printk(KERN_INFO, SCpnt->device,
5240 			    "%s: %d device(s) found\n", __func__, k);
5241 	return SUCCESS;
5242 }
5243 
5244 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5245 {
5246 	struct msdos_partition *pp;
5247 	int starts[SDEBUG_MAX_PARTS + 2];
5248 	int sectors_per_part, num_sectors, k;
5249 	int heads_by_sects, start_sec, end_sec;
5250 
5251 	/* assume partition table already zeroed */
5252 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
5253 		return;
5254 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5255 		sdebug_num_parts = SDEBUG_MAX_PARTS;
5256 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5257 	}
5258 	num_sectors = (int)sdebug_store_sectors;
5259 	sectors_per_part = (num_sectors - sdebug_sectors_per)
5260 			   / sdebug_num_parts;
5261 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
5262 	starts[0] = sdebug_sectors_per;
5263 	for (k = 1; k < sdebug_num_parts; ++k)
5264 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
5265 			    * heads_by_sects;
5266 	starts[sdebug_num_parts] = num_sectors;
5267 	starts[sdebug_num_parts + 1] = 0;
5268 
5269 	ramp[510] = 0x55;	/* magic partition markings */
5270 	ramp[511] = 0xAA;
5271 	pp = (struct msdos_partition *)(ramp + 0x1be);
5272 	for (k = 0; starts[k + 1]; ++k, ++pp) {
5273 		start_sec = starts[k];
5274 		end_sec = starts[k + 1] - 1;
5275 		pp->boot_ind = 0;
5276 
5277 		pp->cyl = start_sec / heads_by_sects;
5278 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
5279 			   / sdebug_sectors_per;
5280 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
5281 
5282 		pp->end_cyl = end_sec / heads_by_sects;
5283 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5284 			       / sdebug_sectors_per;
5285 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5286 
5287 		pp->start_sect = cpu_to_le32(start_sec);
5288 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5289 		pp->sys_ind = 0x83;	/* plain Linux partition */
5290 	}
5291 }
5292 
5293 static void block_unblock_all_queues(bool block)
5294 {
5295 	int j;
5296 	struct sdebug_queue *sqp;
5297 
5298 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5299 		atomic_set(&sqp->blocked, (int)block);
5300 }
5301 
5302 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5303  * commands will be processed normally before triggers occur.
5304  */
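/* Illustrative example: with every_nth=100 and sdebug_cmnd_count at 250,
 * the count is rounded down to 200, so 99 further commands are processed
 * normally before the next trigger fires.
 */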
5305 static void tweak_cmnd_count(void)
5306 {
5307 	int count, modulo;
5308 
5309 	modulo = abs(sdebug_every_nth);
5310 	if (modulo < 2)
5311 		return;
5312 	block_unblock_all_queues(true);
5313 	count = atomic_read(&sdebug_cmnd_count);
5314 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5315 	block_unblock_all_queues(false);
5316 }
5317 
5318 static void clear_queue_stats(void)
5319 {
5320 	atomic_set(&sdebug_cmnd_count, 0);
5321 	atomic_set(&sdebug_completions, 0);
5322 	atomic_set(&sdebug_miss_cpus, 0);
5323 	atomic_set(&sdebug_a_tsf, 0);
5324 }
5325 
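/* Sets the per-command injection flags from sdebug_opts when the command
 * count has reached a multiple of abs(sdebug_every_nth); otherwise, for
 * positive every_nth, any previously set flags are cleared. E.g. with
 * every_nth=100 and opts=8 (recovered_err), every 100th command gets
 * inj_recovered set.
 */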
5326 static void setup_inject(struct sdebug_queue *sqp,
5327 			 struct sdebug_queued_cmd *sqcp)
5328 {
5329 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
5330 		if (sdebug_every_nth > 0)
5331 			sqcp->inj_recovered = sqcp->inj_transport
5332 				= sqcp->inj_dif
5333 				= sqcp->inj_dix = sqcp->inj_short
5334 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
5335 		return;
5336 	}
5337 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
5338 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
5339 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
5340 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
5341 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
5342 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
5343 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
5344 }
5345 
5346 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
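/* For ndelay values below this threshold, the time already spent generating
 * the response is treated as part of ("inclusive of") the requested delay:
 * schedule_resp() samples the boot clock before calling the response
 * function and completes the command in the submitting thread if the delay
 * has already elapsed.
 */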
5347 
/* Completes the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by invoking scsi_done() directly,
 * or schedules an hrtimer or work queue item to do so later, then returns 0.
 * Returns SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
5353 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5354 			 int scsi_result,
5355 			 int (*pfp)(struct scsi_cmnd *,
5356 				    struct sdebug_dev_info *),
5357 			 int delta_jiff, int ndelay)
5358 {
5359 	bool new_sd_dp;
5360 	int k, num_in_q, qdepth, inject;
5361 	unsigned long iflags;
5362 	u64 ns_from_boot = 0;
5363 	struct sdebug_queue *sqp;
5364 	struct sdebug_queued_cmd *sqcp;
5365 	struct scsi_device *sdp;
5366 	struct sdebug_defer *sd_dp;
5367 
5368 	if (unlikely(devip == NULL)) {
5369 		if (scsi_result == 0)
5370 			scsi_result = DID_NO_CONNECT << 16;
5371 		goto respond_in_thread;
5372 	}
5373 	sdp = cmnd->device;
5374 
5375 	if (delta_jiff == 0)
5376 		goto respond_in_thread;
5377 
5378 	sqp = get_queue(cmnd);
5379 	spin_lock_irqsave(&sqp->qc_lock, iflags);
5380 	if (unlikely(atomic_read(&sqp->blocked))) {
5381 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5382 		return SCSI_MLQUEUE_HOST_BUSY;
5383 	}
5384 	num_in_q = atomic_read(&devip->num_in_q);
5385 	qdepth = cmnd->device->queue_depth;
5386 	inject = 0;
5387 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5388 		if (scsi_result) {
5389 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5390 			goto respond_in_thread;
5391 		} else
5392 			scsi_result = device_qfull_result;
5393 	} else if (unlikely(sdebug_every_nth &&
5394 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5395 			    (scsi_result == 0))) {
5396 		if ((num_in_q == (qdepth - 1)) &&
5397 		    (atomic_inc_return(&sdebug_a_tsf) >=
5398 		     abs(sdebug_every_nth))) {
5399 			atomic_set(&sdebug_a_tsf, 0);
5400 			inject = 1;
5401 			scsi_result = device_qfull_result;
5402 		}
5403 	}
5404 
5405 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5406 	if (unlikely(k >= sdebug_max_queue)) {
5407 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5408 		if (scsi_result)
5409 			goto respond_in_thread;
5410 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5411 			scsi_result = device_qfull_result;
5412 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5413 			sdev_printk(KERN_INFO, sdp,
5414 				    "%s: max_queue=%d exceeded, %s\n",
5415 				    __func__, sdebug_max_queue,
5416 				    (scsi_result ?  "status: TASK SET FULL" :
5417 						    "report: host busy"));
5418 		if (scsi_result)
5419 			goto respond_in_thread;
5420 		else
5421 			return SCSI_MLQUEUE_HOST_BUSY;
5422 	}
5423 	__set_bit(k, sqp->in_use_bm);
5424 	atomic_inc(&devip->num_in_q);
5425 	sqcp = &sqp->qc_arr[k];
5426 	sqcp->a_cmnd = cmnd;
5427 	cmnd->host_scribble = (unsigned char *)sqcp;
5428 	sd_dp = sqcp->sd_dp;
5429 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5430 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
5431 		setup_inject(sqp, sqcp);
5432 	if (sd_dp == NULL) {
5433 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5434 		if (sd_dp == NULL)
5435 			return SCSI_MLQUEUE_HOST_BUSY;
5436 		new_sd_dp = true;
5437 	} else {
5438 		new_sd_dp = false;
5439 	}
5440 
5441 	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5442 		ns_from_boot = ktime_get_boottime_ns();
5443 
5444 	/* one of the resp_*() response functions is called here */
5445 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5446 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
5447 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
5448 		delta_jiff = ndelay = 0;
5449 	}
5450 	if (cmnd->result == 0 && scsi_result != 0)
5451 		cmnd->result = scsi_result;
5452 
5453 	if (unlikely(sdebug_verbose && cmnd->result))
5454 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5455 			    __func__, cmnd->result);
5456 
5457 	if (delta_jiff > 0 || ndelay > 0) {
5458 		ktime_t kt;
5459 
5460 		if (delta_jiff > 0) {
5461 			u64 ns = jiffies_to_nsecs(delta_jiff);
5462 
5463 			if (sdebug_random && ns < U32_MAX) {
5464 				ns = prandom_u32_max((u32)ns);
5465 			} else if (sdebug_random) {
5466 				ns >>= 12;	/* scale to 4 usec precision */
5467 				if (ns < U32_MAX)	/* over 4 hours max */
5468 					ns = prandom_u32_max((u32)ns);
5469 				ns <<= 12;
5470 			}
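			/*
			 * The shift by 12 works because 2^12 ns is about
			 * 4.1 us, and a full u32 count of such units spans
			 * roughly 4.9 hours, hence "over 4 hours max" above.
			 */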
5471 			kt = ns_to_ktime(ns);
5472 		} else {	/* ndelay has a 4.2 second max */
5473 			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5474 					     (u32)ndelay;
5475 			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5476 				u64 d = ktime_get_boottime_ns() - ns_from_boot;
5477 
5478 				if (kt <= d) {	/* elapsed duration >= kt */
5479 					sqcp->a_cmnd = NULL;
5480 					atomic_dec(&devip->num_in_q);
5481 					clear_bit(k, sqp->in_use_bm);
5482 					if (new_sd_dp)
5483 						kfree(sd_dp);
5484 					/* call scsi_done() from this thread */
5485 					cmnd->scsi_done(cmnd);
5486 					return 0;
5487 				}
5488 				/* otherwise reduce kt by elapsed time */
5489 				kt -= d;
5490 			}
5491 		}
5492 		if (!sd_dp->init_hrt) {
5493 			sd_dp->init_hrt = true;
5494 			sqcp->sd_dp = sd_dp;
5495 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5496 				     HRTIMER_MODE_REL_PINNED);
5497 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5498 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5499 			sd_dp->qc_idx = k;
5500 		}
5501 		if (sdebug_statistics)
5502 			sd_dp->issuing_cpu = raw_smp_processor_id();
5503 		sd_dp->defer_t = SDEB_DEFER_HRT;
5504 		/* schedule the invocation of scsi_done() for a later time */
5505 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5506 	} else {	/* jdelay < 0, use work queue */
5507 		if (!sd_dp->init_wq) {
5508 			sd_dp->init_wq = true;
5509 			sqcp->sd_dp = sd_dp;
5510 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
5511 			sd_dp->qc_idx = k;
5512 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5513 		}
5514 		if (sdebug_statistics)
5515 			sd_dp->issuing_cpu = raw_smp_processor_id();
5516 		sd_dp->defer_t = SDEB_DEFER_WQ;
5517 		if (unlikely(sqcp->inj_cmd_abort))
5518 			sd_dp->aborted = true;
5519 		schedule_work(&sd_dp->ew.work);
5520 		if (unlikely(sqcp->inj_cmd_abort)) {
5521 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5522 				    cmnd->request->tag);
5523 			blk_abort_request(cmnd->request);
5524 		}
5525 	}
5526 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
5527 		     (scsi_result == device_qfull_result)))
5528 		sdev_printk(KERN_INFO, sdp,
5529 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
5530 			    num_in_q, (inject ? "<inject> " : ""),
5531 			    "status: TASK SET FULL");
5532 	return 0;
5533 
5534 respond_in_thread:	/* call back to mid-layer using invocation thread */
5535 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5536 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
5537 	if (cmnd->result == 0 && scsi_result != 0)
5538 		cmnd->result = scsi_result;
5539 	cmnd->scsi_done(cmnd);
5540 	return 0;
5541 }
5542 
5543 /* Note: The following macros create attribute files in the
5544    /sys/module/scsi_debug/parameters directory. Unfortunately this
5545    driver is unaware of a change and cannot trigger auxiliary actions
5546    as it can when the corresponding attribute in the
5547    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5548  */
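/* Illustrative load-time usage (any of the parameters listed below):
 *     modprobe scsi_debug dev_size_mb=16 num_tgts=2 max_luns=4
 * Writable parameters can also be changed at run time via files such as
 * /sys/module/scsi_debug/parameters/every_nth, subject to the caveat above.
 */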
5549 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5550 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5551 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5552 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5553 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5554 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5555 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5556 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5557 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5558 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5559 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5560 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5561 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5562 module_param_string(inq_product, sdebug_inq_product_id,
5563 		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5564 module_param_string(inq_rev, sdebug_inq_product_rev,
5565 		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5566 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5567 		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5568 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5569 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5570 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5571 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5572 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5573 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5574 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5575 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5576 		   S_IRUGO | S_IWUSR);
5577 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5578 		   S_IRUGO | S_IWUSR);
5579 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5580 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5581 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5582 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5583 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5584 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5585 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5586 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5587 module_param_named(per_host_store, sdebug_per_host_store, bool,
5588 		   S_IRUGO | S_IWUSR);
5589 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5590 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5591 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5592 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5593 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5594 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5595 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5596 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5597 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5598 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5599 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5600 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5601 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5602 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5603 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5604 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5605 		   S_IRUGO | S_IWUSR);
5606 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5607 module_param_named(write_same_length, sdebug_write_same_length, int,
5608 		   S_IRUGO | S_IWUSR);
5609 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5610 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5611 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5612 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5613 
5614 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5615 MODULE_DESCRIPTION("SCSI debug adapter driver");
5616 MODULE_LICENSE("GPL");
5617 MODULE_VERSION(SDEBUG_VERSION);
5618 
5619 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5620 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5621 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5622 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5623 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5624 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5625 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5626 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5627 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5628 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5629 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5630 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5631 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5632 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5633 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5634 		 SDEBUG_VERSION "\")");
5635 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5636 MODULE_PARM_DESC(lbprz,
5637 		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5638 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5639 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5640 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5641 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5642 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5643 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5644 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5645 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5646 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5647 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5649 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5650 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5651 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5652 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5653 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5654 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5655 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5656 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5657 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5658 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5659 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5660 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5661 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5662 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5663 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5664 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5665 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5666 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5667 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5668 MODULE_PARM_DESC(uuid_ctl,
5669 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5670 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5671 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5672 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5673 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5674 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5675 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5676 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5677 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5678 
5679 #define SDEBUG_INFO_LEN 256
5680 static char sdebug_info[SDEBUG_INFO_LEN];
5681 
5682 static const char *scsi_debug_info(struct Scsi_Host *shp)
5683 {
5684 	int k;
5685 
5686 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5687 		      my_name, SDEBUG_VERSION, sdebug_version_date);
5688 	if (k >= (SDEBUG_INFO_LEN - 1))
5689 		return sdebug_info;
5690 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5691 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5692 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
5693 		  "statistics", (int)sdebug_statistics);
5694 	return sdebug_info;
5695 }
5696 
5697 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5698 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5699 				 int length)
5700 {
5701 	char arr[16];
5702 	int opts;
5703 	int minLen = length > 15 ? 15 : length;
5704 
5705 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5706 		return -EACCES;
5707 	memcpy(arr, buffer, minLen);
5708 	arr[minLen] = '\0';
5709 	if (1 != sscanf(arr, "%d", &opts))
5710 		return -EINVAL;
5711 	sdebug_opts = opts;
5712 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5713 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5714 	if (sdebug_every_nth != 0)
5715 		tweak_cmnd_count();
5716 	return length;
5717 }
5718 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomic so they might be inaccurate on a busy system. */
5722 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5723 {
5724 	int f, j, l;
5725 	struct sdebug_queue *sqp;
5726 	struct sdebug_host_info *sdhp;
5727 
5728 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5729 		   SDEBUG_VERSION, sdebug_version_date);
5730 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5731 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5732 		   sdebug_opts, sdebug_every_nth);
5733 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5734 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5735 		   sdebug_sector_size, "bytes");
5736 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5737 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5738 		   num_aborts);
5739 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5740 		   num_dev_resets, num_target_resets, num_bus_resets,
5741 		   num_host_resets);
5742 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5743 		   dix_reads, dix_writes, dif_errors);
5744 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5745 		   sdebug_statistics);
5746 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
5747 		   atomic_read(&sdebug_cmnd_count),
5748 		   atomic_read(&sdebug_completions),
5749 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
5750 		   atomic_read(&sdebug_a_tsf));
5751 
5752 	seq_printf(m, "submit_queues=%d\n", submit_queues);
5753 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5754 		seq_printf(m, "  queue %d:\n", j);
5755 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5756 		if (f != sdebug_max_queue) {
5757 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5758 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
5759 				   "first,last bits", f, l);
5760 		}
5761 	}
5762 
5763 	seq_printf(m, "this host_no=%d\n", host->host_no);
5764 	if (!xa_empty(per_store_ap)) {
5765 		bool niu;
5766 		int idx;
5767 		unsigned long l_idx;
5768 		struct sdeb_store_info *sip;
5769 
5770 		seq_puts(m, "\nhost list:\n");
5771 		j = 0;
5772 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5773 			idx = sdhp->si_idx;
5774 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
5775 				   sdhp->shost->host_no, idx);
5776 			++j;
5777 		}
5778 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5779 			   sdeb_most_recent_idx);
5780 		j = 0;
5781 		xa_for_each(per_store_ap, l_idx, sip) {
5782 			niu = xa_get_mark(per_store_ap, l_idx,
5783 					  SDEB_XA_NOT_IN_USE);
5784 			idx = (int)l_idx;
5785 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
5786 				   (niu ? "  not_in_use" : ""));
5787 			++j;
5788 		}
5789 	}
5790 	return 0;
5791 }
5792 
5793 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5794 {
5795 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5796 }
5797 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5798  * of delay is jiffies.
5799  */
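/* Illustrative: 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' requests
 * immediate (in-thread) command responses; the write fails with EBUSY while
 * any commands are still queued.
 */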
5800 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5801 			   size_t count)
5802 {
5803 	int jdelay, res;
5804 
5805 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5806 		res = count;
5807 		if (sdebug_jdelay != jdelay) {
5808 			int j, k;
5809 			struct sdebug_queue *sqp;
5810 
5811 			block_unblock_all_queues(true);
5812 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5813 			     ++j, ++sqp) {
5814 				k = find_first_bit(sqp->in_use_bm,
5815 						   sdebug_max_queue);
5816 				if (k != sdebug_max_queue) {
5817 					res = -EBUSY;   /* queued commands */
5818 					break;
5819 				}
5820 			}
5821 			if (res > 0) {
5822 				sdebug_jdelay = jdelay;
5823 				sdebug_ndelay = 0;
5824 			}
5825 			block_unblock_all_queues(false);
5826 		}
5827 		return res;
5828 	}
5829 	return -EINVAL;
5830 }
5831 static DRIVER_ATTR_RW(delay);
5832 
5833 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5834 {
5835 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5836 }
5837 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5838 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5839 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5840 			    size_t count)
5841 {
5842 	int ndelay, res;
5843 
5844 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5845 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5846 		res = count;
5847 		if (sdebug_ndelay != ndelay) {
5848 			int j, k;
5849 			struct sdebug_queue *sqp;
5850 
5851 			block_unblock_all_queues(true);
5852 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5853 			     ++j, ++sqp) {
5854 				k = find_first_bit(sqp->in_use_bm,
5855 						   sdebug_max_queue);
5856 				if (k != sdebug_max_queue) {
5857 					res = -EBUSY;   /* queued commands */
5858 					break;
5859 				}
5860 			}
5861 			if (res > 0) {
5862 				sdebug_ndelay = ndelay;
5863 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
5864 							: DEF_JDELAY;
5865 			}
5866 			block_unblock_all_queues(false);
5867 		}
5868 		return res;
5869 	}
5870 	return -EINVAL;
5871 }
5872 static DRIVER_ATTR_RW(ndelay);
5873 
5874 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5875 {
5876 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5877 }
5878 
5879 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5880 			  size_t count)
5881 {
5882 	int opts;
5883 	char work[20];
5884 
5885 	if (sscanf(buf, "%10s", work) == 1) {
5886 		if (strncasecmp(work, "0x", 2) == 0) {
5887 			if (kstrtoint(work + 2, 16, &opts) == 0)
5888 				goto opts_done;
5889 		} else {
5890 			if (kstrtoint(work, 10, &opts) == 0)
5891 				goto opts_done;
5892 		}
5893 	}
5894 	return -EINVAL;
5895 opts_done:
5896 	sdebug_opts = opts;
5897 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5898 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5899 	tweak_cmnd_count();
5900 	return count;
5901 }
5902 static DRIVER_ATTR_RW(opts);
5903 
5904 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5905 {
5906 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5907 }
5908 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5909 			   size_t count)
5910 {
5911 	int n;
5912 
5913 	/* Cannot change from or to TYPE_ZBC with sysfs */
5914 	if (sdebug_ptype == TYPE_ZBC)
5915 		return -EINVAL;
5916 
5917 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5918 		if (n == TYPE_ZBC)
5919 			return -EINVAL;
5920 		sdebug_ptype = n;
5921 		return count;
5922 	}
5923 	return -EINVAL;
5924 }
5925 static DRIVER_ATTR_RW(ptype);
5926 
5927 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
5928 {
5929 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
5930 }
5931 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
5932 			    size_t count)
5933 {
5934 	int n;
5935 
5936 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5937 		sdebug_dsense = n;
5938 		return count;
5939 	}
5940 	return -EINVAL;
5941 }
5942 static DRIVER_ATTR_RW(dsense);
5943 
5944 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
5945 {
5946 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
5947 }
5948 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
5949 			     size_t count)
5950 {
5951 	int n, idx;
5952 
5953 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5954 		bool want_store = (n == 0);
5955 		struct sdebug_host_info *sdhp;
5956 
5957 		n = (n > 0);
5958 		sdebug_fake_rw = (sdebug_fake_rw > 0);
5959 		if (sdebug_fake_rw == n)
5960 			return count;	/* not transitioning so do nothing */
5961 
5962 		if (want_store) {	/* 1 --> 0 transition, set up store */
5963 			if (sdeb_first_idx < 0) {
5964 				idx = sdebug_add_store();
5965 				if (idx < 0)
5966 					return idx;
5967 			} else {
5968 				idx = sdeb_first_idx;
5969 				xa_clear_mark(per_store_ap, idx,
5970 					      SDEB_XA_NOT_IN_USE);
5971 			}
5972 			/* make all hosts use same store */
5973 			list_for_each_entry(sdhp, &sdebug_host_list,
5974 					    host_list) {
5975 				if (sdhp->si_idx != idx) {
5976 					xa_set_mark(per_store_ap, sdhp->si_idx,
5977 						    SDEB_XA_NOT_IN_USE);
5978 					sdhp->si_idx = idx;
5979 				}
5980 			}
5981 			sdeb_most_recent_idx = idx;
5982 		} else {	/* 0 --> 1 transition is trigger for shrink */
5983 			sdebug_erase_all_stores(true /* apart from first */);
5984 		}
5985 		sdebug_fake_rw = n;
5986 		return count;
5987 	}
5988 	return -EINVAL;
5989 }
5990 static DRIVER_ATTR_RW(fake_rw);
5991 
5992 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5993 {
5994 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5995 }
5996 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5997 			      size_t count)
5998 {
5999 	int n;
6000 
6001 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6002 		sdebug_no_lun_0 = n;
6003 		return count;
6004 	}
6005 	return -EINVAL;
6006 }
6007 static DRIVER_ATTR_RW(no_lun_0);
6008 
6009 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6010 {
6011 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6012 }
6013 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6014 			      size_t count)
6015 {
6016 	int n;
6017 
6018 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6019 		sdebug_num_tgts = n;
6020 		sdebug_max_tgts_luns();
6021 		return count;
6022 	}
6023 	return -EINVAL;
6024 }
6025 static DRIVER_ATTR_RW(num_tgts);
6026 
6027 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6028 {
6029 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6030 }
6031 static DRIVER_ATTR_RO(dev_size_mb);
6032 
6033 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6034 {
6035 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6036 }
6037 
6038 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6039 				    size_t count)
6040 {
6041 	bool v;
6042 
6043 	if (kstrtobool(buf, &v))
6044 		return -EINVAL;
6045 
6046 	sdebug_per_host_store = v;
6047 	return count;
6048 }
6049 static DRIVER_ATTR_RW(per_host_store);
6050 
6051 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6052 {
6053 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6054 }
6055 static DRIVER_ATTR_RO(num_parts);
6056 
6057 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6058 {
6059 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6060 }
6061 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6062 			       size_t count)
6063 {
6064 	int nth;
6065 
6066 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
6067 		sdebug_every_nth = nth;
6068 		if (nth && !sdebug_statistics) {
6069 			pr_info("every_nth needs statistics=1, set it\n");
6070 			sdebug_statistics = true;
6071 		}
6072 		tweak_cmnd_count();
6073 		return count;
6074 	}
6075 	return -EINVAL;
6076 }
6077 static DRIVER_ATTR_RW(every_nth);
6078 
6079 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6080 {
6081 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6082 }
6083 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6084 			      size_t count)
6085 {
6086 	int n;
6087 	bool changed;
6088 
6089 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6090 		if (n > 256) {
6091 			pr_warn("max_luns can be no more than 256\n");
6092 			return -EINVAL;
6093 		}
6094 		changed = (sdebug_max_luns != n);
6095 		sdebug_max_luns = n;
6096 		sdebug_max_tgts_luns();
6097 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
6098 			struct sdebug_host_info *sdhp;
6099 			struct sdebug_dev_info *dp;
6100 
6101 			spin_lock(&sdebug_host_list_lock);
6102 			list_for_each_entry(sdhp, &sdebug_host_list,
6103 					    host_list) {
6104 				list_for_each_entry(dp, &sdhp->dev_info_list,
6105 						    dev_list) {
6106 					set_bit(SDEBUG_UA_LUNS_CHANGED,
6107 						dp->uas_bm);
6108 				}
6109 			}
6110 			spin_unlock(&sdebug_host_list_lock);
6111 		}
6112 		return count;
6113 	}
6114 	return -EINVAL;
6115 }
6116 static DRIVER_ATTR_RW(max_luns);
6117 
6118 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6119 {
6120 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6121 }
/* N.B. max_queue can be changed while there are queued commands. In-flight
 * commands beyond the new max_queue will still be completed. */
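/* E.g. if max_queue is lowered from 192 to 64 while the highest in-use slot
 * is 100, retired_max_queue is set to 101 so that the commands occupying
 * slots 64..100 can still run to completion.
 */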
6124 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6125 			       size_t count)
6126 {
6127 	int j, n, k, a;
6128 	struct sdebug_queue *sqp;
6129 
6130 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6131 	    (n <= SDEBUG_CANQUEUE)) {
6132 		block_unblock_all_queues(true);
6133 		k = 0;
6134 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6135 		     ++j, ++sqp) {
6136 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6137 			if (a > k)
6138 				k = a;
6139 		}
6140 		sdebug_max_queue = n;
6141 		if (k == SDEBUG_CANQUEUE)
6142 			atomic_set(&retired_max_queue, 0);
6143 		else if (k >= n)
6144 			atomic_set(&retired_max_queue, k + 1);
6145 		else
6146 			atomic_set(&retired_max_queue, 0);
6147 		block_unblock_all_queues(false);
6148 		return count;
6149 	}
6150 	return -EINVAL;
6151 }
6152 static DRIVER_ATTR_RW(max_queue);
6153 
6154 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6155 {
6156 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6157 }
6158 static DRIVER_ATTR_RO(no_uld);
6159 
6160 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6161 {
6162 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6163 }
6164 static DRIVER_ATTR_RO(scsi_level);
6165 
6166 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6167 {
6168 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6169 }
6170 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6171 				size_t count)
6172 {
6173 	int n;
6174 	bool changed;
6175 
6176 	/* Ignore capacity change for ZBC drives for now */
6177 	if (sdeb_zbc_in_use)
6178 		return -ENOTSUPP;
6179 
6180 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6181 		changed = (sdebug_virtual_gb != n);
6182 		sdebug_virtual_gb = n;
6183 		sdebug_capacity = get_sdebug_capacity();
6184 		if (changed) {
6185 			struct sdebug_host_info *sdhp;
6186 			struct sdebug_dev_info *dp;
6187 
6188 			spin_lock(&sdebug_host_list_lock);
6189 			list_for_each_entry(sdhp, &sdebug_host_list,
6190 					    host_list) {
6191 				list_for_each_entry(dp, &sdhp->dev_info_list,
6192 						    dev_list) {
6193 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6194 						dp->uas_bm);
6195 				}
6196 			}
6197 			spin_unlock(&sdebug_host_list_lock);
6198 		}
6199 		return count;
6200 	}
6201 	return -EINVAL;
6202 }
6203 static DRIVER_ATTR_RW(virtual_gb);
6204 
6205 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6206 {
6207 	/* absolute number of hosts currently active is what is shown */
6208 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6209 }
6210 
6211 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6212 			      size_t count)
6213 {
6214 	bool found;
6215 	unsigned long idx;
6216 	struct sdeb_store_info *sip;
6217 	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6218 	int delta_hosts;
6219 
6220 	if (sscanf(buf, "%d", &delta_hosts) != 1)
6221 		return -EINVAL;
6222 	if (delta_hosts > 0) {
6223 		do {
6224 			found = false;
6225 			if (want_phs) {
6226 				xa_for_each_marked(per_store_ap, idx, sip,
6227 						   SDEB_XA_NOT_IN_USE) {
6228 					sdeb_most_recent_idx = (int)idx;
6229 					found = true;
6230 					break;
6231 				}
6232 				if (found)	/* re-use case */
6233 					sdebug_add_host_helper((int)idx);
6234 				else
6235 					sdebug_do_add_host(true);
6236 			} else {
6237 				sdebug_do_add_host(false);
6238 			}
6239 		} while (--delta_hosts);
6240 	} else if (delta_hosts < 0) {
6241 		do {
6242 			sdebug_do_remove_host(false);
6243 		} while (++delta_hosts);
6244 	}
6245 	return count;
6246 }
6247 static DRIVER_ATTR_RW(add_host);
6248 
6249 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6250 {
6251 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6252 }
6253 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6254 				    size_t count)
6255 {
6256 	int n;
6257 
6258 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6259 		sdebug_vpd_use_hostno = n;
6260 		return count;
6261 	}
6262 	return -EINVAL;
6263 }
6264 static DRIVER_ATTR_RW(vpd_use_hostno);
6265 
6266 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6267 {
6268 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6269 }
6270 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6271 				size_t count)
6272 {
6273 	int n;
6274 
6275 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6276 		if (n > 0)
6277 			sdebug_statistics = true;
6278 		else {
6279 			clear_queue_stats();
6280 			sdebug_statistics = false;
6281 		}
6282 		return count;
6283 	}
6284 	return -EINVAL;
6285 }
6286 static DRIVER_ATTR_RW(statistics);
6287 
6288 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6289 {
6290 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6291 }
6292 static DRIVER_ATTR_RO(sector_size);
6293 
6294 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6295 {
6296 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6297 }
6298 static DRIVER_ATTR_RO(submit_queues);
6299 
6300 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6301 {
6302 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6303 }
6304 static DRIVER_ATTR_RO(dix);
6305 
6306 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6307 {
6308 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6309 }
6310 static DRIVER_ATTR_RO(dif);
6311 
6312 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6313 {
6314 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6315 }
6316 static DRIVER_ATTR_RO(guard);
6317 
6318 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6319 {
6320 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6321 }
6322 static DRIVER_ATTR_RO(ato);
6323 
6324 static ssize_t map_show(struct device_driver *ddp, char *buf)
6325 {
6326 	ssize_t count = 0;
6327 
6328 	if (!scsi_debug_lbp())
6329 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6330 				 sdebug_store_sectors);
6331 
6332 	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6333 		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6334 
6335 		if (sip)
6336 			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6337 					  (int)map_size, sip->map_storep);
6338 	}
6339 	buf[count++] = '\n';
6340 	buf[count] = '\0';
6341 
6342 	return count;
6343 }
6344 static DRIVER_ATTR_RO(map);
6345 
6346 static ssize_t random_show(struct device_driver *ddp, char *buf)
6347 {
6348 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6349 }
6350 
6351 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6352 			    size_t count)
6353 {
6354 	bool v;
6355 
6356 	if (kstrtobool(buf, &v))
6357 		return -EINVAL;
6358 
6359 	sdebug_random = v;
6360 	return count;
6361 }
6362 static DRIVER_ATTR_RW(random);
6363 
6364 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6365 {
6366 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6367 }
6368 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6369 			       size_t count)
6370 {
6371 	int n;
6372 
6373 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6374 		sdebug_removable = (n > 0);
6375 		return count;
6376 	}
6377 	return -EINVAL;
6378 }
6379 static DRIVER_ATTR_RW(removable);
6380 
6381 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6382 {
6383 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6384 }
6385 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6386 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6387 			       size_t count)
6388 {
6389 	int n;
6390 
6391 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6392 		sdebug_host_lock = (n > 0);
6393 		return count;
6394 	}
6395 	return -EINVAL;
6396 }
6397 static DRIVER_ATTR_RW(host_lock);
6398 
6399 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6400 {
6401 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6402 }
6403 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6404 			    size_t count)
6405 {
6406 	int n;
6407 
6408 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6409 		sdebug_strict = (n > 0);
6410 		return count;
6411 	}
6412 	return -EINVAL;
6413 }
6414 static DRIVER_ATTR_RW(strict);
6415 
6416 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6417 {
6418 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6419 }
6420 static DRIVER_ATTR_RO(uuid_ctl);
6421 
6422 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6423 {
6424 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6425 }
6426 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6427 			     size_t count)
6428 {
6429 	int ret, n;
6430 
6431 	ret = kstrtoint(buf, 0, &n);
6432 	if (ret)
6433 		return ret;
6434 	sdebug_cdb_len = n;
6435 	all_config_cdb_len();
6436 	return count;
6437 }
6438 static DRIVER_ATTR_RW(cdb_len);
6439 
6440 static const char * const zbc_model_strs_a[] = {
6441 	[BLK_ZONED_NONE] = "none",
6442 	[BLK_ZONED_HA]   = "host-aware",
6443 	[BLK_ZONED_HM]   = "host-managed",
6444 };
6445 
6446 static const char * const zbc_model_strs_b[] = {
6447 	[BLK_ZONED_NONE] = "no",
6448 	[BLK_ZONED_HA]   = "aware",
6449 	[BLK_ZONED_HM]   = "managed",
6450 };
6451 
6452 static const char * const zbc_model_strs_c[] = {
6453 	[BLK_ZONED_NONE] = "0",
6454 	[BLK_ZONED_HA]   = "1",
6455 	[BLK_ZONED_HM]   = "2",
6456 };
6457 
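/* Maps a zbc= parameter string to a BLK_ZONED_* model, accepting any of the
 * three spellings above; e.g. "host-managed", "managed" and "2" all map to
 * BLK_ZONED_HM.
 */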
6458 static int sdeb_zbc_model_str(const char *cp)
6459 {
6460 	int res = sysfs_match_string(zbc_model_strs_a, cp);
6461 
6462 	if (res < 0) {
6463 		res = sysfs_match_string(zbc_model_strs_b, cp);
6464 		if (res < 0) {
6465 			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
6467 				return -EINVAL;
6468 		}
6469 	}
6470 	return res;
6471 }
6472 
6473 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6474 {
6475 	return scnprintf(buf, PAGE_SIZE, "%s\n",
6476 			 zbc_model_strs_a[sdeb_zbc_model]);
6477 }
6478 static DRIVER_ATTR_RO(zbc);
6479 
6480 /* Note: The following array creates attribute files in the
6481    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6482    files (over those found in the /sys/module/scsi_debug/parameters
6483    directory) is that auxiliary actions can be triggered when an attribute
6484    is changed. For example see: add_host_store() above.
6485  */
6486 
6487 static struct attribute *sdebug_drv_attrs[] = {
6488 	&driver_attr_delay.attr,
6489 	&driver_attr_opts.attr,
6490 	&driver_attr_ptype.attr,
6491 	&driver_attr_dsense.attr,
6492 	&driver_attr_fake_rw.attr,
6493 	&driver_attr_no_lun_0.attr,
6494 	&driver_attr_num_tgts.attr,
6495 	&driver_attr_dev_size_mb.attr,
6496 	&driver_attr_num_parts.attr,
6497 	&driver_attr_every_nth.attr,
6498 	&driver_attr_max_luns.attr,
6499 	&driver_attr_max_queue.attr,
6500 	&driver_attr_no_uld.attr,
6501 	&driver_attr_scsi_level.attr,
6502 	&driver_attr_virtual_gb.attr,
6503 	&driver_attr_add_host.attr,
6504 	&driver_attr_per_host_store.attr,
6505 	&driver_attr_vpd_use_hostno.attr,
6506 	&driver_attr_sector_size.attr,
6507 	&driver_attr_statistics.attr,
6508 	&driver_attr_submit_queues.attr,
6509 	&driver_attr_dix.attr,
6510 	&driver_attr_dif.attr,
6511 	&driver_attr_guard.attr,
6512 	&driver_attr_ato.attr,
6513 	&driver_attr_map.attr,
6514 	&driver_attr_random.attr,
6515 	&driver_attr_removable.attr,
6516 	&driver_attr_host_lock.attr,
6517 	&driver_attr_ndelay.attr,
6518 	&driver_attr_strict.attr,
6519 	&driver_attr_uuid_ctl.attr,
6520 	&driver_attr_cdb_len.attr,
6521 	&driver_attr_zbc.attr,
6522 	NULL,
6523 };
6524 ATTRIBUTE_GROUPS(sdebug_drv);
6525 
6526 static struct device *pseudo_primary;
6527 
6528 static int __init scsi_debug_init(void)
6529 {
6530 	bool want_store = (sdebug_fake_rw == 0);
6531 	unsigned long sz;
6532 	int k, ret, hosts_to_add;
6533 	int idx = -1;
6534 
6535 	ramdisk_lck_a[0] = &atomic_rw;
6536 	ramdisk_lck_a[1] = &atomic_rw2;
6537 	atomic_set(&retired_max_queue, 0);
6538 
6539 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6540 		pr_warn("ndelay must be less than 1 second, ignored\n");
6541 		sdebug_ndelay = 0;
6542 	} else if (sdebug_ndelay > 0)
6543 		sdebug_jdelay = JDELAY_OVERRIDDEN;
6544 
6545 	switch (sdebug_sector_size) {
6546 	case  512:
6547 	case 1024:
6548 	case 2048:
6549 	case 4096:
6550 		break;
6551 	default:
6552 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
6553 		return -EINVAL;
6554 	}
6555 
6556 	switch (sdebug_dif) {
6557 	case T10_PI_TYPE0_PROTECTION:
6558 		break;
6559 	case T10_PI_TYPE1_PROTECTION:
6560 	case T10_PI_TYPE2_PROTECTION:
6561 	case T10_PI_TYPE3_PROTECTION:
6562 		have_dif_prot = true;
6563 		break;
6564 
6565 	default:
6566 		pr_err("dif must be 0, 1, 2 or 3\n");
6567 		return -EINVAL;
6568 	}
6569 
6570 	if (sdebug_num_tgts < 0) {
6571 		pr_err("num_tgts must be >= 0\n");
6572 		return -EINVAL;
6573 	}
6574 
6575 	if (sdebug_guard > 1) {
6576 		pr_err("guard must be 0 or 1\n");
6577 		return -EINVAL;
6578 	}
6579 
6580 	if (sdebug_ato > 1) {
6581 		pr_err("ato must be 0 or 1\n");
6582 		return -EINVAL;
6583 	}
6584 
6585 	if (sdebug_physblk_exp > 15) {
6586 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6587 		return -EINVAL;
6588 	}
6589 	if (sdebug_max_luns > 256) {
6590 		pr_warn("max_luns can be no more than 256, use default\n");
6591 		sdebug_max_luns = DEF_MAX_LUNS;
6592 	}
6593 
6594 	if (sdebug_lowest_aligned > 0x3fff) {
6595 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6596 		return -EINVAL;
6597 	}
6598 
6599 	if (submit_queues < 1) {
6600 		pr_err("submit_queues must be 1 or more\n");
6601 		return -EINVAL;
6602 	}
6603 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6604 			       GFP_KERNEL);
6605 	if (sdebug_q_arr == NULL)
6606 		return -ENOMEM;
6607 	for (k = 0; k < submit_queues; ++k)
6608 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
6609 
6610 	/*
6611 	 * check for host managed zoned block device specified with
6612 	 * ptype=0x14 or zbc=XXX.
6613 	 */
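	/*
	 * Editorial illustration (based on zbc_model_strs_c above):
	 * "modprobe scsi_debug zbc=2" selects a host managed (BLK_ZONED_HM)
	 * model, as does "modprobe scsi_debug ptype=0x14".
	 */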
6614 	if (sdebug_ptype == TYPE_ZBC) {
6615 		sdeb_zbc_model = BLK_ZONED_HM;
6616 	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6617 		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6618 		if (k < 0) {
6619 			ret = k;
6620 			goto free_vm;
6621 		}
6622 		sdeb_zbc_model = k;
6623 		switch (sdeb_zbc_model) {
6624 		case BLK_ZONED_NONE:
6625 		case BLK_ZONED_HA:
6626 			sdebug_ptype = TYPE_DISK;
6627 			break;
6628 		case BLK_ZONED_HM:
6629 			sdebug_ptype = TYPE_ZBC;
6630 			break;
6631 		default:
6632 			pr_err("Invalid ZBC model\n");
6633 			ret = -EINVAL;
			goto free_q_arr;
6634 		}
6635 	}
6636 	if (sdeb_zbc_model != BLK_ZONED_NONE) {
6637 		sdeb_zbc_in_use = true;
6638 		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6639 			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6640 	}
6641 
6642 	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6643 		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6644 	if (sdebug_dev_size_mb < 1)
6645 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
6646 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6647 	sdebug_store_sectors = sz / sdebug_sector_size;
6648 	sdebug_capacity = get_sdebug_capacity();
6649 
6650 	/* play around with geometry, don't waste too much on track 0 */
6651 	sdebug_heads = 8;
6652 	sdebug_sectors_per = 32;
6653 	if (sdebug_dev_size_mb >= 256)
6654 		sdebug_heads = 64;
6655 	else if (sdebug_dev_size_mb >= 16)
6656 		sdebug_heads = 32;
6657 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6658 			       (sdebug_sectors_per * sdebug_heads);
6659 	if (sdebug_cylinders_per >= 1024) {
6660 		/* other LLDs do this; implies >= 1GB ram disk ... */
6661 		sdebug_heads = 255;
6662 		sdebug_sectors_per = 63;
6663 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6664 			       (sdebug_sectors_per * sdebug_heads);
6665 	}
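	/*
	 * Worked example (editorial; assumes 512 byte sectors and a
	 * capacity equal to the store size): a 1 GiB store holds 2097152
	 * sectors, so heads=64 and sectors_per=32 give 1024 cylinders;
	 * the branch above then switches to heads=255 and sectors_per=63,
	 * yielding 2097152 / (63 * 255) = 130 cylinders.
	 */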
6666 	if (scsi_debug_lbp()) {
6667 		sdebug_unmap_max_blocks =
6668 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6669 
6670 		sdebug_unmap_max_desc =
6671 			clamp(sdebug_unmap_max_desc, 0U, 256U);
6672 
6673 		sdebug_unmap_granularity =
6674 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6675 
6676 		if (sdebug_unmap_alignment &&
6677 		    sdebug_unmap_granularity <=
6678 		    sdebug_unmap_alignment) {
6679 			pr_err("unmap_granularity <= unmap_alignment\n");
6680 			ret = -EINVAL;
6681 			goto free_q_arr;
6682 		}
6683 	}
6684 	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6685 	if (want_store) {
6686 		idx = sdebug_add_store();
6687 		if (idx < 0) {
6688 			ret = idx;
6689 			goto free_q_arr;
6690 		}
6691 	}
6692 
6693 	pseudo_primary = root_device_register("pseudo_0");
6694 	if (IS_ERR(pseudo_primary)) {
6695 		pr_warn("root_device_register() error\n");
6696 		ret = PTR_ERR(pseudo_primary);
6697 		goto free_vm;
6698 	}
6699 	ret = bus_register(&pseudo_lld_bus);
6700 	if (ret < 0) {
6701 		pr_warn("bus_register error: %d\n", ret);
6702 		goto dev_unreg;
6703 	}
6704 	ret = driver_register(&sdebug_driverfs_driver);
6705 	if (ret < 0) {
6706 		pr_warn("driver_register error: %d\n", ret);
6707 		goto bus_unreg;
6708 	}
6709 
6710 	hosts_to_add = sdebug_add_host;
6711 	sdebug_add_host = 0;
6712 
6713 	for (k = 0; k < hosts_to_add; k++) {
6714 		if (want_store && k == 0) {
6715 			ret = sdebug_add_host_helper(idx);
6716 			if (ret < 0) {
6717 				pr_err("add_host_helper k=%d, error=%d\n",
6718 				       k, -ret);
6719 				break;
6720 			}
6721 		} else {
6722 			ret = sdebug_do_add_host(want_store &&
6723 						 sdebug_per_host_store);
6724 			if (ret < 0) {
6725 				pr_err("add_host k=%d error=%d\n", k, -ret);
6726 				break;
6727 			}
6728 		}
6729 	}
6730 	if (sdebug_verbose)
6731 		pr_info("built %d host(s)\n", sdebug_num_hosts);
6732 
6733 	return 0;
6734 
6735 bus_unreg:
6736 	bus_unregister(&pseudo_lld_bus);
6737 dev_unreg:
6738 	root_device_unregister(pseudo_primary);
6739 free_vm:
6740 	sdebug_erase_store(idx, NULL);
6741 free_q_arr:
6742 	kfree(sdebug_q_arr);
6743 	return ret;
6744 }
6745 
6746 static void __exit scsi_debug_exit(void)
6747 {
6748 	int k = sdebug_num_hosts;
6749 
6750 	stop_all_queued();
6751 	for (; k; k--)
6752 		sdebug_do_remove_host(true);
6753 	free_all_queued();
6754 	driver_unregister(&sdebug_driverfs_driver);
6755 	bus_unregister(&pseudo_lld_bus);
6756 	root_device_unregister(pseudo_primary);
6757 
6758 	sdebug_erase_all_stores(false);
6759 	xa_destroy(per_store_ap);
6760 }
6761 
6762 device_initcall(scsi_debug_init);
6763 module_exit(scsi_debug_exit);
6764 
6765 static void sdebug_release_adapter(struct device *dev)
6766 {
6767 	struct sdebug_host_info *sdbg_host;
6768 
6769 	sdbg_host = to_sdebug_host(dev);
6770 	kfree(sdbg_host);
6771 }
6772 
6773 /* idx must be valid; if sip is NULL it will be looked up using idx */
6774 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6775 {
6776 	if (idx < 0)
6777 		return;
6778 	if (!sip) {
6779 		if (xa_empty(per_store_ap))
6780 			return;
6781 		sip = xa_load(per_store_ap, idx);
6782 		if (!sip)
6783 			return;
6784 	}
6785 	vfree(sip->map_storep);
6786 	vfree(sip->dif_storep);
6787 	vfree(sip->storep);
6788 	xa_erase(per_store_ap, idx);
6789 	kfree(sip);
6790 }
6791 
6792 /* The only caller passing apart_from_first==false is the shutdown path. */
6793 static void sdebug_erase_all_stores(bool apart_from_first)
6794 {
6795 	unsigned long idx;
6796 	struct sdeb_store_info *sip = NULL;
6797 
6798 	xa_for_each(per_store_ap, idx, sip) {
6799 		if (apart_from_first)
6800 			apart_from_first = false;
6801 		else
6802 			sdebug_erase_store(idx, sip);
6803 	}
6804 	if (apart_from_first)
6805 		sdeb_most_recent_idx = sdeb_first_idx;
6806 }
6807 
6808 /*
6809  * Returns the new store's xarray element index (idx) if >= 0, else a
6810  * negated errno. Limits the number of stores to 65536.
6811  */
6812 static int sdebug_add_store(void)
6813 {
6814 	int res;
6815 	u32 n_idx;
6816 	unsigned long iflags;
6817 	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6818 	struct sdeb_store_info *sip = NULL;
6819 	struct xa_limit xal = { .max = 1 << 16, .min = 0 };
6820 
6821 	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
6822 	if (!sip)
6823 		return -ENOMEM;
6824 
6825 	xa_lock_irqsave(per_store_ap, iflags);
6826 	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
6827 	if (unlikely(res < 0)) {
6828 		xa_unlock_irqrestore(per_store_ap, iflags);
6829 		kfree(sip);
6830 		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
6831 		return res;
6832 	}
6833 	sdeb_most_recent_idx = n_idx;
6834 	if (sdeb_first_idx < 0)
6835 		sdeb_first_idx = n_idx;
6836 	xa_unlock_irqrestore(per_store_ap, iflags);
6837 
6838 	res = -ENOMEM;
6839 	sip->storep = vzalloc(sz);
6840 	if (!sip->storep) {
6841 		pr_err("user data oom\n");
6842 		goto err;
6843 	}
6844 	if (sdebug_num_parts > 0)
6845 		sdebug_build_parts(sip->storep, sz);
6846 
6847 	/* DIF/DIX: what T10 calls Protection Information (PI) */
6848 	if (sdebug_dix) {
6849 		int dif_size;
6850 
6851 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
6852 		sip->dif_storep = vmalloc(dif_size);
6853 
6854 		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
6855 			sip->dif_storep);
6856 
6857 		if (!sip->dif_storep) {
6858 			pr_err("DIX oom\n");
6859 			goto err;
6860 		}
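		/*
		 * Editorial note: all-ones protection information (an
		 * application tag of 0xffff) conventionally marks blocks
		 * whose PI has not yet been written, so PI checking is
		 * skipped for them.
		 */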
6861 		memset(sip->dif_storep, 0xff, dif_size);
6862 	}
6863 	/* Logical Block Provisioning */
6864 	if (scsi_debug_lbp()) {
6865 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
6866 		sip->map_storep = vmalloc(array_size(sizeof(long),
6867 						     BITS_TO_LONGS(map_size)));
6868 
6869 		pr_info("%lu provisioning blocks\n", map_size);
6870 
6871 		if (!sip->map_storep) {
6872 			pr_err("LBP map oom\n");
6873 			goto err;
6874 		}
6875 
6876 		bitmap_zero(sip->map_storep, map_size);
6877 
6878 		/* Map first 1KB for partition table */
6879 		if (sdebug_num_parts)
6880 			map_region(sip, 0, 2);
6881 	}
6882 
6883 	rwlock_init(&sip->macc_lck);
6884 	return (int)n_idx;
6885 err:
6886 	sdebug_erase_store((int)n_idx, sip);
6887 	pr_warn("%s: failed, errno=%d\n", __func__, -res);
6888 	return res;
6889 }
6890 
6891 static int sdebug_add_host_helper(int per_host_idx)
6892 {
6893 	int k, devs_per_host, idx;
6894 	int error = -ENOMEM;
6895 	struct sdebug_host_info *sdbg_host;
6896 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6897 
6898 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
6899 	if (!sdbg_host)
6900 		return -ENOMEM;
6901 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
6902 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
6903 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6904 	sdbg_host->si_idx = idx;
6905 
6906 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
6907 
6908 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
6909 	for (k = 0; k < devs_per_host; k++) {
6910 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
6911 		if (!sdbg_devinfo)
6912 			goto clean;
6913 	}
6914 
6915 	spin_lock(&sdebug_host_list_lock);
6916 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
6917 	spin_unlock(&sdebug_host_list_lock);
6918 
6919 	sdbg_host->dev.bus = &pseudo_lld_bus;
6920 	sdbg_host->dev.parent = pseudo_primary;
6921 	sdbg_host->dev.release = &sdebug_release_adapter;
6922 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
6923 
6924 	error = device_register(&sdbg_host->dev);
6925 	if (error)
6926 		goto clean;
6927 
6928 	++sdebug_num_hosts;
6929 	return 0;
6930 
6931 clean:
6932 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6933 				 dev_list) {
6934 		list_del(&sdbg_devinfo->dev_list);
6935 		kfree(sdbg_devinfo->zstate);
6936 		kfree(sdbg_devinfo);
6937 	}
6938 	kfree(sdbg_host);
6939 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
6940 	return error;
6941 }
6942 
6943 static int sdebug_do_add_host(bool mk_new_store)
6944 {
6945 	int ph_idx = sdeb_most_recent_idx;
6946 
6947 	if (mk_new_store) {
6948 		ph_idx = sdebug_add_store();
6949 		if (ph_idx < 0)
6950 			return ph_idx;
6951 	}
6952 	return sdebug_add_host_helper(ph_idx);
6953 }
6954 
6955 static void sdebug_do_remove_host(bool the_end)
6956 {
6957 	int idx = -1;
6958 	struct sdebug_host_info *sdbg_host = NULL;
6959 	struct sdebug_host_info *sdbg_host2;
6960 
6961 	spin_lock(&sdebug_host_list_lock);
6962 	if (!list_empty(&sdebug_host_list)) {
6963 		sdbg_host = list_entry(sdebug_host_list.prev,
6964 				       struct sdebug_host_info, host_list);
6965 		idx = sdbg_host->si_idx;
6966 	}
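	/*
	 * Editorial note: unless the module is being removed, mark the
	 * victim host's store as no longer in use, but only if no other
	 * host still references the same store index.
	 */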
6967 	if (!the_end && idx >= 0) {
6968 		bool unique = true;
6969 
6970 		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
6971 			if (sdbg_host2 == sdbg_host)
6972 				continue;
6973 			if (idx == sdbg_host2->si_idx) {
6974 				unique = false;
6975 				break;
6976 			}
6977 		}
6978 		if (unique) {
6979 			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
6980 			if (idx == sdeb_most_recent_idx)
6981 				--sdeb_most_recent_idx;
6982 		}
6983 	}
6984 	if (sdbg_host)
6985 		list_del(&sdbg_host->host_list);
6986 	spin_unlock(&sdebug_host_list_lock);
6987 
6988 	if (!sdbg_host)
6989 		return;
6990 
6991 	device_unregister(&sdbg_host->dev);
6992 	--sdebug_num_hosts;
6993 }
6994 
6995 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
6996 {
6997 	int num_in_q = 0;
6998 	struct sdebug_dev_info *devip;
6999 
7000 	block_unblock_all_queues(true);
7001 	devip = (struct sdebug_dev_info *)sdev->hostdata;
7002 	if (NULL == devip) {
7003 		block_unblock_all_queues(false);
7004 		return	-ENODEV;
7005 	}
7006 	num_in_q = atomic_read(&devip->num_in_q);
7007 
7008 	if (qdepth < 1)
7009 		qdepth = 1;
7010 	/* allow qdepth to exceed the host's qc_arr capacity, for testing */
7011 	if (qdepth > SDEBUG_CANQUEUE + 10)
7012 		qdepth = SDEBUG_CANQUEUE + 10;
7013 	scsi_change_queue_depth(sdev, qdepth);
7014 
7015 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7016 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7017 			    __func__, qdepth, num_in_q);
7018 	}
7019 	block_unblock_all_queues(false);
7020 	return sdev->queue_depth;
7021 }
7022 
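/*
 * Editorial example: with every_nth=100 and SDEBUG_OPT_TIMEOUT set in
 * opts, every 100th command is dropped without a response so that the
 * mid-layer's timeout and abort handling can be exercised.
 */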
7023 static bool fake_timeout(struct scsi_cmnd *scp)
7024 {
7025 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7026 		if (sdebug_every_nth < -1)
7027 			sdebug_every_nth = -1;
7028 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7029 			return true; /* ignore command causing timeout */
7030 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7031 			 scsi_medium_access_command(scp))
7032 			return true; /* time out reads and writes */
7033 	}
7034 	return false;
7035 }
7036 
7037 static bool fake_host_busy(struct scsi_cmnd *scp)
7038 {
7039 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) && sdebug_every_nth &&
7040 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
7041 }
7042 
7043 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7044 				   struct scsi_cmnd *scp)
7045 {
7046 	u8 sdeb_i;
7047 	struct scsi_device *sdp = scp->device;
7048 	const struct opcode_info_t *oip;
7049 	const struct opcode_info_t *r_oip;
7050 	struct sdebug_dev_info *devip;
7051 
7052 	u8 *cmd = scp->cmnd;
7053 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7054 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7055 	int k, na;
7056 	int errsts = 0;
7057 	u32 flags;
7058 	u16 sa;
7059 	u8 opcode = cmd[0];
7060 	bool has_wlun_rl;
7061 
7062 	scsi_set_resid(scp, 0);
7063 	if (sdebug_statistics)
7064 		atomic_inc(&sdebug_cmnd_count);
7065 	if (unlikely(sdebug_verbose &&
7066 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7067 		char b[120];
7068 		int n, len, sb;
7069 
7070 		len = scp->cmd_len;
7071 		sb = (int)sizeof(b);
7072 		if (len > 32)
7073 			strcpy(b, "too long, over 32 bytes");
7074 		else {
7075 			for (k = 0, n = 0; k < len && n < sb; ++k)
7076 				n += scnprintf(b + n, sb - n, "%02x ",
7077 					       (u32)cmd[k]);
7078 		}
7079 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7080 			    blk_mq_unique_tag(scp->request), b);
7081 	}
7082 	if (fake_host_busy(scp))
7083 		return SCSI_MLQUEUE_HOST_BUSY;
7084 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7085 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
7086 		goto err_out;
7087 
7088 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
7089 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
7090 	devip = (struct sdebug_dev_info *)sdp->hostdata;
7091 	if (unlikely(!devip)) {
7092 		devip = find_build_dev_info(sdp);
7093 		if (NULL == devip)
7094 			goto err_out;
7095 	}
7096 	na = oip->num_attached;
7097 	r_pfp = oip->pfp;
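	/*
	 * Editorial note: when several commands share an opcode, the
	 * service action (SA) disambiguates them: "low" SAs sit in bits
	 * 0-4 of CDB byte 1, "high" SAs in bytes 8-9 (e.g. variable
	 * length CDBs).
	 */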
7098 	if (na) {	/* multiple commands with this opcode */
7099 		r_oip = oip;
7100 		if (FF_SA & r_oip->flags) {
7101 			if (F_SA_LOW & oip->flags)
7102 				sa = 0x1f & cmd[1];
7103 			else
7104 				sa = get_unaligned_be16(cmd + 8);
7105 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7106 				if (opcode == oip->opcode && sa == oip->sa)
7107 					break;
7108 			}
7109 		} else {   /* since no service action only check opcode */
7110 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7111 				if (opcode == oip->opcode)
7112 					break;
7113 			}
7114 		}
7115 		if (k > na) {
7116 			if (F_SA_LOW & r_oip->flags)
7117 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7118 			else if (F_SA_HIGH & r_oip->flags)
7119 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7120 			else
7121 				mk_sense_invalid_opcode(scp);
7122 			goto check_cond;
7123 		}
7124 	}	/* else (when na==0) we assume the oip is a match */
7125 	flags = oip->flags;
7126 	if (unlikely(F_INV_OP & flags)) {
7127 		mk_sense_invalid_opcode(scp);
7128 		goto check_cond;
7129 	}
7130 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7131 		if (sdebug_verbose)
7132 			sdev_printk(KERN_INFO, sdp,
7133 				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
7134 		mk_sense_invalid_opcode(scp);
7135 		goto check_cond;
7136 	}
7137 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
7138 		u8 rem;
7139 		int j;
7140 
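		/*
		 * len_mask[0] holds the CDB length to check; later entries
		 * have 1s where bits may legitimately be set. rem collects
		 * the disallowed bits of byte k, and j locates its highest
		 * set bit for the sense data's field/bit pointers.
		 */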
7141 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7142 			rem = ~oip->len_mask[k] & cmd[k];
7143 			if (rem) {
7144 				for (j = 7; j >= 0; --j, rem <<= 1) {
7145 					if (0x80 & rem)
7146 						break;
7147 				}
7148 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7149 				goto check_cond;
7150 			}
7151 		}
7152 	}
7153 	if (unlikely(!(F_SKIP_UA & flags) &&
7154 		     find_first_bit(devip->uas_bm,
7155 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7156 		errsts = make_ua(scp, devip);
7157 		if (errsts)
7158 			goto check_cond;
7159 	}
7160 	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
7161 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7162 		if (sdebug_verbose)
7163 			sdev_printk(KERN_INFO, sdp,
7164 				    "%s reports: Not ready: initializing command required\n",
7165 				    my_name);
7166 		errsts = check_condition_result;
7167 		goto fini;
7168 	}
7169 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
7170 		goto fini;
7171 	if (unlikely(sdebug_every_nth)) {
7172 		if (fake_timeout(scp))
7173 			return 0;	/* ignore command: make trouble */
7174 	}
7175 	if (likely(oip->pfp))
7176 		pfp = oip->pfp;	/* calls a resp_* function */
7177 	else
7178 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
7179 
7180 fini:
7181 	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
7182 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7183 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7184 					    sdebug_ndelay > 10000)) {
7185 		/*
7186 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7187 		 * for Start Stop Unit (SSU) want at least 1 second delay and
7188 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
7189 		 * For Synchronize Cache want 1/20 of SSU's delay.
7190 		 */
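		/*
		 * Worked example (editorial; assumes HZ=250, USER_HZ=100,
		 * jdelay==1): SSU gets mult_frac(100, 250, 100) == 250
		 * jiffies, i.e. 1 second; Synchronize Cache (denom==20)
		 * gets mult_frac(100, 250, 2000) == 12 jiffies, ~48 ms.
		 */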
7191 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7192 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7193 
7194 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7195 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7196 	} else
7197 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7198 				     sdebug_ndelay);
7199 check_cond:
7200 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7201 err_out:
7202 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7203 }
7204 
7205 static struct scsi_host_template sdebug_driver_template = {
7206 	.show_info =		scsi_debug_show_info,
7207 	.write_info =		scsi_debug_write_info,
7208 	.proc_name =		sdebug_proc_name,
7209 	.name =			"SCSI DEBUG",
7210 	.info =			scsi_debug_info,
7211 	.slave_alloc =		scsi_debug_slave_alloc,
7212 	.slave_configure =	scsi_debug_slave_configure,
7213 	.slave_destroy =	scsi_debug_slave_destroy,
7214 	.ioctl =		scsi_debug_ioctl,
7215 	.queuecommand =		scsi_debug_queuecommand,
7216 	.change_queue_depth =	sdebug_change_qdepth,
7217 	.eh_abort_handler =	scsi_debug_abort,
7218 	.eh_device_reset_handler = scsi_debug_device_reset,
7219 	.eh_target_reset_handler = scsi_debug_target_reset,
7220 	.eh_bus_reset_handler = scsi_debug_bus_reset,
7221 	.eh_host_reset_handler = scsi_debug_host_reset,
7222 	.can_queue =		SDEBUG_CANQUEUE,
7223 	.this_id =		7,
7224 	.sg_tablesize =		SG_MAX_SEGMENTS,
7225 	.cmd_per_lun =		DEF_CMD_PER_LUN,
7226 	.max_sectors =		-1U,
7227 	.max_segment_size =	-1U,
7228 	.module =		THIS_MODULE,
7229 	.track_queue_depth =	1,
7230 };
7231 
7232 static int sdebug_driver_probe(struct device *dev)
7233 {
7234 	int error = 0;
7235 	struct sdebug_host_info *sdbg_host;
7236 	struct Scsi_Host *hpnt;
7237 	int hprot;
7238 
7239 	sdbg_host = to_sdebug_host(dev);
7240 
7241 	sdebug_driver_template.can_queue = sdebug_max_queue;
7242 	if (!sdebug_clustering)
7243 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7244 
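	/* hostdata need only be big enough for a back-pointer to sdbg_host */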
7245 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7246 	if (NULL == hpnt) {
7247 		pr_err("scsi_host_alloc failed\n");
7248 		error = -ENODEV;
7249 		return error;
7250 	}
7251 	if (submit_queues > nr_cpu_ids) {
7252 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7253 			my_name, submit_queues, nr_cpu_ids);
7254 		submit_queues = nr_cpu_ids;
7255 	}
7256 	/* Decide whether to tell scsi subsystem that we want mq */
7257 	/* Following should give the same answer for each host */
7258 	hpnt->nr_hw_queues = submit_queues;
7259 
7260 	sdbg_host->shost = hpnt;
7261 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
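	/*
	 * Editorial note: when the initiator's this_id falls inside the
	 * target id range, reserve one extra id so that num_tgts targets
	 * remain visible.
	 */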
7262 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7263 		hpnt->max_id = sdebug_num_tgts + 1;
7264 	else
7265 		hpnt->max_id = sdebug_num_tgts;
7266 	/* = sdebug_max_luns; */
7267 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7268 
7269 	hprot = 0;
7270 
7271 	switch (sdebug_dif) {
7272 
7273 	case T10_PI_TYPE1_PROTECTION:
7274 		hprot = SHOST_DIF_TYPE1_PROTECTION;
7275 		if (sdebug_dix)
7276 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
7277 		break;
7278 
7279 	case T10_PI_TYPE2_PROTECTION:
7280 		hprot = SHOST_DIF_TYPE2_PROTECTION;
7281 		if (sdebug_dix)
7282 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
7283 		break;
7284 
7285 	case T10_PI_TYPE3_PROTECTION:
7286 		hprot = SHOST_DIF_TYPE3_PROTECTION;
7287 		if (sdebug_dix)
7288 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
7289 		break;
7290 
7291 	default:
7292 		if (sdebug_dix)
7293 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
7294 		break;
7295 	}
7296 
7297 	scsi_host_set_prot(hpnt, hprot);
7298 
7299 	if (have_dif_prot || sdebug_dix)
7300 		pr_info("host protection%s%s%s%s%s%s%s\n",
7301 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7302 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7303 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7304 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7305 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7306 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7307 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7308 
7309 	if (sdebug_guard == 1)
7310 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7311 	else
7312 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7313 
7314 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7315 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7316 	if (sdebug_every_nth)	/* need stats counters for every_nth */
7317 		sdebug_statistics = true;
7318 	error = scsi_add_host(hpnt, &sdbg_host->dev);
7319 	if (error) {
7320 		pr_err("scsi_add_host failed\n");
7321 		error = -ENODEV;
7322 		scsi_host_put(hpnt);
7323 	} else {
7324 		scsi_scan_host(hpnt);
7325 	}
7326 
7327 	return error;
7328 }
7329 
7330 static int sdebug_driver_remove(struct device *dev)
7331 {
7332 	struct sdebug_host_info *sdbg_host;
7333 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
7334 
7335 	sdbg_host = to_sdebug_host(dev);
7336 
7337 	if (!sdbg_host) {
7338 		pr_err("Unable to locate host info\n");
7339 		return -ENODEV;
7340 	}
7341 
7342 	scsi_remove_host(sdbg_host->shost);
7343 
7344 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7345 				 dev_list) {
7346 		list_del(&sdbg_devinfo->dev_list);
7347 		kfree(sdbg_devinfo->zstate);
7348 		kfree(sdbg_devinfo);
7349 	}
7350 
7351 	scsi_host_put(sdbg_host->shost);
7352 	return 0;
7353 }
7354 
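/* every device on the pseudo bus is handled by this driver, so always match */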
7355 static int pseudo_lld_bus_match(struct device *dev,
7356 				struct device_driver *dev_driver)
7357 {
7358 	return 1;
7359 }
7360 
7361 static struct bus_type pseudo_lld_bus = {
7362 	.name = "pseudo",
7363 	.match = pseudo_lld_bus_match,
7364 	.probe = sdebug_driver_probe,
7365 	.remove = sdebug_driver_remove,
7366 	.drv_groups = sdebug_drv_groups,
7367 };
7368