xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision a2aede97)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2018 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20190125";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
95 #define WRITE_ERROR_ASC 0xc
96 
97 /* Additional Sense Code Qualifier (ASCQ) */
98 #define ACK_NAK_TO 0x3
99 
100 /* Default values for driver parameters */
101 #define DEF_NUM_HOST   1
102 #define DEF_NUM_TGTS   1
103 #define DEF_MAX_LUNS   1
104 /* With these defaults, this driver will make 1 host with 1 target
105  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
106  */
107 #define DEF_ATO 1
108 #define DEF_CDB_LEN 10
109 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
110 #define DEF_DEV_SIZE_MB   8
111 #define DEF_DIF 0
112 #define DEF_DIX 0
113 #define DEF_PER_HOST_STORE false
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_RANDOM false
133 #define DEF_REMOVABLE false
134 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
135 #define DEF_SECTOR_SIZE 512
136 #define DEF_UNMAP_ALIGNMENT 0
137 #define DEF_UNMAP_GRANULARITY 1
138 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
139 #define DEF_UNMAP_MAX_DESC 256
140 #define DEF_VIRTUAL_GB   0
141 #define DEF_VPD_USE_HOSTNO 1
142 #define DEF_WRITESAME_LENGTH 0xFFFF
143 #define DEF_STRICT 0
144 #define DEF_STATISTICS false
145 #define DEF_SUBMIT_QUEUES 1
146 #define DEF_UUID_CTL 0
147 #define JDELAY_OVERRIDDEN -9999
148 
149 #define SDEBUG_LUN_0_VAL 0
150 
151 /* bit mask values for sdebug_opts */
152 #define SDEBUG_OPT_NOISE		1
153 #define SDEBUG_OPT_MEDIUM_ERR		2
154 #define SDEBUG_OPT_TIMEOUT		4
155 #define SDEBUG_OPT_RECOVERED_ERR	8
156 #define SDEBUG_OPT_TRANSPORT_ERR	16
157 #define SDEBUG_OPT_DIF_ERR		32
158 #define SDEBUG_OPT_DIX_ERR		64
159 #define SDEBUG_OPT_MAC_TIMEOUT		128
160 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
161 #define SDEBUG_OPT_Q_NOISE		0x200
162 #define SDEBUG_OPT_ALL_TSF		0x400
163 #define SDEBUG_OPT_RARE_TSF		0x800
164 #define SDEBUG_OPT_N_WCE		0x1000
165 #define SDEBUG_OPT_RESET_NOISE		0x2000
166 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
167 #define SDEBUG_OPT_HOST_BUSY		0x8000
168 #define SDEBUG_OPT_CMD_ABORT		0x10000
169 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
170 			      SDEBUG_OPT_RESET_NOISE)
171 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
172 				  SDEBUG_OPT_TRANSPORT_ERR | \
173 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
174 				  SDEBUG_OPT_SHORT_TRANSFER | \
175 				  SDEBUG_OPT_HOST_BUSY | \
176 				  SDEBUG_OPT_CMD_ABORT)
177 /* When "every_nth" > 0 then modulo "every_nth" commands:
178  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
179  *   - a RECOVERED_ERROR is simulated on successful read and write
180  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
181  *   - a TRANSPORT_ERROR is simulated on successful read and write
182  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
183  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
184  *     CMD_ABORT
185  *
186  * When "every_nth" < 0 then after "- every_nth" commands the selected
187  * error will be injected. The error will be injected on every subsequent
188  * command until some other action occurs; for example, the user writing
189  * a new value (other than -1 or 1) to every_nth:
190  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
191  */
192 
193 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
194  * priority order. In the subset implemented here lower numbers have higher
195  * priority. The UA numbers should be a sequence starting from 0 with
196  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
197 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
198 #define SDEBUG_UA_BUS_RESET 1
199 #define SDEBUG_UA_MODE_CHANGED 2
200 #define SDEBUG_UA_CAPACITY_CHANGED 3
201 #define SDEBUG_UA_LUNS_CHANGED 4
202 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
203 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
204 #define SDEBUG_NUM_UAS 7
205 
206 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
207  * sector on read commands: */
208 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
209 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
210 
211 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
212  * or "peripheral device" addressing (value 0) */
213 #define SAM2_LUN_ADDRESS_METHOD 0
214 
215 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
216  * (for response) per submit queue at one time. Can be reduced by max_queue
217  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
218  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
219  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
220  * but cannot exceed SDEBUG_CANQUEUE .
221  */
222 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
223 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
224 #define DEF_CMD_PER_LUN  255
225 
226 #define F_D_IN			1
227 #define F_D_OUT			2
228 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
229 #define F_D_UNKN		8
230 #define F_RL_WLUN_OK		0x10
231 #define F_SKIP_UA		0x20
232 #define F_DELAY_OVERR		0x40
233 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
235 #define F_INV_OP		0x200
236 #define F_FAKE_RW		0x400
237 #define F_M_ACCESS		0x800	/* media access */
238 #define F_SSU_DELAY		0x1000
239 #define F_SYNC_DELAY		0x2000
240 
241 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
242 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
243 #define FF_SA (F_SA_HIGH | F_SA_LOW)
244 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
245 
246 #define SDEBUG_MAX_PARTS 4
247 
248 #define SDEBUG_MAX_CMD_LEN 32
249 
250 #define SDEB_XA_NOT_IN_USE XA_MARK_1
251 
252 
/* Per logical-unit state: one instance per simulated SCSI device (LUN). */
253 struct sdebug_dev_info {
254 	struct list_head dev_list;	/* entry on owning host's dev_info_list (see sdebug_host_info) */
255 	unsigned int channel;		/* SCSI channel (bus) number */
256 	unsigned int target;		/* SCSI target id */
257 	u64 lun;			/* logical unit number */
258 	uuid_t lu_name;			/* LU name; NOTE(review): presumably reported when uuid_ctl set - confirm */
259 	struct sdebug_host_info *sdbg_host;	/* back pointer to owning pseudo host */
260 	unsigned long uas_bm[1];	/* Unit Attention bitmap; bits are SDEBUG_UA_* values */
261 	atomic_t num_in_q;		/* commands currently queued against this LU */
262 	atomic_t stopped;		/* NOTE(review): looks like non-zero after STOP (START STOP UNIT) - confirm */
263 	bool used;			/* this slot holds a live device */
264 };
265 
/* Per simulated host adapter; instances are kept on sdebug_host_list. */
266 struct sdebug_host_info {
267 	struct list_head host_list;	/* entry on global sdebug_host_list */
268 	int si_idx;	/* sdeb_store_info (per host) xarray index */
269 	struct Scsi_Host *shost;	/* associated SCSI mid-level host object */
270 	struct device dev;		/* pseudo device; see to_sdebug_host() */
271 	struct list_head dev_info_list;	/* sdebug_dev_info devices (LUNs) on this host */
272 };
273 
274 /* There is an xarray of pointers to this struct's objects, one per host */
/* NOTE(review): when per_host_store is false, hosts presumably share one
 * store (cf. sdebug_per_host_store and sdeb_first_idx) - confirm against
 * the add-host/add-store code. */
275 struct sdeb_store_info {
276 	rwlock_t macc_lck;	/* for atomic media access on this store */
277 	u8 *storep;		/* user data storage (ram) */
278 	struct t10_pi_tuple *dif_storep; /* protection info */
279 	void *map_storep;	/* provisioning map */
280 };
281 
282 #define to_sdebug_host(d)	\
283 	container_of(d, struct sdebug_host_info, dev)
284 
/* How a command's completion is being deferred: not at all, via a
 * high-resolution timer, or via a workqueue item. */
285 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
286 		      SDEB_DEFER_WQ = 2};
287 
/* Deferred-completion bookkeeping attached to a queued command. */
288 struct sdebug_defer {
289 	struct hrtimer hrt;	/* used when defer_t == SDEB_DEFER_HRT */
290 	struct execute_work ew;	/* used when defer_t == SDEB_DEFER_WQ */
291 	int sqa_idx;	/* index of sdebug_queue array */
292 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
293 	int issuing_cpu;	/* submitting cpu; cf. sdebug_miss_cpus counter */
294 	bool init_hrt;	/* hrt has been initialized */
295 	bool init_wq;	/* ew has been initialized */
296 	bool aborted;	/* true when blk_abort_request() already called */
297 	enum sdeb_defer_type defer_t;
298 };
299 
/* One in-flight command occupying a slot of a sdebug_queue's qc_arr[]. */
300 struct sdebug_queued_cmd {
301 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
302 	 * instance indicates this slot is in use.
303 	 */
304 	struct sdebug_defer *sd_dp;	/* deferred completion state (see sdebug_defer) */
305 	struct scsi_cmnd *a_cmnd;	/* the mid-level command being serviced */
	/* Per-command error-injection flags; each corresponds to one of the
	 * SDEBUG_OPT_* injection options (cf. SDEBUG_OPT_ALL_INJECTING). */
306 	unsigned int inj_recovered:1;
307 	unsigned int inj_transport:1;
308 	unsigned int inj_dif:1;
309 	unsigned int inj_dix:1;
310 	unsigned int inj_short:1;
311 	unsigned int inj_host_busy:1;
312 	unsigned int inj_cmd_abort:1;
313 };
314 
/* One instance per submit queue (there are submit_queues of them; see
 * sdebug_q_arr). Holds at most SDEBUG_CANQUEUE commands awaiting response. */
315 struct sdebug_queue {
316 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];	/* command slots */
317 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit set => slot busy */
318 	spinlock_t qc_lock;	/* guards this queue's qc_arr and in_use_bm */
319 	atomic_t blocked;	/* to temporarily stop more being queued */
320 };
321 
322 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
323 static atomic_t sdebug_completions;  /* count of deferred completions */
324 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
325 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
326 
/* Decode/dispatch descriptor for one SCSI opcode. arrp chains sibling cdb
 * variants that share the same opcode_ind_arr index (see the *_iarr
 * overflow arrays below). */
327 struct opcode_info_t {
328 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
329 				/* for terminating element */
330 	u8 opcode;		/* if num_attached > 0, preferred */
331 	u16 sa;			/* service action */
332 	u32 flags;		/* OR-ed set of F_*/FF_* flags (e.g. F_D_IN, FF_MEDIA_IO) */
333 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response function; may be NULL */
334 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
335 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
336 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
337 };
338 
339 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* These values index opcode_info_arr[]; SDEB_I_INVALID_OPCODE (0) is the
 * catch-all for opcodes this driver does not implement. */
340 enum sdeb_opcode_index {
341 	SDEB_I_INVALID_OPCODE =	0,
342 	SDEB_I_INQUIRY = 1,
343 	SDEB_I_REPORT_LUNS = 2,
344 	SDEB_I_REQUEST_SENSE = 3,
345 	SDEB_I_TEST_UNIT_READY = 4,
346 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
347 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
348 	SDEB_I_LOG_SENSE = 7,
349 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
350 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
351 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
352 	SDEB_I_START_STOP = 11,
353 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
354 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
355 	SDEB_I_MAINT_IN = 14,
356 	SDEB_I_MAINT_OUT = 15,
357 	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
358 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
359 	SDEB_I_RESERVE = 18,		/* 6, 10 */
360 	SDEB_I_RELEASE = 19,		/* 6, 10 */
361 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
362 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
363 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
364 	SDEB_I_SEND_DIAG = 23,
365 	SDEB_I_UNMAP = 24,
366 	SDEB_I_WRITE_BUFFER = 25,
367 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
368 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
369 	SDEB_I_COMP_WRITE = 28,
370 	SDEB_I_LAST_ELEMENT = 29,	/* keep this last (previous + 1) */
371 };
372 
373 
/* Maps cdb[0] (the SCSI operation code) to a sdeb_opcode_index value.
 * Zero entries resolve to SDEB_I_INVALID_OPCODE. */
374 static const unsigned char opcode_ind_arr[256] = {
375 /* 0x0; 0x0->0x1f: 6 byte cdbs */
376 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
377 	    0, 0, 0, 0,
378 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
379 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
380 	    SDEB_I_RELEASE,
381 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
382 	    SDEB_I_ALLOW_REMOVAL, 0,
383 /* 0x20; 0x20->0x3f: 10 byte cdbs */
384 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
385 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
386 	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
387 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
388 /* 0x40; 0x40->0x5f: 10 byte cdbs */
389 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
390 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
391 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
392 	    SDEB_I_RELEASE,
393 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
394 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
395 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
396 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
397 	0, SDEB_I_VARIABLE_LEN,
398 /* 0x80; 0x80->0x9f: 16 byte cdbs */
399 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
400 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
401 	0, 0, 0, SDEB_I_VERIFY,
402 	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
403 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
404 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
405 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
406 	     SDEB_I_MAINT_OUT, 0, 0, 0,
407 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
408 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
409 	0, 0, 0, 0, 0, 0, 0, 0,
410 	0, 0, 0, 0, 0, 0, 0, 0,
411 /* 0xc0; 0xc0->0xff: vendor specific */
412 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
413 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
414 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
415 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
416 };
417 
418 /*
419  * The following "response" functions return the SCSI mid-level's 4 byte
420  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
421  * command completion, they can mask their return value with
422  * SDEG_RES_IMMED_MASK .
423  */
424 #define SDEG_RES_IMMED_MASK 0x40000000
425 
426 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
435 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
436 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
437 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
438 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
439 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
440 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
441 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
442 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
443 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
444 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
445 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
446 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
447 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
448 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
449 
450 static int sdebug_do_add_host(bool mk_new_store);
451 static int sdebug_add_host_helper(int per_host_idx);
452 static void sdebug_do_remove_host(bool the_end);
453 static int sdebug_add_store(void);
454 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
455 static void sdebug_erase_all_stores(bool apart_from_first);
456 
457 /*
458  * The following are overflow arrays for cdbs that "hit" the same index in
459  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
460  * should be placed in opcode_info_arr[], the others should be placed here.
461  */
462 static const struct opcode_info_t msense_iarr[] = {
463 	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
464 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
465 };
466 
467 static const struct opcode_info_t mselect_iarr[] = {
468 	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
469 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
470 };
471 
472 static const struct opcode_info_t read_iarr[] = {
473 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
474 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
475 	     0, 0, 0, 0} },
476 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
477 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
478 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
479 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
480 	     0xc7, 0, 0, 0, 0} },
481 };
482 
483 static const struct opcode_info_t write_iarr[] = {
484 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
485 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
486 		   0, 0, 0, 0, 0, 0} },
487 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
488 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
489 		   0, 0, 0} },
490 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
491 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
492 		   0xbf, 0xc7, 0, 0, 0, 0} },
493 };
494 
495 static const struct opcode_info_t verify_iarr[] = {
496 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
497 	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
498 		   0, 0, 0, 0, 0, 0} },
499 };
500 
501 static const struct opcode_info_t sa_in_16_iarr[] = {
502 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
503 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
504 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
505 };
506 
507 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
508 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
509 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
510 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
511 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
512 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
513 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
514 };
515 
516 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
517 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
518 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
519 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
520 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
521 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
522 	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
523 };
524 
525 static const struct opcode_info_t write_same_iarr[] = {
526 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
527 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
528 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
529 };
530 
531 static const struct opcode_info_t reserve_iarr[] = {
532 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
533 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
534 };
535 
536 static const struct opcode_info_t release_iarr[] = {
537 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
538 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
539 };
540 
541 static const struct opcode_info_t sync_cache_iarr[] = {
542 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
543 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
544 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
545 };
546 
547 
548 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
549  * plus the terminating elements for logic that scans this table such as
550  * REPORT SUPPORTED OPERATION CODES. */
551 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
552 /* 0 */
553 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
554 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
555 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
556 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
557 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
558 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
559 	     0, 0} },					/* REPORT LUNS */
560 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
561 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
562 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
563 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
564 /* 5 */
565 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
566 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
567 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
568 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
569 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
570 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
571 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
572 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
573 	     0, 0, 0} },
574 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
575 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
576 	     0, 0} },
577 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
578 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
579 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
580 /* 10 */
581 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
582 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
583 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
584 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
585 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
586 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
588 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
589 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
590 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
591 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
592 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
593 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
594 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
595 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
596 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
597 				0xff, 0, 0xc7, 0, 0, 0, 0} },
598 /* 15 */
599 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
600 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
601 	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
602 	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
603 	    verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
604 			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
605 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
606 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
607 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
608 	     0xff, 0xff} },
609 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
610 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
611 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
612 	     0} },
613 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
614 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
615 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
616 	     0} },
617 /* 20 */
618 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
619 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
620 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
621 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
622 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
623 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* Fix: sa (u16) and flags (u32) initializers were transposed here;
	 * sa must be 0 and F_D_OUT (data-out direction) belongs in flags,
	 * matching struct opcode_info_t field order and sibling entries. */
624 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
625 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
627 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
628 /* 25 */
629 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
630 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
631 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
632 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
633 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
634 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
635 		 0, 0, 0, 0, 0} },
636 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
637 	    resp_sync_cache, sync_cache_iarr,
638 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
639 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
640 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
641 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
642 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
643 
644 /* 29 */
645 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
646 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
647 };
648 
649 static int sdebug_num_hosts;
650 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
651 static int sdebug_ato = DEF_ATO;
652 static int sdebug_cdb_len = DEF_CDB_LEN;
653 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
654 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
655 static int sdebug_dif = DEF_DIF;
656 static int sdebug_dix = DEF_DIX;
657 static int sdebug_dsense = DEF_D_SENSE;
658 static int sdebug_every_nth = DEF_EVERY_NTH;
659 static int sdebug_fake_rw = DEF_FAKE_RW;
660 static unsigned int sdebug_guard = DEF_GUARD;
661 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
662 static int sdebug_max_luns = DEF_MAX_LUNS;
663 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
664 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
665 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
666 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
667 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
668 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
669 static int sdebug_no_uld;
670 static int sdebug_num_parts = DEF_NUM_PARTS;
671 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
672 static int sdebug_opt_blks = DEF_OPT_BLKS;
673 static int sdebug_opts = DEF_OPTS;
674 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
675 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
676 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
677 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
678 static int sdebug_sector_size = DEF_SECTOR_SIZE;
679 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
/* Logical block provisioning knobs (UNMAP / WRITE SAME support) */
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;		/* enables extra sdev_printk output */
static bool have_dif_prot;		/* set if any DIF protection configured */
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;			/* write protect flag */

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* all fake hosts, protected by sdebug_host_list_lock */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* xarray of backing stores; index handed out by xa_alloc() */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
/* counters exported via procfs/sysfs for debugging */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* canned scsi_cmnd result values for common outcomes */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

759 
760 /* Only do the extra work involved in logical block provisioning if one or
761  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
762  * real reads and writes (i.e. not skipping them for speed).
763  */
764 static inline bool scsi_debug_lbp(void)
765 {
766 	return 0 == sdebug_fake_rw &&
767 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
768 }
769 
/*
 * Map a logical block address to its byte address within a backing store.
 * The LBA is reduced modulo sdebug_store_sectors (do_div leaves the
 * remainder in its return value). If @sip is NULL or has no storep, fall
 * back to the first (index 0) store in the xarray; a WARN fires because
 * callers are expected to supply a valid store.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
782 
/*
 * Return the T10 PI (protection information) tuple for @sector in the
 * store's dif_storep array. Sector wraps modulo sdebug_store_sectors.
 * Caller must ensure sip->dif_storep is non-NULL (DIX configured).
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
790 
791 static void sdebug_max_tgts_luns(void)
792 {
793 	struct sdebug_host_info *sdbg_host;
794 	struct Scsi_Host *hpnt;
795 
796 	spin_lock(&sdebug_host_list_lock);
797 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
798 		hpnt = sdbg_host->shost;
799 		if ((hpnt->this_id >= 0) &&
800 		    (sdebug_num_tgts > hpnt->this_id))
801 			hpnt->max_id = sdebug_num_tgts + 1;
802 		else
803 			hpnt->max_id = sdebug_num_tgts;
804 		/* sdebug_max_luns; */
805 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
806 	}
807 	spin_unlock(&sdebug_host_list_lock);
808 }
809 
/* Where the invalid field lives: the data-out payload or the CDB itself */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes (SPC field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* asc 0x24 (CDB) or 0x26 (parameter list) depending on c_d */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key-specific bytes are valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field (byte) pointer */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key-specific descriptor
		 * (type 0x2, length 0x6) after the existing descriptors */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
852 
853 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
854 {
855 	unsigned char *sbuff;
856 
857 	sbuff = scp->sense_buffer;
858 	if (!sbuff) {
859 		sdev_printk(KERN_ERR, scp->device,
860 			    "%s: sense_buffer is NULL\n", __func__);
861 		return;
862 	}
863 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
864 
865 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
866 
867 	if (sdebug_verbose)
868 		sdev_printk(KERN_INFO, scp->device,
869 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
870 			    my_name, key, asc, asq);
871 }
872 
/* Report ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE (asc 0x20). */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
877 
878 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
879 			    void __user *arg)
880 {
881 	if (sdebug_verbose) {
882 		if (0x1261 == cmd)
883 			sdev_printk(KERN_INFO, dev,
884 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
885 		else if (0x5331 == cmd)
886 			sdev_printk(KERN_INFO, dev,
887 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
888 				    __func__);
889 		else
890 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
891 				    __func__, cmd);
892 	}
893 	return -EINVAL;
894 	/* return -ENOTTY; // correct return but upsets fdisk */
895 }
896 
897 static void config_cdb_len(struct scsi_device *sdev)
898 {
899 	switch (sdebug_cdb_len) {
900 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
901 		sdev->use_10_for_rw = false;
902 		sdev->use_16_for_rw = false;
903 		sdev->use_10_for_ms = false;
904 		break;
905 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
906 		sdev->use_10_for_rw = true;
907 		sdev->use_16_for_rw = false;
908 		sdev->use_10_for_ms = false;
909 		break;
910 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
911 		sdev->use_10_for_rw = true;
912 		sdev->use_16_for_rw = false;
913 		sdev->use_10_for_ms = true;
914 		break;
915 	case 16:
916 		sdev->use_10_for_rw = false;
917 		sdev->use_16_for_rw = true;
918 		sdev->use_10_for_ms = true;
919 		break;
920 	case 32: /* No knobs to suggest this so same as 16 for now */
921 		sdev->use_10_for_rw = false;
922 		sdev->use_16_for_rw = true;
923 		sdev->use_10_for_ms = true;
924 		break;
925 	default:
926 		pr_warn("unexpected cdb_len=%d, force to 10\n",
927 			sdebug_cdb_len);
928 		sdev->use_10_for_rw = true;
929 		sdev->use_16_for_rw = false;
930 		sdev->use_10_for_ms = false;
931 		sdebug_cdb_len = 10;
932 		break;
933 	}
934 }
935 
/*
 * Apply config_cdb_len() to every scsi_device on every fake host.
 * Called after the cdb_len parameter changes at runtime.
 * NOTE(review): shost_for_each_device() can sleep (it takes device
 * references) while sdebug_host_list_lock is a spinlock — verify this
 * pairing against the locking rules for the caller's context.
 */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
951 
/*
 * Clear the LUNS_CHANGED unit attention on every LUN that shares the
 * same host and target as @devip (SPC-4 semantics: report the UA once
 * per target, then drop it everywhere on that target).
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
967 
/*
 * If a unit attention (UA) is pending on @devip, consume the first one
 * from the uas_bm bitmap, build the matching sense data on @scp, and
 * return check_condition_result. Return 0 when no UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description for verbose logging */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* the UA is reported exactly once, then forgotten */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1047 
1048 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1049 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1050 				int arr_len)
1051 {
1052 	int act_len;
1053 	struct scsi_data_buffer *sdb = &scp->sdb;
1054 
1055 	if (!sdb->length)
1056 		return 0;
1057 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1058 		return DID_ERROR << 16;
1059 
1060 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1061 				      arr, arr_len);
1062 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1063 
1064 	return 0;
1065 }
1066 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond buffer: nothing written */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* resid only shrinks: keep the smallest "bytes not written" seen
	 * across multiple (possibly out-of-order) partial fills */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
1093 
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;	/* no data-out buffer attached */
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;	/* command is not a write-type transfer */

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1107 
1108 
/* INQUIRY identification strings; sizes include space-padding + NUL */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1116 
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (excludes 4-byte header) */
	num += 4;
	/* dev_id_num < 0 means a well known lun: skip the lu designators */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* 12 + 8 hex digits + 4 bytes zero padding */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1204 
/* Canned payload: three 6-byte software interface identifiers */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1217 
/*
 * Append one network services descriptor to @arr: a 4-byte header
 * (association/service type + length) followed by the null terminated
 * @url padded with zeros to a multiple of 4 bytes. Returns the number
 * of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, int assoc_st,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* room for trailing null */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to multiple of 4 */
	arr[num++] = assoc_st;	/* association and service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page. Returns bytes placed in arr */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: lu association, storage configuration service */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: lu association, logging service */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1252 
1253 /* SCSI ports VPD page */
1254 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1255 {
1256 	int num = 0;
1257 	int port_a, port_b;
1258 
1259 	port_a = target_dev_id + 1;
1260 	port_b = port_a + 1;
1261 	arr[num++] = 0x0;	/* reserved */
1262 	arr[num++] = 0x0;	/* reserved */
1263 	arr[num++] = 0x0;
1264 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1265 	memset(arr + num, 0, 6);
1266 	num += 6;
1267 	arr[num++] = 0x0;
1268 	arr[num++] = 12;	/* length tp descriptor */
1269 	/* naa-5 target port identifier (A) */
1270 	arr[num++] = 0x61;	/* proto=sas, binary */
1271 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1272 	arr[num++] = 0x0;	/* reserved */
1273 	arr[num++] = 0x8;	/* length */
1274 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1275 	num += 8;
1276 	arr[num++] = 0x0;	/* reserved */
1277 	arr[num++] = 0x0;	/* reserved */
1278 	arr[num++] = 0x0;
1279 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1280 	memset(arr + num, 0, 6);
1281 	num += 6;
1282 	arr[num++] = 0x0;
1283 	arr[num++] = 12;	/* length tp descriptor */
1284 	/* naa-5 target port identifier (B) */
1285 	arr[num++] = 0x61;	/* proto=sas, binary */
1286 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1287 	arr[num++] = 0x0;	/* reserved */
1288 	arr[num++] = 0x8;	/* length */
1289 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1290 	num += 8;
1291 
1292 	return num;
1293 }
1294 
1295 
1296 static unsigned char vpd89_data[] = {
1297 /* from 4th byte */ 0,0,0,0,
1298 'l','i','n','u','x',' ',' ',' ',
1299 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1300 '1','2','3','4',
1301 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1302 0xec,0,0,0,
1303 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1304 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1305 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1306 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1307 0x53,0x41,
1308 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1309 0x20,0x20,
1310 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1311 0x10,0x80,
1312 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1313 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1314 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1315 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1316 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1317 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1318 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1319 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1320 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1321 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1322 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1323 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1324 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1325 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1326 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1327 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1328 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1329 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1330 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1331 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1332 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1333 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1334 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1335 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1336 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1337 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1338 };
1339 
1340 /* ATA Information VPD page */
1341 static int inquiry_vpd_89(unsigned char *arr)
1342 {
1343 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1344 	return sizeof(vpd89_data);
1345 }
1346 
1347 
1348 static unsigned char vpdb0_data[] = {
1349 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1350 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1351 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1352 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1353 };
1354 
1355 /* Block limits VPD page (SBC-3) */
1356 static int inquiry_vpd_b0(unsigned char *arr)
1357 {
1358 	unsigned int gran;
1359 
1360 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1361 
1362 	/* Optimal transfer length granularity */
1363 	if (sdebug_opt_xferlen_exp != 0 &&
1364 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1365 		gran = 1 << sdebug_opt_xferlen_exp;
1366 	else
1367 		gran = 1 << sdebug_physblk_exp;
1368 	put_unaligned_be16(gran, arr + 2);
1369 
1370 	/* Maximum Transfer Length */
1371 	if (sdebug_store_sectors > 0x400)
1372 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1373 
1374 	/* Optimal Transfer Length */
1375 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1376 
1377 	if (sdebug_lbpu) {
1378 		/* Maximum Unmap LBA Count */
1379 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1380 
1381 		/* Maximum Unmap Block Descriptor Count */
1382 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1383 	}
1384 
1385 	/* Unmap Granularity Alignment */
1386 	if (sdebug_unmap_alignment) {
1387 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1388 		arr[28] |= 0x80; /* UGAVALID */
1389 	}
1390 
1391 	/* Optimal Unmap Granularity */
1392 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1393 
1394 	/* Maximum WRITE SAME Length */
1395 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1396 
1397 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1398 
1399 	return sizeof(vpdb0_data);
1400 }
1401 
/* Block device characteristics VPD page (SBC-3).
 * Returns the fixed page length (0x3c) after zero filling and setting
 * the two non-zero fields.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* MEDIUM ROTATION RATE (bytes 0-1) of 1: non rotating medium
	 * (e.g. solid state); byte 0 already zeroed by the memset */
	arr[1] = 1;
	/* NOMINAL FORM FACTOR of 5: less than 1.8 inch */
	arr[3] = 5;

	return 0x3c;
}
1413 
1414 /* Logical block provisioning VPD page (SBC-4) */
1415 static int inquiry_vpd_b2(unsigned char *arr)
1416 {
1417 	memset(arr, 0, 0x4);
1418 	arr[0] = 0;			/* threshold exponent */
1419 	if (sdebug_lbpu)
1420 		arr[1] = 1 << 7;
1421 	if (sdebug_lbpws)
1422 		arr[1] |= 1 << 6;
1423 	if (sdebug_lbpws10)
1424 		arr[1] |= 1 << 5;
1425 	if (sdebug_lbprz && scsi_debug_lbp())
1426 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1427 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1428 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1429 	/* threshold_percentage=0 */
1430 	return 0x4;
1431 }
1432 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * Respond to the INQUIRY command: either one of the supported VPD pages
 * (when the EVPD bit is set) or the standard INQUIRY data. The response
 * is built in a temporary buffer and copied to the data-in buffer,
 * truncated to the CDB's allocation length.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;	/* peripheral qualifier + device type byte */
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		/* CMDDT is obsolete; reject it */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* well known luns (wlun) get no lu identifier */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp to page length + header, allocation length, and the
		 * size of the temporary buffer */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1586 
/* Informational exceptions control mode page (0x1c); byte 2 bit 2 is
 * TEST and the low nibble of byte 3 is MRIE */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1589 
1590 static int resp_requests(struct scsi_cmnd *scp,
1591 			 struct sdebug_dev_info *devip)
1592 {
1593 	unsigned char *sbuff;
1594 	unsigned char *cmd = scp->cmnd;
1595 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1596 	bool dsense;
1597 	int len = 18;
1598 
1599 	memset(arr, 0, sizeof(arr));
1600 	dsense = !!(cmd[1] & 1);
1601 	sbuff = scp->sense_buffer;
1602 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1603 		if (dsense) {
1604 			arr[0] = 0x72;
1605 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1606 			arr[2] = THRESHOLD_EXCEEDED;
1607 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1608 			len = 8;
1609 		} else {
1610 			arr[0] = 0x70;
1611 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1612 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1613 			arr[12] = THRESHOLD_EXCEEDED;
1614 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1615 		}
1616 	} else {
1617 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1618 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1619 			;	/* have sense and formats match */
1620 		else if (arr[0] <= 0x70) {
1621 			if (dsense) {
1622 				memset(arr, 0, 8);
1623 				arr[0] = 0x72;
1624 				len = 8;
1625 			} else {
1626 				memset(arr, 0, 18);
1627 				arr[0] = 0x70;
1628 				arr[7] = 0xa;
1629 			}
1630 		} else if (dsense) {
1631 			memset(arr, 0, 8);
1632 			arr[0] = 0x72;
1633 			arr[1] = sbuff[2];     /* sense key */
1634 			arr[2] = sbuff[12];    /* asc */
1635 			arr[3] = sbuff[13];    /* ascq */
1636 			len = 8;
1637 		} else {
1638 			memset(arr, 0, 18);
1639 			arr[0] = 0x70;
1640 			arr[2] = sbuff[1];
1641 			arr[7] = 0xa;
1642 			arr[12] = sbuff[1];
1643 			arr[13] = sbuff[3];
1644 		}
1645 
1646 	}
1647 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1648 	return fill_from_dev_buffer(scp, arr, len);
1649 }
1650 
1651 static int resp_start_stop(struct scsi_cmnd *scp,
1652 			   struct sdebug_dev_info *devip)
1653 {
1654 	unsigned char *cmd = scp->cmnd;
1655 	int power_cond, stop;
1656 	bool changing;
1657 
1658 	power_cond = (cmd[4] & 0xf0) >> 4;
1659 	if (power_cond) {
1660 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1661 		return check_condition_result;
1662 	}
1663 	stop = !(cmd[4] & 1);
1664 	changing = atomic_read(&devip->stopped) == !stop;
1665 	atomic_xchg(&devip->stopped, stop);
1666 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1667 		return SDEG_RES_IMMED_MASK;
1668 	else
1669 		return 0;
1670 }
1671 
1672 static sector_t get_sdebug_capacity(void)
1673 {
1674 	static const unsigned int gibibyte = 1073741824;
1675 
1676 	if (sdebug_virtual_gb > 0)
1677 		return (sector_t)sdebug_virtual_gb *
1678 			(gibibyte / sdebug_sector_size);
1679 	else
1680 		return sdebug_store_sectors;
1681 }
1682 
1683 #define SDEBUG_READCAP_ARR_SZ 8
1684 static int resp_readcap(struct scsi_cmnd *scp,
1685 			struct sdebug_dev_info *devip)
1686 {
1687 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1688 	unsigned int capac;
1689 
1690 	/* following just in case virtual_gb changed */
1691 	sdebug_capacity = get_sdebug_capacity();
1692 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1693 	if (sdebug_capacity < 0xffffffff) {
1694 		capac = (unsigned int)sdebug_capacity - 1;
1695 		put_unaligned_be32(capac, arr + 0);
1696 	} else
1697 		put_unaligned_be32(0xffffffff, arr + 0);
1698 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1699 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1700 }
1701 
1702 #define SDEBUG_READCAP16_ARR_SZ 32
1703 static int resp_readcap16(struct scsi_cmnd *scp,
1704 			  struct sdebug_dev_info *devip)
1705 {
1706 	unsigned char *cmd = scp->cmnd;
1707 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1708 	int alloc_len;
1709 
1710 	alloc_len = get_unaligned_be32(cmd + 10);
1711 	/* following just in case virtual_gb changed */
1712 	sdebug_capacity = get_sdebug_capacity();
1713 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1714 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1715 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1716 	arr[13] = sdebug_physblk_exp & 0xf;
1717 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1718 
1719 	if (scsi_debug_lbp()) {
1720 		arr[14] |= 0x80; /* LBPME */
1721 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1722 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1723 		 * in the wider field maps to 0 in this field.
1724 		 */
1725 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1726 			arr[14] |= 0x40;
1727 	}
1728 
1729 	arr[15] = sdebug_lowest_aligned & 0xff;
1730 
1731 	if (have_dif_prot) {
1732 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1733 		arr[12] |= 1; /* PROT_EN */
1734 	}
1735 
1736 	return fill_from_dev_buffer(scp, arr,
1737 			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1738 }
1739 
1740 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1741 
1742 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1743 			      struct sdebug_dev_info *devip)
1744 {
1745 	unsigned char *cmd = scp->cmnd;
1746 	unsigned char *arr;
1747 	int host_no = devip->sdbg_host->shost->host_no;
1748 	int n, ret, alen, rlen;
1749 	int port_group_a, port_group_b, port_a, port_b;
1750 
1751 	alen = get_unaligned_be32(cmd + 6);
1752 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1753 	if (! arr)
1754 		return DID_REQUEUE << 16;
1755 	/*
1756 	 * EVPD page 0x88 states we have two ports, one
1757 	 * real and a fake port with no device connected.
1758 	 * So we create two port groups with one port each
1759 	 * and set the group with port B to unavailable.
1760 	 */
1761 	port_a = 0x1; /* relative port A */
1762 	port_b = 0x2; /* relative port B */
1763 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1764 			(devip->channel & 0x7f);
1765 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1766 			(devip->channel & 0x7f) + 0x80;
1767 
1768 	/*
1769 	 * The asymmetric access state is cycled according to the host_id.
1770 	 */
1771 	n = 4;
1772 	if (sdebug_vpd_use_hostno == 0) {
1773 		arr[n++] = host_no % 3; /* Asymm access state */
1774 		arr[n++] = 0x0F; /* claim: all states are supported */
1775 	} else {
1776 		arr[n++] = 0x0; /* Active/Optimized path */
1777 		arr[n++] = 0x01; /* only support active/optimized paths */
1778 	}
1779 	put_unaligned_be16(port_group_a, arr + n);
1780 	n += 2;
1781 	arr[n++] = 0;    /* Reserved */
1782 	arr[n++] = 0;    /* Status code */
1783 	arr[n++] = 0;    /* Vendor unique */
1784 	arr[n++] = 0x1;  /* One port per group */
1785 	arr[n++] = 0;    /* Reserved */
1786 	arr[n++] = 0;    /* Reserved */
1787 	put_unaligned_be16(port_a, arr + n);
1788 	n += 2;
1789 	arr[n++] = 3;    /* Port unavailable */
1790 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1791 	put_unaligned_be16(port_group_b, arr + n);
1792 	n += 2;
1793 	arr[n++] = 0;    /* Reserved */
1794 	arr[n++] = 0;    /* Status code */
1795 	arr[n++] = 0;    /* Vendor unique */
1796 	arr[n++] = 0x1;  /* One port per group */
1797 	arr[n++] = 0;    /* Reserved */
1798 	arr[n++] = 0;    /* Reserved */
1799 	put_unaligned_be16(port_b, arr + n);
1800 	n += 2;
1801 
1802 	rlen = n - 4;
1803 	put_unaligned_be32(rlen, arr + 0);
1804 
1805 	/*
1806 	 * Return the smallest value of either
1807 	 * - The allocated length
1808 	 * - The constructed command length
1809 	 * - The maximum array size
1810 	 */
1811 	rlen = min_t(int, alen, n);
1812 	ret = fill_from_dev_buffer(scp, arr,
1813 			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1814 	kfree(arr);
1815 	return ret;
1816 }
1817 
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	/* REPORT SUPPORTED OPERATION CODES: walks opcode_info_arr and
	 * returns either the all-commands list (reporting_opts 0) or a
	 * single command descriptor (reporting_opts 1, 2 or 3). */
	bool rctd;		/* return command timeouts descriptors */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* clamp the working buffer to 8 KiB regardless of alloc_len */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* descriptor size with/without timeouts */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* also emit each command attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer loop cursor */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* command not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached commands by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached commands by sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out the cdb usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1968 
1969 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1970 			  struct sdebug_dev_info *devip)
1971 {
1972 	bool repd;
1973 	u32 alloc_len, len;
1974 	u8 arr[16];
1975 	u8 *cmd = scp->cmnd;
1976 
1977 	memset(arr, 0, sizeof(arr));
1978 	repd = !!(cmd[2] & 0x80);
1979 	alloc_len = get_unaligned_be32(cmd + 6);
1980 	if (alloc_len < 4) {
1981 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1982 		return check_condition_result;
1983 	}
1984 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1985 	arr[1] = 0x1;		/* ITNRS */
1986 	if (repd) {
1987 		arr[3] = 0xc;
1988 		len = 16;
1989 	} else
1990 		len = 4;
1991 
1992 	len = (len < alloc_len) ? len : alloc_len;
1993 	return fill_from_dev_buffer(scp, arr, len);
1994 }
1995 
1996 /* <<Following mode page info copied from ST318451LW>> */
1997 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: all-zero mask */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2008 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: all-zero mask */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2019 
2020 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2021 {       /* Format device page for mode_sense */
2022 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2023 				     0, 0, 0, 0, 0, 0, 0, 0,
2024 				     0, 0, 0, 0, 0x40, 0, 0, 0};
2025 
2026 	memcpy(p, format_pg, sizeof(format_pg));
2027 	put_unaligned_be16(sdebug_sectors_per, p + 10);
2028 	put_unaligned_be16(sdebug_sector_size, p + 12);
2029 	if (sdebug_removable)
2030 		p[20] |= 0x20; /* should agree with INQUIRY */
2031 	if (1 == pcontrol)
2032 		memset(p + 2, 0, sizeof(format_pg) - 2);
2033 	return sizeof(format_pg);
2034 }
2035 
/* Caching mode page (0x8), live copy; the WCE bit is cleared by
 * resp_caching_pg() when SDEBUG_OPT_N_WCE is set, and the page body is
 * writable via MODE SELECT. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2039 
2040 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2041 { 	/* Caching page for mode_sense */
2042 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2043 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2044 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2045 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2046 
2047 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2048 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2049 	memcpy(p, caching_pg, sizeof(caching_pg));
2050 	if (1 == pcontrol)
2051 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2052 	else if (2 == pcontrol)
2053 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2054 	return sizeof(caching_pg);
2055 }
2056 
/* Control mode page (0xa), live copy; resp_ctrl_m_pg() keeps the D_SENSE
 * and ATO bits in sync with sdebug_dsense/sdebug_ato, and the page body is
 * writable via MODE SELECT. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2059 
2060 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2061 { 	/* Control mode page for mode_sense */
2062 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2063 					0, 0, 0, 0};
2064 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2065 				     0, 0, 0x2, 0x4b};
2066 
2067 	if (sdebug_dsense)
2068 		ctrl_m_pg[2] |= 0x4;
2069 	else
2070 		ctrl_m_pg[2] &= ~0x4;
2071 
2072 	if (sdebug_ato)
2073 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2074 
2075 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2076 	if (1 == pcontrol)
2077 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2078 	else if (2 == pcontrol)
2079 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2080 	return sizeof(ctrl_m_pg);
2081 }
2082 
2083 
2084 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2085 {	/* Informational Exceptions control mode page for mode_sense */
2086 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2087 				       0, 0, 0x0, 0x0};
2088 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2089 				      0, 0, 0x0, 0x0};
2090 
2091 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2092 	if (1 == pcontrol)
2093 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2094 	else if (2 == pcontrol)
2095 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2096 	return sizeof(iec_m_pg);
2097 }
2098 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: all-zero mask */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2109 
2110 
2111 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2112 			      int target_dev_id)
2113 {	/* SAS phy control and discover mode page for mode_sense */
2114 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2115 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2116 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2117 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2118 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2119 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2120 		    0, 0, 0, 0, 0, 0, 0, 0,
2121 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2122 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2123 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2124 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2125 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2126 		    0, 0, 0, 0, 0, 0, 0, 0,
2127 		};
2128 	int port_a, port_b;
2129 
2130 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2131 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2132 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2133 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2134 	port_a = target_dev_id + 1;
2135 	port_b = port_a + 1;
2136 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2137 	put_unaligned_be32(port_a, p + 20);
2138 	put_unaligned_be32(port_b, p + 48 + 20);
2139 	if (1 == pcontrol)
2140 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2141 	return sizeof(sas_pcd_m_pg);
2142 }
2143 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: zero past the header */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2155 
2156 #define SDEBUG_MAX_MSENSE_SZ 256
2157 
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	/* MODE SENSE (6 and 10 byte cdbs): build the mode parameter header,
	 * an optional block descriptor, then the requested mode page(s).
	 * 'offset' tracks the running length of the response in arr[]. */
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;	/* append point within arr[] */
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {		/* short block descriptor */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {	/* long LBA block descriptor */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* only page 0x19 has subpages here; reject others (0xff = all) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself: 1 byte in MS(6), 2 in MS(10) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
2317 
2318 #define SDEBUG_MAX_MSELECT_SZ 512
2319 
2320 static int resp_mode_select(struct scsi_cmnd *scp,
2321 			    struct sdebug_dev_info *devip)
2322 {
2323 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2324 	int param_len, res, mpage;
2325 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2326 	unsigned char *cmd = scp->cmnd;
2327 	int mselect6 = (MODE_SELECT == cmd[0]);
2328 
2329 	memset(arr, 0, sizeof(arr));
2330 	pf = cmd[1] & 0x10;
2331 	sp = cmd[1] & 0x1;
2332 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2333 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2334 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2335 		return check_condition_result;
2336 	}
2337 	res = fetch_to_dev_buffer(scp, arr, param_len);
2338 	if (-1 == res)
2339 		return DID_ERROR << 16;
2340 	else if (sdebug_verbose && (res < param_len))
2341 		sdev_printk(KERN_INFO, scp->device,
2342 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2343 			    __func__, param_len, res);
2344 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2345 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2346 	if (md_len > 2) {
2347 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2348 		return check_condition_result;
2349 	}
2350 	off = bd_len + (mselect6 ? 4 : 8);
2351 	mpage = arr[off] & 0x3f;
2352 	ps = !!(arr[off] & 0x80);
2353 	if (ps) {
2354 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2355 		return check_condition_result;
2356 	}
2357 	spf = !!(arr[off] & 0x40);
2358 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2359 		       (arr[off + 1] + 2);
2360 	if ((pg_len + off) > param_len) {
2361 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2362 				PARAMETER_LIST_LENGTH_ERR, 0);
2363 		return check_condition_result;
2364 	}
2365 	switch (mpage) {
2366 	case 0x8:      /* Caching Mode page */
2367 		if (caching_pg[1] == arr[off + 1]) {
2368 			memcpy(caching_pg + 2, arr + off + 2,
2369 			       sizeof(caching_pg) - 2);
2370 			goto set_mode_changed_ua;
2371 		}
2372 		break;
2373 	case 0xa:      /* Control Mode page */
2374 		if (ctrl_m_pg[1] == arr[off + 1]) {
2375 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2376 			       sizeof(ctrl_m_pg) - 2);
2377 			if (ctrl_m_pg[4] & 0x8)
2378 				sdebug_wp = true;
2379 			else
2380 				sdebug_wp = false;
2381 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2382 			goto set_mode_changed_ua;
2383 		}
2384 		break;
2385 	case 0x1c:      /* Informational Exceptions Mode page */
2386 		if (iec_m_pg[1] == arr[off + 1]) {
2387 			memcpy(iec_m_pg + 2, arr + off + 2,
2388 			       sizeof(iec_m_pg) - 2);
2389 			goto set_mode_changed_ua;
2390 		}
2391 		break;
2392 	default:
2393 		break;
2394 	}
2395 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2396 	return check_condition_result;
2397 set_mode_changed_ua:
2398 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2399 	return 0;
2400 }
2401 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd): current (parameter 0) and reference
	 * (parameter 1) temperatures in degrees Celsius */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2411 
2412 static int resp_ie_l_pg(unsigned char *arr)
2413 {
2414 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2415 		};
2416 
2417 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2418 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2419 		arr[4] = THRESHOLD_EXCEEDED;
2420 		arr[5] = 0xff;
2421 	}
2422 	return sizeof(ie_l_pg);
2423 }
2424 
2425 #define SDEBUG_MAX_LSENSE_SZ 512
2426 
2427 static int resp_log_sense(struct scsi_cmnd *scp,
2428 			  struct sdebug_dev_info *devip)
2429 {
2430 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2431 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2432 	unsigned char *cmd = scp->cmnd;
2433 
2434 	memset(arr, 0, sizeof(arr));
2435 	ppc = cmd[1] & 0x2;
2436 	sp = cmd[1] & 0x1;
2437 	if (ppc || sp) {
2438 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2439 		return check_condition_result;
2440 	}
2441 	pcode = cmd[2] & 0x3f;
2442 	subpcode = cmd[3] & 0xff;
2443 	alloc_len = get_unaligned_be16(cmd + 7);
2444 	arr[0] = pcode;
2445 	if (0 == subpcode) {
2446 		switch (pcode) {
2447 		case 0x0:	/* Supported log pages log page */
2448 			n = 4;
2449 			arr[n++] = 0x0;		/* this page */
2450 			arr[n++] = 0xd;		/* Temperature */
2451 			arr[n++] = 0x2f;	/* Informational exceptions */
2452 			arr[3] = n - 4;
2453 			break;
2454 		case 0xd:	/* Temperature log page */
2455 			arr[3] = resp_temp_l_pg(arr + 4);
2456 			break;
2457 		case 0x2f:	/* Informational exceptions log page */
2458 			arr[3] = resp_ie_l_pg(arr + 4);
2459 			break;
2460 		default:
2461 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2462 			return check_condition_result;
2463 		}
2464 	} else if (0xff == subpcode) {
2465 		arr[0] |= 0x40;
2466 		arr[1] = subpcode;
2467 		switch (pcode) {
2468 		case 0x0:	/* Supported log pages and subpages log page */
2469 			n = 4;
2470 			arr[n++] = 0x0;
2471 			arr[n++] = 0x0;		/* 0,0 page */
2472 			arr[n++] = 0x0;
2473 			arr[n++] = 0xff;	/* this page */
2474 			arr[n++] = 0xd;
2475 			arr[n++] = 0x0;		/* Temperature */
2476 			arr[n++] = 0x2f;
2477 			arr[n++] = 0x0;	/* Informational exceptions */
2478 			arr[3] = n - 4;
2479 			break;
2480 		case 0xd:	/* Temperature subpages */
2481 			n = 4;
2482 			arr[n++] = 0xd;
2483 			arr[n++] = 0x0;		/* Temperature */
2484 			arr[3] = n - 4;
2485 			break;
2486 		case 0x2f:	/* Informational exceptions subpages */
2487 			n = 4;
2488 			arr[n++] = 0x2f;
2489 			arr[n++] = 0x0;		/* Informational exceptions */
2490 			arr[3] = n - 4;
2491 			break;
2492 		default:
2493 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2494 			return check_condition_result;
2495 		}
2496 	} else {
2497 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2498 		return check_condition_result;
2499 	}
2500 	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2501 	return fill_from_dev_buffer(scp, arr,
2502 		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2503 }
2504 
2505 static inline int check_device_access_params(struct scsi_cmnd *scp,
2506 	unsigned long long lba, unsigned int num, bool write)
2507 {
2508 	if (lba + num > sdebug_capacity) {
2509 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2510 		return check_condition_result;
2511 	}
2512 	/* transfer length excessive (tie in to block limits VPD page) */
2513 	if (num > sdebug_store_sectors) {
2514 		/* needs work to find which cdb byte 'num' comes from */
2515 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2516 		return check_condition_result;
2517 	}
2518 	if (write && unlikely(sdebug_wp)) {
2519 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2520 		return check_condition_result;
2521 	}
2522 	return 0;
2523 }
2524 
2525 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip)
2526 {
2527 	return sdebug_fake_rw ?
2528 			NULL : xa_load(per_store_ap, devip->sdbg_host->si_idx);
2529 }
2530 
/* Copy 'num' sectors starting at 'lba' between the command's scatter-gather
 * list (skipping 'sg_skip' bytes) and the backing store, in the direction
 * given by 'do_write'.  The store may be smaller than the advertised
 * capacity, so the access wraps around its end ('rest' sectors).
 * Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: sectors wrapping past store end */
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	/* nothing to do without a data buffer or a backing store */
	if (!sdb->length || !sip)
		return 0;
	/* the command's data direction must match the transfer requested */
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);	/* lba mod store size */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short transfer on the first segment */

	if (rest) {
		/* copy the wrapped tail from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2573 
2574 /* Returns number of bytes copied or -1 if error. */
2575 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2576 {
2577 	struct scsi_data_buffer *sdb = &scp->sdb;
2578 
2579 	if (!sdb->length)
2580 		return 0;
2581 	if (scp->sc_data_direction != DMA_TO_DEVICE)
2582 		return -1;
2583 	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2584 			      num * sdebug_sector_size, 0, true);
2585 }
2586 
2587 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2588  * arr into sip->storep+lba and return true. If comparison fails then
2589  * return false. */
2590 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2591 			      const u8 *arr, bool compare_only)
2592 {
2593 	bool res;
2594 	u64 block, rest = 0;
2595 	u32 store_blks = sdebug_store_sectors;
2596 	u32 lb_size = sdebug_sector_size;
2597 	u8 *fsp = sip->storep;
2598 
2599 	block = do_div(lba, store_blks);
2600 	if (block + num > store_blks)
2601 		rest = block + num - store_blks;
2602 
2603 	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2604 	if (!res)
2605 		return res;
2606 	if (rest)
2607 		res = memcmp(fsp, arr + ((num - rest) * lb_size),
2608 			     rest * lb_size);
2609 	if (!res)
2610 		return res;
2611 	if (compare_only)
2612 		return true;
2613 	arr += num * lb_size;
2614 	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2615 	if (rest)
2616 		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2617 	return res;
2618 }
2619 
2620 static __be16 dif_compute_csum(const void *buf, int len)
2621 {
2622 	__be16 csum;
2623 
2624 	if (sdebug_guard)
2625 		csum = (__force __be16)ip_compute_csum(buf, len);
2626 	else
2627 		csum = cpu_to_be16(crc_t10dif(buf, len));
2628 
2629 	return csum;
2630 }
2631 
2632 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2633 		      sector_t sector, u32 ei_lba)
2634 {
2635 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2636 
2637 	if (sdt->guard_tag != csum) {
2638 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2639 			(unsigned long)sector,
2640 			be16_to_cpu(sdt->guard_tag),
2641 			be16_to_cpu(csum));
2642 		return 0x01;
2643 	}
2644 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2645 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2646 		pr_err("REF check failed on sector %lu\n",
2647 			(unsigned long)sector);
2648 		return 0x03;
2649 	}
2650 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2651 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2652 		pr_err("REF check failed on sector %lu\n",
2653 			(unsigned long)sector);
2654 		return 0x03;
2655 	}
2656 	return 0;
2657 }
2658 
/* Copy protection tuples between the command's protection scatter list and
 * dif_storep for "sectors" sectors starting at "sector". Direction is into
 * the sgl when read is true, out of it otherwise. The dif store may be
 * smaller than the medium, so copies wrap around its end.
 * NOTE(review): sip comes from devip2sip() which can return NULL in
 * fake_rw mode; this function dereferences it unconditionally — presumably
 * callers only reach here with a real store. TODO confirm. */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		/* len: bytes handled in this iteration; rest: bytes past
		 * the end of the dif store that must wrap to its start */
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion at the start of the dif store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2704 
/* Verify the stored protection tuples for a read of "sectors" sectors from
 * start_sec, then copy them to the command's protection sgl. Tuples whose
 * app tag is 0xffff are skipped (escape value). Returns 0 on success or a
 * non-zero dif_verify() code on the first failing sector. */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* 0xffff app tag means "do not check" */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	/* all sectors verified: hand the tuples to the initiator */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2736 
/* Respond to the READ family of commands (READ(6/10/12/16/32) and the read
 * half of XDWRITEREAD(10)): decode lba/num from the cdb, apply optional
 * protection checks and error injections, then copy data from the backing
 * store into the command's buffer. Returns 0, check_condition_result,
 * illegal_condition_result or DID_ERROR << 16. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to a dummy lock when there is no store (fake_rw) */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;

	/* decode starting LBA and block count per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection forbids RDPROTECT bits in these cdbs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* optionally fake an unrecoverable medium error in a fixed range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* post-transfer error injections selected via module options */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2869 
/*
 * Log a hex/ASCII dump of a sector buffer via pr_err(), 16 bytes per row.
 * Printable characters are shown as " c ", everything else as two hex
 * digits; each row is prefixed with its decimal offset.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int row, col, n;
	char line[128];

	pr_err(">>> Sector Dump <<<\n");
	for (row = 0; row < len; row += 16) {
		n = 0;
		for (col = 0; col < 16; col++) {
			unsigned char c = buf[row + col];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(line + n, sizeof(line) - n,
					       " %c ", c);
			else
				n += scnprintf(line + n, sizeof(line) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", row, line);
	}
}
2891 
/* Verify incoming protection tuples against the incoming data for a write
 * of "sectors" sectors from start_sec. Walks the protection and data
 * scatter lists in lockstep; on success stores the tuples via
 * dif_copy_prot(). Returns 0 on success or a non-zero dif_verify() code
 * (0x01 is also used for a ran-out-of-data-pages condition). */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				/* log the offending sector before bailing */
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all sectors verified: persist tuples to the dif store */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2963 
2964 static unsigned long lba_to_map_index(sector_t lba)
2965 {
2966 	if (sdebug_unmap_alignment)
2967 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2968 	sector_div(lba, sdebug_unmap_granularity);
2969 	return lba;
2970 }
2971 
2972 static sector_t map_index_to_lba(unsigned long index)
2973 {
2974 	sector_t lba = index * sdebug_unmap_granularity;
2975 
2976 	if (sdebug_unmap_alignment)
2977 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2978 	return lba;
2979 }
2980 
/* Report whether lba is mapped (bit set in sip->map_storep) and, via *num,
 * how many consecutive blocks starting at lba share that state (clamped to
 * the end of the store). Returns 1 when mapped, 0 when deallocated. */
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	/* find where the run of same-state bits ends */
	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
3001 
3002 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3003 		       unsigned int len)
3004 {
3005 	sector_t end = lba + len;
3006 
3007 	while (lba < end) {
3008 		unsigned long index = lba_to_map_index(lba);
3009 
3010 		if (index < map_size)
3011 			set_bit(index, sip->map_storep);
3012 
3013 		lba = map_index_to_lba(index + 1);
3014 	}
3015 }
3016 
/* Clear provisioning-map bits for granules wholly contained in
 * [lba, lba + len). For each cleared granule, optionally scrub the data
 * (zeroes for LBPRZ=1, 0xff for LBPRZ=2) and invalidate its protection
 * tuples. Partially covered granules are left mapped. */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap a granule that is entirely inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				/* 0xff app tags mark tuples as "don't check" */
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3045 
/* Respond to the WRITE family of commands (WRITE(6/10/12/16/32) and the
 * write half of XDWRITEREAD(10)): decode lba/num from the cdb, apply
 * optional protection checks and error injections, then copy data from the
 * command's buffer into the backing store. Returns 0,
 * check_condition_result, illegal_condition_result or DID_ERROR << 16. */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip);
	/* fall back to a dummy lock when there is no store (fake_rw) */
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	/* decode starting LBA and block count per cdb variant */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection forbids WRPROTECT bits in these cdbs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}
	ret = check_device_access_params(scp, lba, num, true);
	if (ret)
		return ret;
	write_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);	/* mark blocks provisioned */
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* post-transfer error injections selected via module options */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3157 
3158 /*
3159  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3160  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3161  */
/* Respond to WRITE SCATTERED(16) or WRITE SCATTERED(32): the data-out
 * buffer starts with lbdof blocks holding a header plus num_lrd LBA range
 * descriptors (32 bytes each), followed by the data for each descriptor in
 * order. Each range is validated and written under the store's write lock.
 * Returns 0 on success or an error/sense result code. */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* holds fetched header + descriptor list */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* WRITE SCATTERED(32) */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header (lrd_size bytes) + descriptors must fit in the LB data
	 * offset region */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;	/* data for first descriptor follows header */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* running block total must not exceed the declared
		 * buffer transfer length */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* per-range error injections selected via module options */
		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
3325 
3326 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3327 			   u32 ei_lba, bool unmap, bool ndob)
3328 {
3329 	unsigned long long i;
3330 	u64 block, lbaa;
3331 	u32 lb_size = sdebug_sector_size;
3332 	int ret;
3333 	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3334 						scp->device->hostdata);
3335 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3336 	u8 *fs1p;
3337 	u8 *fsp;
3338 
3339 	ret = check_device_access_params(scp, lba, num, true);
3340 	if (ret)
3341 		return ret;
3342 
3343 	write_lock(macc_lckp);
3344 
3345 	if (unmap && scsi_debug_lbp()) {
3346 		unmap_region(sip, lba, num);
3347 		goto out;
3348 	}
3349 	lbaa = lba;
3350 	block = do_div(lbaa, sdebug_store_sectors);
3351 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3352 	fsp = sip->storep;
3353 	fs1p = fsp + (block * lb_size);
3354 	if (ndob) {
3355 		memset(fs1p, 0, lb_size);
3356 		ret = 0;
3357 	} else
3358 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3359 
3360 	if (-1 == ret) {
3361 		write_unlock(&sip->macc_lck);
3362 		return DID_ERROR << 16;
3363 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3364 		sdev_printk(KERN_INFO, scp->device,
3365 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3366 			    my_name, "write same", lb_size, ret);
3367 
3368 	/* Copy first sector to remaining blocks */
3369 	for (i = 1 ; i < num ; i++) {
3370 		lbaa = lba + i;
3371 		block = do_div(lbaa, sdebug_store_sectors);
3372 		memmove(fsp + (block * lb_size), fs1p, lb_size);
3373 	}
3374 	if (scsi_debug_lbp())
3375 		map_region(sip, lba, num);
3376 out:
3377 	write_unlock(macc_lckp);
3378 
3379 	return 0;
3380 }
3381 
3382 static int resp_write_same_10(struct scsi_cmnd *scp,
3383 			      struct sdebug_dev_info *devip)
3384 {
3385 	u8 *cmd = scp->cmnd;
3386 	u32 lba;
3387 	u16 num;
3388 	u32 ei_lba = 0;
3389 	bool unmap = false;
3390 
3391 	if (cmd[1] & 0x8) {
3392 		if (sdebug_lbpws10 == 0) {
3393 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3394 			return check_condition_result;
3395 		} else
3396 			unmap = true;
3397 	}
3398 	lba = get_unaligned_be32(cmd + 2);
3399 	num = get_unaligned_be16(cmd + 7);
3400 	if (num > sdebug_write_same_length) {
3401 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3402 		return check_condition_result;
3403 	}
3404 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3405 }
3406 
3407 static int resp_write_same_16(struct scsi_cmnd *scp,
3408 			      struct sdebug_dev_info *devip)
3409 {
3410 	u8 *cmd = scp->cmnd;
3411 	u64 lba;
3412 	u32 num;
3413 	u32 ei_lba = 0;
3414 	bool unmap = false;
3415 	bool ndob = false;
3416 
3417 	if (cmd[1] & 0x8) {	/* UNMAP */
3418 		if (sdebug_lbpws == 0) {
3419 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3420 			return check_condition_result;
3421 		} else
3422 			unmap = true;
3423 	}
3424 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3425 		ndob = true;
3426 	lba = get_unaligned_be64(cmd + 2);
3427 	num = get_unaligned_be32(cmd + 10);
3428 	if (num > sdebug_write_same_length) {
3429 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3430 		return check_condition_result;
3431 	}
3432 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3433 }
3434 
3435 /* Note the mode field is in the same position as the (lower) service action
3436  * field. For the Report supported operation codes command, SPC-4 suggests
3437  * each mode of this command should be reported separately; for future. */
/* Respond to WRITE BUFFER: no data is actually stored; the only visible
 * effect is raising microcode-changed (and possibly bus-reset) unit
 * attentions on this LU or on the other LUs of the same target, depending
 * on the download-microcode mode in cmd[1]. Always returns 0. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3483 
/* Respond to COMPARE AND WRITE: the data-out buffer carries 2*num blocks —
 * the first num are compared against the store, the second num are written
 * only when the comparison matches (done by comp_write_worker() under the
 * store's write lock). A miscompare returns MISCOMPARE sense. */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* temporary copy of the verify+write payload */
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	/* checked with write=false: the verify phase reads first */
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);	/* written blocks now mapped */
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
3545 
/* One 16-byte UNMAP block descriptor from an UNMAP parameter list (SBC):
 * big-endian starting LBA and number of logical blocks to unmap. */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3551 
/* Respond to UNMAP: copy the parameter list out of the command buffer,
 * validate its header fields, then unmap each described range under the
 * store's write lock. A no-op success is reported when logical block
 * provisioning is disabled. */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* header self-consistency: data length and block descriptor length */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}
3607 
3608 #define SDEBUG_GET_LBA_STATUS_LEN 32
3609 
/* Respond to GET LBA STATUS: return one LBA status descriptor describing
 * whether the starting LBA is mapped or deallocated and how many following
 * blocks share that state. With provisioning disabled, everything up to
 * the capacity is reported as mapped. */
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip);
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* too small to hold even one descriptor: return nothing */
	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp())
		mapped = map_state(sip, lba, &num);
	else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
3650 
3651 static int resp_sync_cache(struct scsi_cmnd *scp,
3652 			   struct sdebug_dev_info *devip)
3653 {
3654 	int res = 0;
3655 	u64 lba;
3656 	u32 num_blocks;
3657 	u8 *cmd = scp->cmnd;
3658 
3659 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3660 		lba = get_unaligned_be32(cmd + 2);
3661 		num_blocks = get_unaligned_be16(cmd + 7);
3662 	} else {				/* SYNCHRONIZE_CACHE(16) */
3663 		lba = get_unaligned_be64(cmd + 2);
3664 		num_blocks = get_unaligned_be32(cmd + 10);
3665 	}
3666 	if (lba + num_blocks > sdebug_capacity) {
3667 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3668 		return check_condition_result;
3669 	}
3670 	if (!write_since_sync || cmd[1] & 0x2)
3671 		res = SDEG_RES_IMMED_MASK;
3672 	else		/* delay if write_since_sync and IMMED clear */
3673 		write_since_sync = false;
3674 	return res;
3675 }
3676 
3677 #define RL_BUCKET_ELEMS 8
3678 
3679 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3680  * (W-LUN), the normal Linux scanning logic does not associate it with a
3681  * device (e.g. /dev/sg7). The following magic will make that association:
3682  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3683  * where <n> is a host number. If there are multiple targets in a host then
3684  * the above will associate a W-LUN to each target. To only get a W-LUN
3685  * for target 2, then use "echo '- 2 49409' > scan" .
3686  */
/* Respond to REPORT LUNS.  The response is produced in buckets of
 * RL_BUCKET_ELEMS 8-byte entries; the first bucket starts with the 8-byte
 * response header (hence the j = 1 offset on the first pass). */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC requires allocation length of at least 4 bytes */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket carries the header in slot 0 */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* partially-filled bucket: flushed after the loop */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN as the last entry */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3777 
3778 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3779 {
3780 	bool is_bytchk3 = false;
3781 	u8 bytchk;
3782 	int ret, j;
3783 	u32 vnum, a_num, off;
3784 	const u32 lb_size = sdebug_sector_size;
3785 	u64 lba;
3786 	u8 *arr;
3787 	u8 *cmd = scp->cmnd;
3788 	struct sdeb_store_info *sip = devip2sip(devip);
3789 	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3790 
3791 	bytchk = (cmd[1] >> 1) & 0x3;
3792 	if (bytchk == 0) {
3793 		return 0;	/* always claim internal verify okay */
3794 	} else if (bytchk == 2) {
3795 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
3796 		return check_condition_result;
3797 	} else if (bytchk == 3) {
3798 		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
3799 	}
3800 	switch (cmd[0]) {
3801 	case VERIFY_16:
3802 		lba = get_unaligned_be64(cmd + 2);
3803 		vnum = get_unaligned_be32(cmd + 10);
3804 		break;
3805 	case VERIFY:		/* is VERIFY(10) */
3806 		lba = get_unaligned_be32(cmd + 2);
3807 		vnum = get_unaligned_be16(cmd + 7);
3808 		break;
3809 	default:
3810 		mk_sense_invalid_opcode(scp);
3811 		return check_condition_result;
3812 	}
3813 	a_num = is_bytchk3 ? 1 : vnum;
3814 	/* Treat following check like one for read (i.e. no write) access */
3815 	ret = check_device_access_params(scp, lba, a_num, false);
3816 	if (ret)
3817 		return ret;
3818 
3819 	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
3820 	if (!arr) {
3821 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3822 				INSUFF_RES_ASCQ);
3823 		return check_condition_result;
3824 	}
3825 	/* Not changing store, so only need read access */
3826 	read_lock(macc_lckp);
3827 
3828 	ret = do_dout_fetch(scp, a_num, arr);
3829 	if (ret == -1) {
3830 		ret = DID_ERROR << 16;
3831 		goto cleanup;
3832 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
3833 		sdev_printk(KERN_INFO, scp->device,
3834 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3835 			    my_name, __func__, a_num * lb_size, ret);
3836 	}
3837 	if (is_bytchk3) {
3838 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
3839 			memcpy(arr + off, arr, lb_size);
3840 	}
3841 	ret = 0;
3842 	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
3843 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3844 		ret = check_condition_result;
3845 		goto cleanup;
3846 	}
3847 cleanup:
3848 	read_unlock(macc_lckp);
3849 	kfree(arr);
3850 	return ret;
3851 }
3852 
3853 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3854 {
3855 	u32 tag = blk_mq_unique_tag(cmnd->request);
3856 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3857 
3858 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3859 	if (WARN_ON_ONCE(hwq >= submit_queues))
3860 		hwq = 0;
3861 	return sdebug_q_arr + hwq;
3862 }
3863 
3864 /* Queued (deferred) command completions converge here. */
/* Complete a deferred command: tear down its queue slot under qc_lock,
 * adjust the retired_max_queue bookkeeping if max_queue was shrunk, then
 * (unless the command was injected-aborted) hand it back to the mid level.
 * Called from both the hrtimer and work-queue completion paths. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;	/* consume the one-shot abort flag */
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions landing on a different CPU than issue */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity check before indexing qc_arr with qc_idx */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means max_queue was reduced at runtime */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no in-use slot remains above the new limit, the
		 * retirement is finished and the marker can be cleared */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
3937 
3938 /* When high resolution timer goes off this function is called. */
3939 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3940 {
3941 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3942 						  hrt);
3943 	sdebug_q_cmd_complete(sd_dp);
3944 	return HRTIMER_NORESTART;
3945 }
3946 
3947 /* When work queue schedules work, it calls this function. */
3948 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3949 {
3950 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3951 						  ew.work);
3952 	sdebug_q_cmd_complete(sd_dp);
3953 }
3954 
/* Lazily-generated LU name shared by all devices when sdebug_uuid_ctl == 2
 * (see sdebug_device_create()). */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3957 
3958 static struct sdebug_dev_info *sdebug_device_create(
3959 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3960 {
3961 	struct sdebug_dev_info *devip;
3962 
3963 	devip = kzalloc(sizeof(*devip), flags);
3964 	if (devip) {
3965 		if (sdebug_uuid_ctl == 1)
3966 			uuid_gen(&devip->lu_name);
3967 		else if (sdebug_uuid_ctl == 2) {
3968 			if (got_shared_uuid)
3969 				devip->lu_name = shared_uuid;
3970 			else {
3971 				uuid_gen(&shared_uuid);
3972 				got_shared_uuid = true;
3973 				devip->lu_name = shared_uuid;
3974 			}
3975 		}
3976 		devip->sdbg_host = sdbg_host;
3977 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3978 	}
3979 	return devip;
3980 }
3981 
/* Find the sdebug_dev_info matching sdev's <channel, id, lun>; failing
 * that, reuse the first unused slot on the host's list, or create a new
 * one.  Returns NULL if the host info is missing or allocation fails. */
static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *open_devip = NULL;
	struct sdebug_dev_info *devip;

	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		pr_err("Host info NULL\n");
		return NULL;
	}
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			/* remember the first free slot for possible reuse */
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			pr_err("out of memory at line %d\n", __LINE__);
			return NULL;
		}
	}

	/* (re)initialize the chosen slot for this scsi_device */
	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	atomic_set(&open_devip->num_in_q, 0);
	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);	/* power-on unit attention */
	open_devip->used = true;
	return open_devip;
}
4020 
/* Mid-level slave_alloc hook: nothing to allocate here (device info is
 * built lazily in slave_configure); just optionally log the event. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
4028 
4029 static int scsi_debug_slave_configure(struct scsi_device *sdp)
4030 {
4031 	struct sdebug_dev_info *devip =
4032 			(struct sdebug_dev_info *)sdp->hostdata;
4033 
4034 	if (sdebug_verbose)
4035 		pr_info("slave_configure <%u %u %u %llu>\n",
4036 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4037 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
4038 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
4039 	if (devip == NULL) {
4040 		devip = find_build_dev_info(sdp);
4041 		if (devip == NULL)
4042 			return 1;  /* no resources, will be marked offline */
4043 	}
4044 	sdp->hostdata = devip;
4045 	if (sdebug_no_uld)
4046 		sdp->no_uld_attach = 1;
4047 	config_cdb_len(sdp);
4048 	return 0;
4049 }
4050 
4051 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
4052 {
4053 	struct sdebug_dev_info *devip =
4054 		(struct sdebug_dev_info *)sdp->hostdata;
4055 
4056 	if (sdebug_verbose)
4057 		pr_info("slave_destroy <%u %u %u %llu>\n",
4058 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
4059 	if (devip) {
4060 		/* make this slot available for re-use */
4061 		devip->used = false;
4062 		sdp->hostdata = NULL;
4063 	}
4064 }
4065 
4066 static void stop_qc_helper(struct sdebug_defer *sd_dp,
4067 			   enum sdeb_defer_type defer_t)
4068 {
4069 	if (!sd_dp)
4070 		return;
4071 	if (defer_t == SDEB_DEFER_HRT)
4072 		hrtimer_cancel(&sd_dp->hrt);
4073 	else if (defer_t == SDEB_DEFER_WQ)
4074 		cancel_work_sync(&sd_dp->ew.work);
4075 }
4076 
4077 /* If @cmnd found deletes its timer or work queue and returns true; else
4078    returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		/* slots above a reduced max_queue may still be draining */
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot and clear the deferral type under
				 * the lock; the cancel itself must happen
				 * after the lock is dropped */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4122 
4123 /* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* for each queue, cancel every in-use slot's pending completion */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* record then clear the deferral type under
				 * the lock; the cancel runs unlocked because
				 * it may wait for a handler to finish */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				/* re-take the lock to continue the scan */
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4161 
4162 /* Free queued command memory on heap */
4163 static void free_all_queued(void)
4164 {
4165 	int j, k;
4166 	struct sdebug_queue *sqp;
4167 	struct sdebug_queued_cmd *sqcp;
4168 
4169 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4170 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4171 			sqcp = &sqp->qc_arr[k];
4172 			kfree(sqcp->sd_dp);
4173 			sqcp->sd_dp = NULL;
4174 		}
4175 	}
4176 }
4177 
4178 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4179 {
4180 	bool ok;
4181 
4182 	++num_aborts;
4183 	if (SCpnt) {
4184 		ok = stop_queued_cmnd(SCpnt);
4185 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4186 			sdev_printk(KERN_INFO, SCpnt->device,
4187 				    "%s: command%s found\n", __func__,
4188 				    ok ? "" : " not");
4189 	}
4190 	return SUCCESS;
4191 }
4192 
4193 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4194 {
4195 	++num_dev_resets;
4196 	if (SCpnt && SCpnt->device) {
4197 		struct scsi_device *sdp = SCpnt->device;
4198 		struct sdebug_dev_info *devip =
4199 				(struct sdebug_dev_info *)sdp->hostdata;
4200 
4201 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4202 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4203 		if (devip)
4204 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4205 	}
4206 	return SUCCESS;
4207 }
4208 
/* Error-handler target reset hook: raise a bus-reset unit attention on
 * every device sharing the addressed command's target id.  Always reports
 * SUCCESS, even when no devices (or no command) can be identified. */
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;	/* number of devices given the unit attention */

	++num_target_resets;
	if (!SCpnt)
		goto lie;
	sdp = SCpnt->device;
	if (!sdp)
		goto lie;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (!hp)
		goto lie;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
	if (sdbg_host) {
		list_for_each_entry(devip,
				    &sdbg_host->dev_info_list,
				    dev_list)
			if (devip->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);
lie:
	return SUCCESS;
}
4244 
/* Error-handler bus reset hook: raise a bus-reset unit attention on every
 * device of the addressed command's host.  Always reports SUCCESS. */
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;	/* number of devices given the unit attention */

	++num_bus_resets;
	if (!(SCpnt && SCpnt->device))
		goto lie;
	sdp = SCpnt->device;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (hp) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(devip,
					    &sdbg_host->dev_info_list,
					    dev_list) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
		}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in host\n", __func__, k);
lie:
	return SUCCESS;
}
4277 
/* Error-handler host reset hook: raise a bus-reset unit attention on every
 * device of every emulated host, then cancel all queued commands.
 * Always reports SUCCESS. */
static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;	/* number of devices given the unit attention */

	++num_host_resets;
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	spin_unlock(&sdebug_host_list_lock);
	/* cancel pending hrtimer/work completions outside the host lock */
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
4302 
/* Write an MSDOS (MBR) partition table into the start of the ram store so
 * that the fake disk appears pre-partitioned into sdebug_num_parts equal
 * cylinder-aligned Linux partitions.  No-op for tiny stores (< 1 MiB). */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert linear sector numbers to CHS for the MBR entry */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4351 
4352 static void block_unblock_all_queues(bool block)
4353 {
4354 	int j;
4355 	struct sdebug_queue *sqp;
4356 
4357 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4358 		atomic_set(&sqp->blocked, (int)block);
4359 }
4360 
4361 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4362  * commands will be processed normally before triggers occur.
4363  */
4364 static void tweak_cmnd_count(void)
4365 {
4366 	int count, modulo;
4367 
4368 	modulo = abs(sdebug_every_nth);
4369 	if (modulo < 2)
4370 		return;
4371 	block_unblock_all_queues(true);
4372 	count = atomic_read(&sdebug_cmnd_count);
4373 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4374 	block_unblock_all_queues(false);
4375 }
4376 
/* Reset all driver-wide statistics/injection counters to zero. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4384 
4385 static void setup_inject(struct sdebug_queue *sqp,
4386 			 struct sdebug_queued_cmd *sqcp)
4387 {
4388 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4389 		if (sdebug_every_nth > 0)
4390 			sqcp->inj_recovered = sqcp->inj_transport
4391 				= sqcp->inj_dif
4392 				= sqcp->inj_dix = sqcp->inj_short
4393 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4394 		return;
4395 	}
4396 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4397 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4398 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4399 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4400 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4401 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4402 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4403 }
4404 
4405 #define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */
4406 
4407 /* Complete the processing of the thread that queued a SCSI command to this
4408  * driver. It either completes the command by calling cmnd_done() or
4409  * schedules a hr timer or work queue then returns 0. Returns
4410  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4411  */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	int k, num_in_q, qdepth, inject;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	/* delay of 0 means complete synchronously in this thread */
	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	/* device queue full: answer TASK SET FULL unless an error result
	 * was already requested for this command */
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* optional injection of an occasional TASK SET FULL */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	/* lazily allocate the slot's deferral struct on first use */
	if (sd_dp == NULL) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (sd_dp == NULL)
			return SCSI_MLQUEUE_HOST_BUSY;
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* timestamp so very short ndelays can account for work done here */
	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		/* positive delay: defer completion via a hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* already late: undo the queueing and
					 * complete synchronously instead */
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely(sqcp->inj_cmd_abort))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely(sqcp->inj_cmd_abort)) {
			/* injected abort: tell the block layer to time out */
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    cmnd->request->tag);
			blk_abort_request(cmnd->request);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4601 
4602 /* Note: The following macros create attribute files in the
4603    /sys/module/scsi_debug/parameters directory. Unfortunately this
4604    driver is unaware of a change and cannot trigger auxiliary actions
4605    as it can when the corresponding attribute in the
4606    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4607  */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

/* Module identity. */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* Parameter descriptions; keep in sync with the module_param list above. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4727 
4728 #define SDEBUG_INFO_LEN 256
4729 static char sdebug_info[SDEBUG_INFO_LEN];
4730 
4731 static const char *scsi_debug_info(struct Scsi_Host *shp)
4732 {
4733 	int k;
4734 
4735 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4736 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4737 	if (k >= (SDEBUG_INFO_LEN - 1))
4738 		return sdebug_info;
4739 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4740 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4741 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4742 		  "statistics", (int)sdebug_statistics);
4743 	return sdebug_info;
4744 }
4745 
4746 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4747 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4748 				 int length)
4749 {
4750 	char arr[16];
4751 	int opts;
4752 	int minLen = length > 15 ? 15 : length;
4753 
4754 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4755 		return -EACCES;
4756 	memcpy(arr, buffer, minLen);
4757 	arr[minLen] = '\0';
4758 	if (1 != sscanf(arr, "%d", &opts))
4759 		return -EINVAL;
4760 	sdebug_opts = opts;
4761 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4762 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4763 	if (sdebug_every_nth != 0)
4764 		tweak_cmnd_count();
4765 	return length;
4766 }
4767 
4768 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4769  * same for each scsi_debug host (if more than one). Some of the counters
4770  * output are not atomics so might be inaccurate in a busy system. */
4771 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4772 {
4773 	int f, j, l;
4774 	struct sdebug_queue *sqp;
4775 	struct sdebug_host_info *sdhp;
4776 
4777 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4778 		   SDEBUG_VERSION, sdebug_version_date);
4779 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4780 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4781 		   sdebug_opts, sdebug_every_nth);
4782 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4783 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4784 		   sdebug_sector_size, "bytes");
4785 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4786 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4787 		   num_aborts);
4788 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4789 		   num_dev_resets, num_target_resets, num_bus_resets,
4790 		   num_host_resets);
4791 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4792 		   dix_reads, dix_writes, dif_errors);
4793 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4794 		   sdebug_statistics);
4795 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4796 		   atomic_read(&sdebug_cmnd_count),
4797 		   atomic_read(&sdebug_completions),
4798 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4799 		   atomic_read(&sdebug_a_tsf));
4800 
4801 	seq_printf(m, "submit_queues=%d\n", submit_queues);
4802 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4803 		seq_printf(m, "  queue %d:\n", j);
4804 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4805 		if (f != sdebug_max_queue) {
4806 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4807 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4808 				   "first,last bits", f, l);
4809 		}
4810 	}
4811 
4812 	seq_printf(m, "this host_no=%d\n", host->host_no);
4813 	if (!xa_empty(per_store_ap)) {
4814 		bool niu;
4815 		int idx;
4816 		unsigned long l_idx;
4817 		struct sdeb_store_info *sip;
4818 
4819 		seq_puts(m, "\nhost list:\n");
4820 		j = 0;
4821 		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
4822 			idx = sdhp->si_idx;
4823 			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
4824 				   sdhp->shost->host_no, idx);
4825 			++j;
4826 		}
4827 		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
4828 			   sdeb_most_recent_idx);
4829 		j = 0;
4830 		xa_for_each(per_store_ap, l_idx, sip) {
4831 			niu = xa_get_mark(per_store_ap, l_idx,
4832 					  SDEB_XA_NOT_IN_USE);
4833 			idx = (int)l_idx;
4834 			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
4835 				   (niu ? "  not_in_use" : ""));
4836 			++j;
4837 		}
4838 	}
4839 	return 0;
4840 }
4841 
4842 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4843 {
4844 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4845 }
4846 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4847  * of delay is jiffies.
4848  */
4849 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4850 			   size_t count)
4851 {
4852 	int jdelay, res;
4853 
4854 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4855 		res = count;
4856 		if (sdebug_jdelay != jdelay) {
4857 			int j, k;
4858 			struct sdebug_queue *sqp;
4859 
4860 			block_unblock_all_queues(true);
4861 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4862 			     ++j, ++sqp) {
4863 				k = find_first_bit(sqp->in_use_bm,
4864 						   sdebug_max_queue);
4865 				if (k != sdebug_max_queue) {
4866 					res = -EBUSY;   /* queued commands */
4867 					break;
4868 				}
4869 			}
4870 			if (res > 0) {
4871 				sdebug_jdelay = jdelay;
4872 				sdebug_ndelay = 0;
4873 			}
4874 			block_unblock_all_queues(false);
4875 		}
4876 		return res;
4877 	}
4878 	return -EINVAL;
4879 }
4880 static DRIVER_ATTR_RW(delay);
4881 
4882 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4883 {
4884 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4885 }
4886 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4887 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4888 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4889 			    size_t count)
4890 {
4891 	int ndelay, res;
4892 
4893 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4894 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4895 		res = count;
4896 		if (sdebug_ndelay != ndelay) {
4897 			int j, k;
4898 			struct sdebug_queue *sqp;
4899 
4900 			block_unblock_all_queues(true);
4901 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4902 			     ++j, ++sqp) {
4903 				k = find_first_bit(sqp->in_use_bm,
4904 						   sdebug_max_queue);
4905 				if (k != sdebug_max_queue) {
4906 					res = -EBUSY;   /* queued commands */
4907 					break;
4908 				}
4909 			}
4910 			if (res > 0) {
4911 				sdebug_ndelay = ndelay;
4912 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4913 							: DEF_JDELAY;
4914 			}
4915 			block_unblock_all_queues(false);
4916 		}
4917 		return res;
4918 	}
4919 	return -EINVAL;
4920 }
4921 static DRIVER_ATTR_RW(ndelay);
4922 
4923 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4924 {
4925 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4926 }
4927 
4928 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4929 			  size_t count)
4930 {
4931 	int opts;
4932 	char work[20];
4933 
4934 	if (sscanf(buf, "%10s", work) == 1) {
4935 		if (strncasecmp(work, "0x", 2) == 0) {
4936 			if (kstrtoint(work + 2, 16, &opts) == 0)
4937 				goto opts_done;
4938 		} else {
4939 			if (kstrtoint(work, 10, &opts) == 0)
4940 				goto opts_done;
4941 		}
4942 	}
4943 	return -EINVAL;
4944 opts_done:
4945 	sdebug_opts = opts;
4946 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4947 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4948 	tweak_cmnd_count();
4949 	return count;
4950 }
4951 static DRIVER_ATTR_RW(opts);
4952 
4953 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4954 {
4955 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4956 }
4957 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4958 			   size_t count)
4959 {
4960 	int n;
4961 
4962 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4963 		sdebug_ptype = n;
4964 		return count;
4965 	}
4966 	return -EINVAL;
4967 }
4968 static DRIVER_ATTR_RW(ptype);
4969 
4970 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4971 {
4972 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4973 }
4974 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4975 			    size_t count)
4976 {
4977 	int n;
4978 
4979 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4980 		sdebug_dsense = n;
4981 		return count;
4982 	}
4983 	return -EINVAL;
4984 }
4985 static DRIVER_ATTR_RW(dsense);
4986 
/* Sysfs: report whether reads/writes are faked (no store data copied). */
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/*
 * Sysfs: set fake_rw. A value change has backing-store side effects:
 *   1 --> 0: (re)establish a ram store and point every host at it;
 *   0 --> 1: erase all stores apart from the first.
 * Returns count on success (including no-op writes), -EINVAL on bad
 * input, or a negative errno from sdebug_add_store() on failure.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);	/* fake_rw=0 needs a store */
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize both values to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				/* first store kept; mark it in-use again */
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
5034 
5035 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
5036 {
5037 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
5038 }
5039 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
5040 			      size_t count)
5041 {
5042 	int n;
5043 
5044 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5045 		sdebug_no_lun_0 = n;
5046 		return count;
5047 	}
5048 	return -EINVAL;
5049 }
5050 static DRIVER_ATTR_RW(no_lun_0);
5051 
5052 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
5053 {
5054 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
5055 }
5056 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
5057 			      size_t count)
5058 {
5059 	int n;
5060 
5061 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5062 		sdebug_num_tgts = n;
5063 		sdebug_max_tgts_luns();
5064 		return count;
5065 	}
5066 	return -EINVAL;
5067 }
5068 static DRIVER_ATTR_RW(num_tgts);
5069 
5070 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
5071 {
5072 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
5073 }
5074 static DRIVER_ATTR_RO(dev_size_mb);
5075 
5076 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
5077 {
5078 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
5079 }
5080 
5081 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
5082 				    size_t count)
5083 {
5084 	bool v;
5085 
5086 	if (kstrtobool(buf, &v))
5087 		return -EINVAL;
5088 
5089 	sdebug_per_host_store = v;
5090 	return count;
5091 }
5092 static DRIVER_ATTR_RW(per_host_store);
5093 
5094 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
5095 {
5096 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
5097 }
5098 static DRIVER_ATTR_RO(num_parts);
5099 
5100 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
5101 {
5102 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
5103 }
5104 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
5105 			       size_t count)
5106 {
5107 	int nth;
5108 
5109 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
5110 		sdebug_every_nth = nth;
5111 		if (nth && !sdebug_statistics) {
5112 			pr_info("every_nth needs statistics=1, set it\n");
5113 			sdebug_statistics = true;
5114 		}
5115 		tweak_cmnd_count();
5116 		return count;
5117 	}
5118 	return -EINVAL;
5119 }
5120 static DRIVER_ATTR_RW(every_nth);
5121 
5122 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
5123 {
5124 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
5125 }
5126 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
5127 			      size_t count)
5128 {
5129 	int n;
5130 	bool changed;
5131 
5132 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5133 		if (n > 256) {
5134 			pr_warn("max_luns can be no more than 256\n");
5135 			return -EINVAL;
5136 		}
5137 		changed = (sdebug_max_luns != n);
5138 		sdebug_max_luns = n;
5139 		sdebug_max_tgts_luns();
5140 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
5141 			struct sdebug_host_info *sdhp;
5142 			struct sdebug_dev_info *dp;
5143 
5144 			spin_lock(&sdebug_host_list_lock);
5145 			list_for_each_entry(sdhp, &sdebug_host_list,
5146 					    host_list) {
5147 				list_for_each_entry(dp, &sdhp->dev_info_list,
5148 						    dev_list) {
5149 					set_bit(SDEBUG_UA_LUNS_CHANGED,
5150 						dp->uas_bm);
5151 				}
5152 			}
5153 			spin_unlock(&sdebug_host_list_lock);
5154 		}
5155 		return count;
5156 	}
5157 	return -EINVAL;
5158 }
5159 static DRIVER_ATTR_RW(max_luns);
5160 
/* Sysfs: report the current queued-command limit. */
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;	/* will hold highest in-use slot over all queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)	/* no slot in use anywhere */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)	/* in-flight slots above new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
5196 
5197 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
5198 {
5199 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
5200 }
5201 static DRIVER_ATTR_RO(no_uld);
5202 
5203 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
5204 {
5205 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5206 }
5207 static DRIVER_ATTR_RO(scsi_level);
5208 
5209 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
5210 {
5211 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
5212 }
5213 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
5214 				size_t count)
5215 {
5216 	int n;
5217 	bool changed;
5218 
5219 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5220 		changed = (sdebug_virtual_gb != n);
5221 		sdebug_virtual_gb = n;
5222 		sdebug_capacity = get_sdebug_capacity();
5223 		if (changed) {
5224 			struct sdebug_host_info *sdhp;
5225 			struct sdebug_dev_info *dp;
5226 
5227 			spin_lock(&sdebug_host_list_lock);
5228 			list_for_each_entry(sdhp, &sdebug_host_list,
5229 					    host_list) {
5230 				list_for_each_entry(dp, &sdhp->dev_info_list,
5231 						    dev_list) {
5232 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
5233 						dp->uas_bm);
5234 				}
5235 			}
5236 			spin_unlock(&sdebug_host_list_lock);
5237 		}
5238 		return count;
5239 	}
5240 	return -EINVAL;
5241 }
5242 static DRIVER_ATTR_RW(virtual_gb);
5243 
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Sysfs: add (delta > 0) or remove (delta < 0) simulated hosts, one at a
 * time. When fake_rw is 0 and per_host_store is set, adding a host first
 * tries to re-use a store previously marked not-in-use before creating a
 * new one. Returns count on success, -EINVAL on unparseable input.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* scan for any store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
5287 
5288 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5289 {
5290 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5291 }
5292 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5293 				    size_t count)
5294 {
5295 	int n;
5296 
5297 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5298 		sdebug_vpd_use_hostno = n;
5299 		return count;
5300 	}
5301 	return -EINVAL;
5302 }
5303 static DRIVER_ATTR_RW(vpd_use_hostno);
5304 
5305 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5306 {
5307 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5308 }
5309 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5310 				size_t count)
5311 {
5312 	int n;
5313 
5314 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5315 		if (n > 0)
5316 			sdebug_statistics = true;
5317 		else {
5318 			clear_queue_stats();
5319 			sdebug_statistics = false;
5320 		}
5321 		return count;
5322 	}
5323 	return -EINVAL;
5324 }
5325 static DRIVER_ATTR_RW(statistics);
5326 
5327 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5328 {
5329 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5330 }
5331 static DRIVER_ATTR_RO(sector_size);
5332 
5333 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5334 {
5335 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5336 }
5337 static DRIVER_ATTR_RO(submit_queues);
5338 
5339 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5340 {
5341 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5342 }
5343 static DRIVER_ATTR_RO(dix);
5344 
5345 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5346 {
5347 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5348 }
5349 static DRIVER_ATTR_RO(dif);
5350 
5351 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5352 {
5353 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5354 }
5355 static DRIVER_ATTR_RO(guard);
5356 
5357 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5358 {
5359 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5360 }
5361 static DRIVER_ATTR_RO(ato);
5362 
/*
 * Sysfs (read-only): show which blocks of store 0 are mapped, as a bit
 * list ("%*pbl"). When logical block provisioning is disabled the whole
 * range 0..sdebug_store_sectors is reported instead.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	/* PAGE_SIZE - 1 above leaves room for this newline plus the NUL */
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5384 
5385 static ssize_t random_show(struct device_driver *ddp, char *buf)
5386 {
5387 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
5388 }
5389 
5390 static ssize_t random_store(struct device_driver *ddp, const char *buf,
5391 			    size_t count)
5392 {
5393 	bool v;
5394 
5395 	if (kstrtobool(buf, &v))
5396 		return -EINVAL;
5397 
5398 	sdebug_random = v;
5399 	return count;
5400 }
5401 static DRIVER_ATTR_RW(random);
5402 
5403 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5404 {
5405 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5406 }
5407 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5408 			       size_t count)
5409 {
5410 	int n;
5411 
5412 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5413 		sdebug_removable = (n > 0);
5414 		return count;
5415 	}
5416 	return -EINVAL;
5417 }
5418 static DRIVER_ATTR_RW(removable);
5419 
5420 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5421 {
5422 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5423 }
5424 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5425 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5426 			       size_t count)
5427 {
5428 	int n;
5429 
5430 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5431 		sdebug_host_lock = (n > 0);
5432 		return count;
5433 	}
5434 	return -EINVAL;
5435 }
5436 static DRIVER_ATTR_RW(host_lock);
5437 
5438 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5439 {
5440 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5441 }
5442 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5443 			    size_t count)
5444 {
5445 	int n;
5446 
5447 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5448 		sdebug_strict = (n > 0);
5449 		return count;
5450 	}
5451 	return -EINVAL;
5452 }
5453 static DRIVER_ATTR_RW(strict);
5454 
5455 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5456 {
5457 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5458 }
5459 static DRIVER_ATTR_RO(uuid_ctl);
5460 
5461 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5462 {
5463 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5464 }
5465 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5466 			     size_t count)
5467 {
5468 	int ret, n;
5469 
5470 	ret = kstrtoint(buf, 0, &n);
5471 	if (ret)
5472 		return ret;
5473 	sdebug_cdb_len = n;
5474 	all_config_cdb_len();
5475 	return count;
5476 }
5477 static DRIVER_ATTR_RW(cdb_len);
5478 
5479 
5480 /* Note: The following array creates attribute files in the
5481    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5482    files (over those found in the /sys/module/scsi_debug/parameters
5483    directory) is that auxiliary actions can be triggered when an attribute
5484    is changed. For example see: add_host_store() above.
5485  */
5486 
/* NULL-terminated table of the driver attributes declared above. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,	/* list terminator */
};
/* Generates sdebug_drv_groups, referenced by pseudo_lld_bus below. */
ATTRIBUTE_GROUPS(sdebug_drv);
5524 
5525 static struct device *pseudo_primary;
5526 
/*
 * Module init: validate module parameters, size the ramdisk and fake disk
 * geometry, create the default backing store (skipped when fake_rw is set),
 * register the pseudo bus/driver pair, then instantiate add_host hosts.
 * Returns 0 on success or a negated errno, unwinding in reverse order.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	/* sanitize the delay parameters; a big enough ndelay overrides jdelay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	/* one sdebug_queue (with its own lock) per submit queue */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	/* logical block provisioning parameter checks */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	/* per-store xarray: index-allocating, lock taken IRQ-safe */
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	/* first host reuses the store built above; others may get their own */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

	/* error unwind: reverse order of the setup above */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* no-op when idx is still -1 */
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5710 
/*
 * Module unload: stop queued work, remove every emulated host (newest
 * first — sdebug_do_remove_host() takes the list tail), unregister the
 * driver-model objects and finally release all backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* false => erase every store, then tear down the xarray itself */
	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
}
5726 
5727 device_initcall(scsi_debug_init);
5728 module_exit(scsi_debug_exit);
5729 
/*
 * Release callback for the struct device embedded in sdebug_host_info;
 * invoked by the driver core when the last reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5737 
5738 /* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		/* caller gave no pointer: look the store up by index */
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	/* free the optional LBP bitmap, PI area and the user-data ramdisk */
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
5756 
5757 /* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* spare the first store visited */
		else
			sdebug_erase_store(idx, sip);
	}
	/* flag still set => loop body never ran (xarray empty): reset tracking */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
5772 
5773 /*
5774  * Returns store xarray new element index (idx) if >=0 else negated errno.
5775  * Limit the number of stores to 65536.
5776  */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	/* NOTE(review): struct xa_limit .max is inclusive, so indexes 0..65536
	 * are allowed — one more than the 65536 the comment above implies;
	 * confirm whether .max should be (1 << 16) - 1. */
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index and publish the (still empty) store atomically */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;	/* default errno for the allocation failures below */
	sip->storep = vzalloc(sz);	/* zeroed user-data ramdisk */
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		/* one t10_pi_tuple per logical block */
		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		/* bitmap of provisioned (mapped) blocks */
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	/* removes the xarray entry and frees sip plus partial allocations */
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
5855 
5856 static int sdebug_add_host_helper(int per_host_idx)
5857 {
5858 	int k, devs_per_host, idx;
5859 	int error = -ENOMEM;
5860 	struct sdebug_host_info *sdbg_host;
5861 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5862 
5863 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5864 	if (!sdbg_host)
5865 		return -ENOMEM;
5866 	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
5867 	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
5868 		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
5869 	sdbg_host->si_idx = idx;
5870 
5871 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5872 
5873 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5874 	for (k = 0; k < devs_per_host; k++) {
5875 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5876 		if (!sdbg_devinfo)
5877 			goto clean;
5878 	}
5879 
5880 	spin_lock(&sdebug_host_list_lock);
5881 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5882 	spin_unlock(&sdebug_host_list_lock);
5883 
5884 	sdbg_host->dev.bus = &pseudo_lld_bus;
5885 	sdbg_host->dev.parent = pseudo_primary;
5886 	sdbg_host->dev.release = &sdebug_release_adapter;
5887 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
5888 
5889 	error = device_register(&sdbg_host->dev);
5890 	if (error)
5891 		goto clean;
5892 
5893 	++sdebug_num_hosts;
5894 	return 0;
5895 
5896 clean:
5897 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5898 				 dev_list) {
5899 		list_del(&sdbg_devinfo->dev_list);
5900 		kfree(sdbg_devinfo);
5901 	}
5902 	kfree(sdbg_host);
5903 	pr_warn("%s: failed, errno=%d\n", __func__, -error);
5904 	return error;
5905 }
5906 
5907 static int sdebug_do_add_host(bool mk_new_store)
5908 {
5909 	int ph_idx = sdeb_most_recent_idx;
5910 
5911 	if (mk_new_store) {
5912 		ph_idx = sdebug_add_store();
5913 		if (ph_idx < 0)
5914 			return ph_idx;
5915 	}
5916 	return sdebug_add_host_helper(ph_idx);
5917 }
5918 
/*
 * Remove the most recently added host (list tail). Unless this is the
 * final shutdown (the_end), a backing store used by no other host is
 * marked not-in-use so a future host can recycle it.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the newest host: tail of sdebug_host_list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host sharing this store index? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* drop the device; release callback frees the host structure */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
5958 
/*
 * change_queue_depth() host-template callback. Clamps the requested depth
 * to [1, SDEBUG_CANQUEUE + 10] and applies it with all queues blocked.
 * Returns the resulting queue depth, or -ENODEV when the scsi_device has
 * no sdebug per-device state attached.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5986 
5987 static bool fake_timeout(struct scsi_cmnd *scp)
5988 {
5989 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5990 		if (sdebug_every_nth < -1)
5991 			sdebug_every_nth = -1;
5992 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5993 			return true; /* ignore command causing timeout */
5994 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5995 			 scsi_medium_access_command(scp))
5996 			return true; /* time out reads and writes */
5997 	}
5998 	return false;
5999 }
6000 
6001 static bool fake_host_busy(struct scsi_cmnd *scp)
6002 {
6003 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
6004 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6005 }
6006 
/*
 * queuecommand() entry point for the emulated hosts. Looks the CDB up in
 * the opcode table, applies option-driven behaviour (CDB tracing, fake
 * host-busy/timeouts, strict CDB-mask checking, pending unit attentions,
 * stopped-unit handling) and schedules the selected resp_* handler with
 * the configured delay. Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;

	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally hex-dump the incoming CDB (first 32 bytes at most) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action: low 5 bits of
			 * cmd[1], or a big-endian u16 at cmd[8] */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			/* no variant matched: invalid field or opcode */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate the highest offending bit so the
				 * sense data can point at byte k, bit j */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* pending unit attention is reported first unless F_SKIP_UA is set */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
6168 
/* SCSI host template shared by every emulated adapter instance. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,	/* overwritten in sdebug_driver_probe() */
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
6195 
/*
 * Bus probe callback: allocate and configure a Scsi_Host for the pseudo
 * device, set DIF/DIX protection and guard type from the module
 * parameters, then add and scan the host. Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	/* hostdata only holds a back-pointer, hence sizeof(sdbg_host) */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate the dif/dix module parameters into SHOST_* flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
6293 
6294 static int sdebug_driver_remove(struct device *dev)
6295 {
6296 	struct sdebug_host_info *sdbg_host;
6297 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
6298 
6299 	sdbg_host = to_sdebug_host(dev);
6300 
6301 	if (!sdbg_host) {
6302 		pr_err("Unable to locate host info\n");
6303 		return -ENODEV;
6304 	}
6305 
6306 	scsi_remove_host(sdbg_host->shost);
6307 
6308 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
6309 				 dev_list) {
6310 		list_del(&sdbg_devinfo->dev_list);
6311 		kfree(sdbg_devinfo);
6312 	}
6313 
6314 	scsi_host_put(sdbg_host->shost);
6315 	return 0;
6316 }
6317 
/* Bus match callback: every device on the pseudo bus matches any driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
6323 
/* Pseudo bus that carries the emulated adapters and their driver attrs. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
6331