1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2018 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
13  */
14 
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 
18 #include <linux/module.h>
19 
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
27 #include <linux/fs.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 
42 #include <net/checksum.h>
43 
44 #include <asm/unaligned.h>
45 
46 #include <scsi/scsi.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_host.h>
50 #include <scsi/scsicam.h>
51 #include <scsi/scsi_eh.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/scsi_dbg.h>
54 
55 #include "sd.h"
56 #include "scsi_logging.h"
57 
58 /* make sure inq_product_rev string corresponds to this version */
59 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
60 static const char *sdebug_version_date = "20190125";
61 
62 #define MY_NAME "scsi_debug"
63 
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define LBA_OUT_OF_RANGE 0x21
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define WRITE_PROTECTED 0x27
75 #define UA_RESET_ASC 0x29
76 #define UA_CHANGED_ASC 0x2a
77 #define TARGET_CHANGED_ASC 0x3f
78 #define LUNS_CHANGED_ASCQ 0x0e
79 #define INSUFF_RES_ASC 0x55
80 #define INSUFF_RES_ASCQ 0x3
81 #define POWER_ON_RESET_ASCQ 0x0
82 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
83 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
84 #define CAPACITY_CHANGED_ASCQ 0x9
85 #define SAVING_PARAMS_UNSUP 0x39
86 #define TRANSPORT_PROBLEM 0x4b
87 #define THRESHOLD_EXCEEDED 0x5d
88 #define LOW_POWER_COND_ON 0x5e
89 #define MISCOMPARE_VERIFY_ASC 0x1d
90 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
91 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
92 #define WRITE_ERROR_ASC 0xc
93 
94 /* Additional Sense Code Qualifier (ASCQ) */
95 #define ACK_NAK_TO 0x3
96 
97 /* Default values for driver parameters */
98 #define DEF_NUM_HOST   1
99 #define DEF_NUM_TGTS   1
100 #define DEF_MAX_LUNS   1
101 /* With these defaults, this driver will make 1 host with 1 target
102  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
103  */
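/*
 * Editor's usage example (not driver code): the topology can be widened
 * at module load time, e.g.
 *
 *	modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4
 *
 * which yields 2 hosts, each with 2 targets of 4 LUNs: 16 devices.
 */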
104 #define DEF_ATO 1
105 #define DEF_CDB_LEN 10
106 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
107 #define DEF_DEV_SIZE_MB   8
108 #define DEF_DIF 0
109 #define DEF_DIX 0
110 #define DEF_D_SENSE   0
111 #define DEF_EVERY_NTH   0
112 #define DEF_FAKE_RW	0
113 #define DEF_GUARD 0
114 #define DEF_HOST_LOCK 0
115 #define DEF_LBPU 0
116 #define DEF_LBPWS 0
117 #define DEF_LBPWS10 0
118 #define DEF_LBPRZ 1
119 #define DEF_LOWEST_ALIGNED 0
120 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
121 #define DEF_NO_LUN_0   0
122 #define DEF_NUM_PARTS   0
123 #define DEF_OPTS   0
124 #define DEF_OPT_BLKS 1024
125 #define DEF_PHYSBLK_EXP 0
126 #define DEF_OPT_XFERLEN_EXP 0
127 #define DEF_PTYPE   TYPE_DISK
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DEF_STATISTICS false
140 #define DEF_SUBMIT_QUEUES 1
141 #define DEF_UUID_CTL 0
142 #define JDELAY_OVERRIDDEN -9999
143 
144 #define SDEBUG_LUN_0_VAL 0
145 
146 /* bit mask values for sdebug_opts */
147 #define SDEBUG_OPT_NOISE		1
148 #define SDEBUG_OPT_MEDIUM_ERR		2
149 #define SDEBUG_OPT_TIMEOUT		4
150 #define SDEBUG_OPT_RECOVERED_ERR	8
151 #define SDEBUG_OPT_TRANSPORT_ERR	16
152 #define SDEBUG_OPT_DIF_ERR		32
153 #define SDEBUG_OPT_DIX_ERR		64
154 #define SDEBUG_OPT_MAC_TIMEOUT		128
155 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
156 #define SDEBUG_OPT_Q_NOISE		0x200
157 #define SDEBUG_OPT_ALL_TSF		0x400
158 #define SDEBUG_OPT_RARE_TSF		0x800
159 #define SDEBUG_OPT_N_WCE		0x1000
160 #define SDEBUG_OPT_RESET_NOISE		0x2000
161 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
162 #define SDEBUG_OPT_HOST_BUSY		0x8000
163 #define SDEBUG_OPT_CMD_ABORT		0x10000
164 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
165 			      SDEBUG_OPT_RESET_NOISE)
166 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
167 				  SDEBUG_OPT_TRANSPORT_ERR | \
168 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
169 				  SDEBUG_OPT_SHORT_TRANSFER | \
170 				  SDEBUG_OPT_HOST_BUSY | \
171 				  SDEBUG_OPT_CMD_ABORT)
172 /* When "every_nth" > 0 then modulo "every_nth" commands:
173  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
174  *   - a RECOVERED_ERROR is simulated on successful read and write
175  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
176  *   - a TRANSPORT_ERROR is simulated on successful read and write
177  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
178  *   - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
179  *     CMD_ABORT
180  *
181  * When "every_nth" < 0 then after "- every_nth" commands the selected
182  * error will be injected. The error will be injected on every subsequent
183  * command until some other action occurs; for example, the user writing
184  * a new value (other than -1 or 1) to every_nth:
185  *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
186  */
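/*
 * Editor's sketch (hypothetical helper, not used by this driver) of the
 * cadence described above:
 */
static inline bool sdeb_every_nth_hit(int every_nth, int cmnd_count)
{
	if (every_nth > 0)
		return (cmnd_count % every_nth) == 0;
	if (every_nth < 0)	/* inject from the (-every_nth)'th command on */
		return cmnd_count >= -every_nth;
	return false;
}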
187 
188 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
189  * priority order. In the subset implemented here lower numbers have higher
190  * priority. The UA numbers should be a sequence starting from 0 with
191  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
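/* For example, if both SDEBUG_UA_POR (0) and SDEBUG_UA_LUNS_CHANGED (4)
 * are pending, the lower numbered (higher priority) POR UA is reported
 * first. */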
192 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
193 #define SDEBUG_UA_BUS_RESET 1
194 #define SDEBUG_UA_MODE_CHANGED 2
195 #define SDEBUG_UA_CAPACITY_CHANGED 3
196 #define SDEBUG_UA_LUNS_CHANGED 4
197 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
198 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
199 #define SDEBUG_NUM_UAS 7
200 
201 /* When the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium
202  * error is simulated at this sector on read commands: */
203 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
204 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
205 
206 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
207  * or "peripheral device" addressing (value 0) */
208 #define SAM2_LUN_ADDRESS_METHOD 0
209 
210 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
211  * (for response) per submit queue at one time. Can be reduced by max_queue
212  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
213  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
214  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
215  * but cannot exceed SDEBUG_CANQUEUE.
216  */
217 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD here is BITS_PER_LONG bits */
218 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
219 #define DEF_CMD_PER_LUN  255
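/* Editor's note: with 64 bit longs, SDEBUG_CANQUEUE is 3 * 64 = 192
 * queueable commands per submit queue; on 32 bit builds it is 3 * 32 = 96. */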
220 
221 #define F_D_IN			1
222 #define F_D_OUT			2
223 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
224 #define F_D_UNKN		8
225 #define F_RL_WLUN_OK		0x10
226 #define F_SKIP_UA		0x20
227 #define F_DELAY_OVERR		0x40
228 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
229 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
230 #define F_INV_OP		0x200
231 #define F_FAKE_RW		0x400
232 #define F_M_ACCESS		0x800	/* media access */
233 #define F_SSU_DELAY		0x1000
234 #define F_SYNC_DELAY		0x2000
235 
236 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
237 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
238 #define FF_SA (F_SA_HIGH | F_SA_LOW)
239 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
240 
241 #define SDEBUG_MAX_PARTS 4
242 
243 #define SDEBUG_MAX_CMD_LEN 32
244 
245 
246 struct sdebug_dev_info {
247 	struct list_head dev_list;
248 	unsigned int channel;
249 	unsigned int target;
250 	u64 lun;
251 	uuid_t lu_name;
252 	struct sdebug_host_info *sdbg_host;
253 	unsigned long uas_bm[1];
254 	atomic_t num_in_q;
255 	atomic_t stopped;
256 	bool used;
257 };
258 
259 struct sdebug_host_info {
260 	struct list_head host_list;
261 	struct Scsi_Host *shost;
262 	struct device dev;
263 	struct list_head dev_info_list;
264 };
265 
266 #define to_sdebug_host(d)	\
267 	container_of(d, struct sdebug_host_info, dev)
268 
269 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
270 		      SDEB_DEFER_WQ = 2};
271 
272 struct sdebug_defer {
273 	struct hrtimer hrt;
274 	struct execute_work ew;
275 	int sqa_idx;	/* index of sdebug_queue array */
276 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
277 	int issuing_cpu;
278 	bool init_hrt;
279 	bool init_wq;
280 	bool aborted;	/* true when blk_abort_request() already called */
281 	enum sdeb_defer_type defer_t;
282 };
283 
284 struct sdebug_queued_cmd {
285 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
286 	 * instance indicates this slot is in use.
287 	 */
288 	struct sdebug_defer *sd_dp;
289 	struct scsi_cmnd *a_cmnd;
290 	unsigned int inj_recovered:1;
291 	unsigned int inj_transport:1;
292 	unsigned int inj_dif:1;
293 	unsigned int inj_dix:1;
294 	unsigned int inj_short:1;
295 	unsigned int inj_host_busy:1;
296 	unsigned int inj_cmd_abort:1;
297 };
298 
299 struct sdebug_queue {
300 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
301 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
302 	spinlock_t qc_lock;
303 	atomic_t blocked;	/* to temporarily stop more being queued */
304 };
305 
306 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
307 static atomic_t sdebug_completions;  /* count of deferred completions */
308 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
309 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
310 
311 struct opcode_info_t {
312 	u8 num_attached;	/* 0 if this is a leaf (nothing attached); */
313 				/* 0xff marks the terminating element */
314 	u8 opcode;		/* if num_attached > 0, preferred */
315 	u16 sa;			/* service action */
316 	u32 flags;		/* OR-ed set of SDEB_F_* */
317 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
318 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
319 	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
320 				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
321 };
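/*
 * Editor's sketch (hypothetical helper, not used by this driver) of how
 * len_mask[] is meant to be consumed in strict checking: every bit set
 * in a cdb byte must also be set in the corresponding mask byte.
 */
static inline bool sdeb_cdb_bits_ok(const u8 *cdb, const u8 *len_mask)
{
	int k, end = min_t(int, len_mask[0], 16);

	for (k = 1; k < end; k++) {
		if (cdb[k] & ~len_mask[k])
			return false;	/* reserved/invalid bit set */
	}
	return true;
}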
322 
323 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
324 enum sdeb_opcode_index {
325 	SDEB_I_INVALID_OPCODE =	0,
326 	SDEB_I_INQUIRY = 1,
327 	SDEB_I_REPORT_LUNS = 2,
328 	SDEB_I_REQUEST_SENSE = 3,
329 	SDEB_I_TEST_UNIT_READY = 4,
330 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
331 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
332 	SDEB_I_LOG_SENSE = 7,
333 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
334 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
335 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
336 	SDEB_I_START_STOP = 11,
337 	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
338 	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
339 	SDEB_I_MAINT_IN = 14,
340 	SDEB_I_MAINT_OUT = 15,
341 	SDEB_I_VERIFY = 16,		/* 10 only */
342 	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
343 	SDEB_I_RESERVE = 18,		/* 6, 10 */
344 	SDEB_I_RELEASE = 19,		/* 6, 10 */
345 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
346 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
347 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
348 	SDEB_I_SEND_DIAG = 23,
349 	SDEB_I_UNMAP = 24,
350 	SDEB_I_WRITE_BUFFER = 25,
351 	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
352 	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
353 	SDEB_I_COMP_WRITE = 28,
354 	SDEB_I_LAST_ELEMENT = 29,	/* keep this last (previous + 1) */
355 };
356 
357 
358 static const unsigned char opcode_ind_arr[256] = {
359 /* 0x0; 0x0->0x1f: 6 byte cdbs */
360 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
361 	    0, 0, 0, 0,
362 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
363 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
364 	    SDEB_I_RELEASE,
365 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
366 	    SDEB_I_ALLOW_REMOVAL, 0,
367 /* 0x20; 0x20->0x3f: 10 byte cdbs */
368 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
369 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
370 	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
371 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
372 /* 0x40; 0x40->0x5f: 10 byte cdbs */
373 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
374 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
375 	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
376 	    SDEB_I_RELEASE,
377 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
378 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
379 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
380 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
381 	0, SDEB_I_VARIABLE_LEN,
382 /* 0x80; 0x80->0x9f: 16 byte cdbs */
383 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
384 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
385 	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
386 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
387 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
388 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
389 	     SDEB_I_MAINT_OUT, 0, 0, 0,
390 	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
391 	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
392 	0, 0, 0, 0, 0, 0, 0, 0,
393 	0, 0, 0, 0, 0, 0, 0, 0,
394 /* 0xc0; 0xc0->0xff: vendor specific */
395 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
396 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
397 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
398 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
399 };
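/*
 * Editor's illustration: command decode is a two step table walk, first
 * from cdb[0] to an SDEB_I_* index, then into opcode_info_arr[] defined
 * below; roughly:
 *
 *	int idx = opcode_ind_arr[cdb[0]];
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *	if (oip->flags & F_INV_OP)
 *		reject with ILLEGAL REQUEST, INVALID COMMAND OPERATION CODE;
 */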
400 
401 /*
402  * The following "response" functions return the SCSI mid-level's 4 byte
403  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
404  * command completion, they can OR their return value with
405  * SDEG_RES_IMMED_MASK.
406  */
407 #define SDEG_RES_IMMED_MASK 0x40000000
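/*
 * Editor's example: a response function that honors an IMMED bit can
 * report early success while the simulated work is still "in progress",
 * ending with something like:
 *
 *	return res | (immed ? SDEG_RES_IMMED_MASK : 0);
 */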
408 
409 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
431 
432 /*
433  * The following are overflow arrays for cdbs that "hit" the same index in
434  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
435  * should be placed in opcode_info_arr[], the others should be placed here.
436  */
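/*
 * Editor's illustration: when a cdb maps to an index whose preferred
 * opcode (or service action) does not match, the attached array is
 * scanned linearly; e.g. MODE SENSE(6) (0x1a) shares SDEB_I_MODE_SENSE
 * with MODE SENSE(10) and is found in msense_iarr[] below.
 */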
437 static const struct opcode_info_t msense_iarr[] = {
438 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
439 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
440 };
441 
442 static const struct opcode_info_t mselect_iarr[] = {
443 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
444 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
445 };
446 
447 static const struct opcode_info_t read_iarr[] = {
448 	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
449 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
450 	     0, 0, 0, 0} },
451 	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
452 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
453 	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
454 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
455 	     0xc7, 0, 0, 0, 0} },
456 };
457 
458 static const struct opcode_info_t write_iarr[] = {
459 	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
460 	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
461 		   0, 0, 0, 0, 0, 0} },
462 	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
463 	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
464 		   0, 0, 0} },
465 	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
466 	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
467 		   0xbf, 0xc7, 0, 0, 0, 0} },
468 };
469 
470 static const struct opcode_info_t sa_in_16_iarr[] = {
471 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
472 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
473 	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
474 };
475 
476 static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
477 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
478 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
479 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
480 	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
481 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
482 		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
483 };
484 
485 static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
486 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
487 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
488 	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
489 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
490 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
491 	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
492 };
493 
494 static const struct opcode_info_t write_same_iarr[] = {
495 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
496 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
497 	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
498 };
499 
500 static const struct opcode_info_t reserve_iarr[] = {
501 	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
502 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
503 };
504 
505 static const struct opcode_info_t release_iarr[] = {
506 	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
507 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
508 };
509 
510 static const struct opcode_info_t sync_cache_iarr[] = {
511 	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
512 	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
513 	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
514 };
515 
516 
517 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
518  * plus the terminating elements for logic that scans this table such as
519  * REPORT SUPPORTED OPERATION CODES. */
520 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
521 /* 0 */
522 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
523 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
524 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
525 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
527 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
528 	     0, 0} },					/* REPORT LUNS */
529 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
530 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
532 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
533 /* 5 */
534 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
535 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
536 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
537 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
538 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
539 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
540 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
541 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
542 	     0, 0, 0} },
543 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
544 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
545 	     0, 0} },
546 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
547 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
548 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
549 /* 10 */
550 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
551 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
552 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
553 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
554 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
555 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
556 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
557 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
558 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
559 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
560 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
561 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
562 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
563 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
564 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
565 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
566 				0xff, 0, 0xc7, 0, 0, 0, 0} },
567 /* 15 */
568 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
569 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
571 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
572 	     0, 0, 0, 0, 0, 0} },
573 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
574 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
575 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
576 	     0xff, 0xff} },
577 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
578 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
579 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
580 	     0} },
581 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
582 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
583 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
584 	     0} },
585 /* 20 */
586 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
587 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
589 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
590 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
591 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
593 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
595 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
596 /* 25 */
597 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
598 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
599 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
600 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
601 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
602 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
603 		 0, 0, 0, 0, 0} },
604 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
605 	    resp_sync_cache, sync_cache_iarr,
606 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
607 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
608 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
609 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
610 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
611 
612 /* 29 */
613 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
614 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
615 };
616 
617 static int sdebug_add_host = DEF_NUM_HOST;
618 static int sdebug_ato = DEF_ATO;
619 static int sdebug_cdb_len = DEF_CDB_LEN;
620 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
621 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
622 static int sdebug_dif = DEF_DIF;
623 static int sdebug_dix = DEF_DIX;
624 static int sdebug_dsense = DEF_D_SENSE;
625 static int sdebug_every_nth = DEF_EVERY_NTH;
626 static int sdebug_fake_rw = DEF_FAKE_RW;
627 static unsigned int sdebug_guard = DEF_GUARD;
628 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
629 static int sdebug_max_luns = DEF_MAX_LUNS;
630 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
631 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
632 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
633 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
634 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
635 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
636 static int sdebug_no_uld;
637 static int sdebug_num_parts = DEF_NUM_PARTS;
638 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
639 static int sdebug_opt_blks = DEF_OPT_BLKS;
640 static int sdebug_opts = DEF_OPTS;
641 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
642 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
643 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
644 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
645 static int sdebug_sector_size = DEF_SECTOR_SIZE;
646 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
647 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
648 static unsigned int sdebug_lbpu = DEF_LBPU;
649 static unsigned int sdebug_lbpws = DEF_LBPWS;
650 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
651 static unsigned int sdebug_lbprz = DEF_LBPRZ;
652 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
653 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
654 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
655 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
656 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
657 static int sdebug_uuid_ctl = DEF_UUID_CTL;
658 static bool sdebug_removable = DEF_REMOVABLE;
659 static bool sdebug_clustering;
660 static bool sdebug_host_lock = DEF_HOST_LOCK;
661 static bool sdebug_strict = DEF_STRICT;
662 static bool sdebug_any_injecting_opt;
663 static bool sdebug_verbose;
664 static bool have_dif_prot;
665 static bool write_since_sync;
666 static bool sdebug_statistics = DEF_STATISTICS;
667 static bool sdebug_wp;
668 
669 static unsigned int sdebug_store_sectors;
670 static sector_t sdebug_capacity;	/* in sectors */
671 
672 /* Old BIOS disk geometry; the kernel may eventually drop these but some
673  * mode sense pages may still need them. */
674 static int sdebug_heads;		/* heads per disk */
675 static int sdebug_cylinders_per;	/* cylinders per surface */
676 static int sdebug_sectors_per;		/* sectors per cylinder */
677 
678 static LIST_HEAD(sdebug_host_list);
679 static DEFINE_SPINLOCK(sdebug_host_list_lock);
680 
681 static unsigned char *fake_storep;	/* ramdisk storage */
682 static struct t10_pi_tuple *dif_storep;	/* protection info */
683 static void *map_storep;		/* provisioning map */
684 
685 static unsigned long map_size;
686 static int num_aborts;
687 static int num_dev_resets;
688 static int num_target_resets;
689 static int num_bus_resets;
690 static int num_host_resets;
691 static int dix_writes;
692 static int dix_reads;
693 static int dif_errors;
694 
695 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
696 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
697 
698 static DEFINE_RWLOCK(atomic_rw);
699 
700 static char sdebug_proc_name[] = MY_NAME;
701 static const char *my_name = MY_NAME;
702 
703 static struct bus_type pseudo_lld_bus;
704 
705 static struct device_driver sdebug_driverfs_driver = {
706 	.name 		= sdebug_proc_name,
707 	.bus		= &pseudo_lld_bus,
708 };
709 
710 static const int check_condition_result =
711 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
712 
713 static const int illegal_condition_result =
714 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
715 
716 static const int device_qfull_result =
717 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
718 
719 
720 /* Only do the extra work involved in logical block provisioning if one or
721  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
722  * real reads and writes (i.e. not skipping them for speed).
723  */
724 static inline bool scsi_debug_lbp(void)
725 {
726 	return 0 == sdebug_fake_rw &&
727 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
728 }
729 
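/* Map an arbitrary LBA onto the (possibly smaller) ramdisk. Note that
 * do_div() divides in place and returns the remainder, so lba below
 * ends up as lba % sdebug_store_sectors.
 */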
730 static void *lba2fake_store(unsigned long long lba)
731 {
732 	lba = do_div(lba, sdebug_store_sectors);
733 
734 	return fake_storep + lba * sdebug_sector_size;
735 }
736 
737 static struct t10_pi_tuple *dif_store(sector_t sector)
738 {
739 	sector = sector_div(sector, sdebug_store_sectors);
740 
741 	return dif_storep + sector;
742 }
743 
744 static void sdebug_max_tgts_luns(void)
745 {
746 	struct sdebug_host_info *sdbg_host;
747 	struct Scsi_Host *hpnt;
748 
749 	spin_lock(&sdebug_host_list_lock);
750 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
751 		hpnt = sdbg_host->shost;
752 		if ((hpnt->this_id >= 0) &&
753 		    (sdebug_num_tgts > hpnt->this_id))
754 			hpnt->max_id = sdebug_num_tgts + 1;
755 		else
756 			hpnt->max_id = sdebug_num_tgts;
757 		/* not sdebug_max_luns: allow the REPORT LUNS well known LUN */
758 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
759 	}
760 	spin_unlock(&sdebug_host_list_lock);
761 }
762 
763 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
764 
765 /* Set in_bit to -1 to indicate no bit position of invalid field */
766 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
767 				 enum sdeb_cmd_data c_d,
768 				 int in_byte, int in_bit)
769 {
770 	unsigned char *sbuff;
771 	u8 sks[4];
772 	int sl, asc;
773 
774 	sbuff = scp->sense_buffer;
775 	if (!sbuff) {
776 		sdev_printk(KERN_ERR, scp->device,
777 			    "%s: sense_buffer is NULL\n", __func__);
778 		return;
779 	}
780 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
781 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
782 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
783 	memset(sks, 0, sizeof(sks));
784 	sks[0] = 0x80;
785 	if (c_d)
786 		sks[0] |= 0x40;
787 	if (in_bit >= 0) {
788 		sks[0] |= 0x8;
789 		sks[0] |= 0x7 & in_bit;
790 	}
791 	put_unaligned_be16(in_byte, sks + 1);
792 	if (sdebug_dsense) {
793 		sl = sbuff[7] + 8;
794 		sbuff[7] = sl;
795 		sbuff[sl] = 0x2;
796 		sbuff[sl + 1] = 0x6;
797 		memcpy(sbuff + sl + 4, sks, 3);
798 	} else
799 		memcpy(sbuff + 15, sks, 3);
800 	if (sdebug_verbose)
801 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
802 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
803 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
804 }
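/*
 * Editor's worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 3)
 * with fixed format sense (sdebug_dsense=0) yields sense key ILLEGAL
 * REQUEST (0x5), asc/ascq 0x24/0x0, and sense-key-specific bytes
 * 0xcb 0x00 0x02 at offsets 15..17 (SKSV+C/D+BPV, bit 3, byte 2).
 */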
805 
806 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
807 {
808 	unsigned char *sbuff;
809 
810 	sbuff = scp->sense_buffer;
811 	if (!sbuff) {
812 		sdev_printk(KERN_ERR, scp->device,
813 			    "%s: sense_buffer is NULL\n", __func__);
814 		return;
815 	}
816 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
817 
818 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
819 
820 	if (sdebug_verbose)
821 		sdev_printk(KERN_INFO, scp->device,
822 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
823 			    my_name, key, asc, asq);
824 }
825 
826 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
827 {
828 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
829 }
830 
831 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
832 			    void __user *arg)
833 {
834 	if (sdebug_verbose) {
835 		if (0x1261 == cmd)
836 			sdev_printk(KERN_INFO, dev,
837 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
838 		else if (0x5331 == cmd)
839 			sdev_printk(KERN_INFO, dev,
840 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
841 				    __func__);
842 		else
843 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
844 				    __func__, cmd);
845 	}
846 	return -EINVAL;
847 	/* return -ENOTTY; // correct return but upsets fdisk */
848 }
849 
850 static void config_cdb_len(struct scsi_device *sdev)
851 {
852 	switch (sdebug_cdb_len) {
853 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
854 		sdev->use_10_for_rw = false;
855 		sdev->use_16_for_rw = false;
856 		sdev->use_10_for_ms = false;
857 		break;
858 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
859 		sdev->use_10_for_rw = true;
860 		sdev->use_16_for_rw = false;
861 		sdev->use_10_for_ms = false;
862 		break;
863 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
864 		sdev->use_10_for_rw = true;
865 		sdev->use_16_for_rw = false;
866 		sdev->use_10_for_ms = true;
867 		break;
868 	case 16:
869 		sdev->use_10_for_rw = false;
870 		sdev->use_16_for_rw = true;
871 		sdev->use_10_for_ms = true;
872 		break;
873 	case 32: /* No knobs to suggest this so same as 16 for now */
874 		sdev->use_10_for_rw = false;
875 		sdev->use_16_for_rw = true;
876 		sdev->use_10_for_ms = true;
877 		break;
878 	default:
879 		pr_warn("unexpected cdb_len=%d, force to 10\n",
880 			sdebug_cdb_len);
881 		sdev->use_10_for_rw = true;
882 		sdev->use_16_for_rw = false;
883 		sdev->use_10_for_ms = false;
884 		sdebug_cdb_len = 10;
885 		break;
886 	}
887 }
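/* Editor's usage note: sdebug_cdb_len normally arrives via the cdb_len
 * parameter, e.g. (sysfs path assumed from the every_nth example above):
 *	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len
 */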
888 
889 static void all_config_cdb_len(void)
890 {
891 	struct sdebug_host_info *sdbg_host;
892 	struct Scsi_Host *shost;
893 	struct scsi_device *sdev;
894 
895 	spin_lock(&sdebug_host_list_lock);
896 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
897 		shost = sdbg_host->shost;
898 		shost_for_each_device(sdev, shost) {
899 			config_cdb_len(sdev);
900 		}
901 	}
902 	spin_unlock(&sdebug_host_list_lock);
903 }
904 
905 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
906 {
907 	struct sdebug_host_info *sdhp;
908 	struct sdebug_dev_info *dp;
909 
910 	spin_lock(&sdebug_host_list_lock);
911 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
912 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
913 			if ((devip->sdbg_host == dp->sdbg_host) &&
914 			    (devip->target == dp->target))
915 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
916 		}
917 	}
918 	spin_unlock(&sdebug_host_list_lock);
919 }
920 
921 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
922 {
923 	int k;
924 
925 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
926 	if (k != SDEBUG_NUM_UAS) {
927 		const char *cp = NULL;
928 
929 		switch (k) {
930 		case SDEBUG_UA_POR:
931 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
932 					POWER_ON_RESET_ASCQ);
933 			if (sdebug_verbose)
934 				cp = "power on reset";
935 			break;
936 		case SDEBUG_UA_BUS_RESET:
937 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
938 					BUS_RESET_ASCQ);
939 			if (sdebug_verbose)
940 				cp = "bus reset";
941 			break;
942 		case SDEBUG_UA_MODE_CHANGED:
943 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
944 					MODE_CHANGED_ASCQ);
945 			if (sdebug_verbose)
946 				cp = "mode parameters changed";
947 			break;
948 		case SDEBUG_UA_CAPACITY_CHANGED:
949 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
950 					CAPACITY_CHANGED_ASCQ);
951 			if (sdebug_verbose)
952 				cp = "capacity data changed";
953 			break;
954 		case SDEBUG_UA_MICROCODE_CHANGED:
955 			mk_sense_buffer(scp, UNIT_ATTENTION,
956 					TARGET_CHANGED_ASC,
957 					MICROCODE_CHANGED_ASCQ);
958 			if (sdebug_verbose)
959 				cp = "microcode has been changed";
960 			break;
961 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
962 			mk_sense_buffer(scp, UNIT_ATTENTION,
963 					TARGET_CHANGED_ASC,
964 					MICROCODE_CHANGED_WO_RESET_ASCQ);
965 			if (sdebug_verbose)
966 				cp = "microcode has been changed without reset";
967 			break;
968 		case SDEBUG_UA_LUNS_CHANGED:
969 			/*
970 			 * SPC-3 behavior is to report a UNIT ATTENTION with
971 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
972 			 * on the target, until a REPORT LUNS command is
973 			 * received.  SPC-4 behavior is to report it only once.
974 			 * NOTE:  sdebug_scsi_level does not use the same
975 			 * values as struct scsi_device->scsi_level.
976 			 */
977 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
978 				clear_luns_changed_on_target(devip);
979 			mk_sense_buffer(scp, UNIT_ATTENTION,
980 					TARGET_CHANGED_ASC,
981 					LUNS_CHANGED_ASCQ);
982 			if (sdebug_verbose)
983 				cp = "reported luns data has changed";
984 			break;
985 		default:
986 			pr_warn("unexpected unit attention code=%d\n", k);
987 			if (sdebug_verbose)
988 				cp = "unknown";
989 			break;
990 		}
991 		clear_bit(k, devip->uas_bm);
992 		if (sdebug_verbose)
993 			sdev_printk(KERN_INFO, scp->device,
994 				   "%s reports: Unit attention: %s\n",
995 				   my_name, cp);
996 		return check_condition_result;
997 	}
998 	return 0;
999 }
1000 
1001 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1002 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1003 				int arr_len)
1004 {
1005 	int act_len;
1006 	struct scsi_data_buffer *sdb = &scp->sdb;
1007 
1008 	if (!sdb->length)
1009 		return 0;
1010 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1011 		return DID_ERROR << 16;
1012 
1013 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1014 				      arr, arr_len);
1015 	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1016 
1017 	return 0;
1018 }
1019 
1020 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1021  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1022  * calls, not required to write in ascending offset order. Assumes resid
1023  * set to scsi_bufflen() prior to any calls.
1024  */
1025 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1026 				  int arr_len, unsigned int off_dst)
1027 {
1028 	int act_len, n;
1029 	struct scsi_data_buffer *sdb = &scp->sdb;
1030 	off_t skip = off_dst;
1031 
1032 	if (sdb->length <= off_dst)
1033 		return 0;
1034 	if (scp->sc_data_direction != DMA_FROM_DEVICE)
1035 		return DID_ERROR << 16;
1036 
1037 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1038 				       arr, arr_len, skip);
1039 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1040 		 __func__, off_dst, scsi_bufflen(scp), act_len,
1041 		 scsi_get_resid(scp));
1042 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1043 	scsi_set_resid(scp, min(scsi_get_resid(scp), n));
1044 	return 0;
1045 }
1046 
1047 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1048  * 'arr' or -1 if error.
1049  */
1050 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1051 			       int arr_len)
1052 {
1053 	if (!scsi_bufflen(scp))
1054 		return 0;
1055 	if (scp->sc_data_direction != DMA_TO_DEVICE)
1056 		return -1;
1057 
1058 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1059 }
1060 
1061 
1062 static char sdebug_inq_vendor_id[9] = "Linux   ";
1063 static char sdebug_inq_product_id[17] = "scsi_debug      ";
1064 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1065 /* Use some locally assigned NAAs for SAS addresses. */
1066 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1067 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1068 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1069 
1070 /* Device identification VPD page. Returns number of bytes placed in arr */
1071 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1072 			  int target_dev_id, int dev_id_num,
1073 			  const char *dev_id_str, int dev_id_str_len,
1074 			  const uuid_t *lu_name)
1075 {
1076 	int num, port_a;
1077 	char b[32];
1078 
1079 	port_a = target_dev_id + 1;
1080 	/* T10 vendor identifier field format (faked) */
1081 	arr[0] = 0x2;	/* ASCII */
1082 	arr[1] = 0x1;
1083 	arr[2] = 0x0;
1084 	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1085 	memcpy(&arr[12], sdebug_inq_product_id, 16);
1086 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
1087 	num = 8 + 16 + dev_id_str_len;
1088 	arr[3] = num;
1089 	num += 4;
1090 	if (dev_id_num >= 0) {
1091 		if (sdebug_uuid_ctl) {
1092 			/* Locally assigned UUID */
1093 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1094 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
1095 			arr[num++] = 0x0;
1096 			arr[num++] = 0x12;
1097 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
1098 			arr[num++] = 0x0;
1099 			memcpy(arr + num, lu_name, 16);
1100 			num += 16;
1101 		} else {
1102 			/* NAA-3, Logical unit identifier (binary) */
1103 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
1104 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
1105 			arr[num++] = 0x0;
1106 			arr[num++] = 0x8;
1107 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1108 			num += 8;
1109 		}
1110 		/* Target relative port number */
1111 		arr[num++] = 0x61;	/* proto=sas, binary */
1112 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
1113 		arr[num++] = 0x0;	/* reserved */
1114 		arr[num++] = 0x4;	/* length */
1115 		arr[num++] = 0x0;	/* reserved */
1116 		arr[num++] = 0x0;	/* reserved */
1117 		arr[num++] = 0x0;
1118 		arr[num++] = 0x1;	/* relative port A */
1119 	}
1120 	/* NAA-3, Target port identifier */
1121 	arr[num++] = 0x61;	/* proto=sas, binary */
1122 	arr[num++] = 0x93;	/* piv=1, target port, naa */
1123 	arr[num++] = 0x0;
1124 	arr[num++] = 0x8;
1125 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1126 	num += 8;
1127 	/* NAA-3, Target port group identifier */
1128 	arr[num++] = 0x61;	/* proto=sas, binary */
1129 	arr[num++] = 0x95;	/* piv=1, target port group id */
1130 	arr[num++] = 0x0;
1131 	arr[num++] = 0x4;
1132 	arr[num++] = 0;
1133 	arr[num++] = 0;
1134 	put_unaligned_be16(port_group_id, arr + num);
1135 	num += 2;
1136 	/* NAA-3, Target device identifier */
1137 	arr[num++] = 0x61;	/* proto=sas, binary */
1138 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1139 	arr[num++] = 0x0;
1140 	arr[num++] = 0x8;
1141 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1142 	num += 8;
1143 	/* SCSI name string: Target device identifier */
1144 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1145 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1146 	arr[num++] = 0x0;
1147 	arr[num++] = 24;
1148 	memcpy(arr + num, "naa.32222220", 12);
1149 	num += 12;
1150 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1151 	memcpy(arr + num, b, 8);
1152 	num += 8;
1153 	memset(arr + num, 0, 4);
1154 	num += 4;
1155 	return num;
1156 }
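/*
 * Editor's worked example: assuming host 0, target 0, lun 0, the formula
 * in resp_inquiry() below gives dev_id_num = 2000, so the NAA-3 logical
 * unit designator built above is naa3_comp_b + 2000 = 0x33333330000007d0.
 */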
1157 
1158 static unsigned char vpd84_data[] = {
1159 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1160     0x22,0x22,0x22,0x0,0xbb,0x1,
1161     0x22,0x22,0x22,0x0,0xbb,0x2,
1162 };
1163 
1164 /*  Software interface identification VPD page */
1165 static int inquiry_vpd_84(unsigned char *arr)
1166 {
1167 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1168 	return sizeof(vpd84_data);
1169 }
1170 
1171 /* Management network addresses VPD page */
1172 static int inquiry_vpd_85(unsigned char *arr)
1173 {
1174 	int num = 0;
1175 	const char *na1 = "https://www.kernel.org/config";
1176 	const char *na2 = "http://www.kernel.org/log";
1177 	int plen, olen;
1178 
1179 	arr[num++] = 0x1;	/* lu, storage config */
1180 	arr[num++] = 0x0;	/* reserved */
1181 	arr[num++] = 0x0;
1182 	olen = strlen(na1);
1183 	plen = olen + 1;
1184 	if (plen % 4)
1185 		plen = ((plen / 4) + 1) * 4;
1186 	arr[num++] = plen;	/* length, null terminated, padded */
1187 	memcpy(arr + num, na1, olen);
1188 	memset(arr + num + olen, 0, plen - olen);
1189 	num += plen;
1190 
1191 	arr[num++] = 0x4;	/* lu, logging */
1192 	arr[num++] = 0x0;	/* reserved */
1193 	arr[num++] = 0x0;
1194 	olen = strlen(na2);
1195 	plen = olen + 1;
1196 	if (plen % 4)
1197 		plen = ((plen / 4) + 1) * 4;
1198 	arr[num++] = plen;	/* length, null terminated, padded */
1199 	memcpy(arr + num, na2, olen);
1200 	memset(arr + num + olen, 0, plen - olen);
1201 	num += plen;
1202 
1203 	return num;
1204 }
1205 
1206 /* SCSI ports VPD page */
1207 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1208 {
1209 	int num = 0;
1210 	int port_a, port_b;
1211 
1212 	port_a = target_dev_id + 1;
1213 	port_b = port_a + 1;
1214 	arr[num++] = 0x0;	/* reserved */
1215 	arr[num++] = 0x0;	/* reserved */
1216 	arr[num++] = 0x0;
1217 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1218 	memset(arr + num, 0, 6);
1219 	num += 6;
1220 	arr[num++] = 0x0;
1221 	arr[num++] = 12;	/* length tp descriptor */
1222 	/* naa-5 target port identifier (A) */
1223 	arr[num++] = 0x61;	/* proto=sas, binary */
1224 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1225 	arr[num++] = 0x0;	/* reserved */
1226 	arr[num++] = 0x8;	/* length */
1227 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1228 	num += 8;
1229 	arr[num++] = 0x0;	/* reserved */
1230 	arr[num++] = 0x0;	/* reserved */
1231 	arr[num++] = 0x0;
1232 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1233 	memset(arr + num, 0, 6);
1234 	num += 6;
1235 	arr[num++] = 0x0;
1236 	arr[num++] = 12;	/* length tp descriptor */
1237 	/* naa-5 target port identifier (B) */
1238 	arr[num++] = 0x61;	/* proto=sas, binary */
1239 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1240 	arr[num++] = 0x0;	/* reserved */
1241 	arr[num++] = 0x8;	/* length */
1242 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1243 	num += 8;
1244 
1245 	return num;
1246 }
1247 
1248 
1249 static unsigned char vpd89_data[] = {
1250 /* from 4th byte */ 0,0,0,0,
1251 'l','i','n','u','x',' ',' ',' ',
1252 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1253 '1','2','3','4',
1254 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1255 0xec,0,0,0,
1256 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1257 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1258 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1259 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1260 0x53,0x41,
1261 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1262 0x20,0x20,
1263 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1264 0x10,0x80,
1265 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1266 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1267 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1268 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1269 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1270 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1271 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1272 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1273 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1274 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1275 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1276 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1277 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1278 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1279 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1280 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1281 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1282 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1283 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1284 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1285 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1286 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1287 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1288 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1289 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1290 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1291 };
1292 
1293 /* ATA Information VPD page */
1294 static int inquiry_vpd_89(unsigned char *arr)
1295 {
1296 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1297 	return sizeof(vpd89_data);
1298 }
1299 
1300 
1301 static unsigned char vpdb0_data[] = {
1302 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1303 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1304 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1305 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1306 };
1307 
1308 /* Block limits VPD page (SBC-3) */
1309 static int inquiry_vpd_b0(unsigned char *arr)
1310 {
1311 	unsigned int gran;
1312 
1313 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1314 
1315 	/* Optimal transfer length granularity */
1316 	if (sdebug_opt_xferlen_exp != 0 &&
1317 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1318 		gran = 1 << sdebug_opt_xferlen_exp;
1319 	else
1320 		gran = 1 << sdebug_physblk_exp;
1321 	put_unaligned_be16(gran, arr + 2);
1322 
1323 	/* Maximum Transfer Length */
1324 	if (sdebug_store_sectors > 0x400)
1325 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1326 
1327 	/* Optimal Transfer Length */
1328 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1329 
1330 	if (sdebug_lbpu) {
1331 		/* Maximum Unmap LBA Count */
1332 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1333 
1334 		/* Maximum Unmap Block Descriptor Count */
1335 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1336 	}
1337 
1338 	/* Unmap Granularity Alignment */
1339 	if (sdebug_unmap_alignment) {
1340 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1341 		arr[28] |= 0x80; /* UGAVALID */
1342 	}
1343 
1344 	/* Optimal Unmap Granularity */
1345 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1346 
1347 	/* Maximum WRITE SAME Length */
1348 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1349 
1350 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1353 }
1354 
1355 /* Block device characteristics VPD page (SBC-3) */
1356 static int inquiry_vpd_b1(unsigned char *arr)
1357 {
1358 	memset(arr, 0, 0x3c);
1359 	arr[0] = 0;
1360 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1361 	arr[2] = 0;
1362 	arr[3] = 5;	/* less than 1.8" */
1363 
1364 	return 0x3c;
1365 }
1366 
1367 /* Logical block provisioning VPD page (SBC-4) */
1368 static int inquiry_vpd_b2(unsigned char *arr)
1369 {
1370 	memset(arr, 0, 0x4);
1371 	arr[0] = 0;			/* threshold exponent */
1372 	if (sdebug_lbpu)
1373 		arr[1] = 1 << 7;
1374 	if (sdebug_lbpws)
1375 		arr[1] |= 1 << 6;
1376 	if (sdebug_lbpws10)
1377 		arr[1] |= 1 << 5;
1378 	if (sdebug_lbprz && scsi_debug_lbp())
1379 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1380 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1381 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1382 	/* threshold_percentage=0 */
1383 	return 0x4;
1384 }
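/* Editor's worked example: with lbpu=1, lbprz=1 and fake_rw=0, the byte
 * built above is (1 << 7) | (1 << 2) = 0x84. */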
1385 
1386 #define SDEBUG_LONG_INQ_SZ 96
1387 #define SDEBUG_MAX_INQ_ARR_SZ 584
1388 
1389 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1390 {
1391 	unsigned char pq_pdt;
1392 	unsigned char *arr;
1393 	unsigned char *cmd = scp->cmnd;
1394 	int alloc_len, n, ret;
1395 	bool have_wlun, is_disk;
1396 
1397 	alloc_len = get_unaligned_be16(cmd + 3);
1398 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1399 	if (!arr)
1400 		return DID_REQUEUE << 16;
1401 	is_disk = (sdebug_ptype == TYPE_DISK);
1402 	have_wlun = scsi_is_wlun(scp->device->lun);
1403 	if (have_wlun)
1404 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1405 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1406 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1407 	else
1408 		pq_pdt = (sdebug_ptype & 0x1f);
1409 	arr[0] = pq_pdt;
1410 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1411 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1412 		kfree(arr);
1413 		return check_condition_result;
1414 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1415 		int lu_id_num, port_group_id, target_dev_id, len;
1416 		char lu_id_str[6];
1417 		int host_no = devip->sdbg_host->shost->host_no;
1418 
1419 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1420 		    (devip->channel & 0x7f);
1421 		if (sdebug_vpd_use_hostno == 0)
1422 			host_no = 0;
1423 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1424 			    (devip->target * 1000) + devip->lun);
1425 		target_dev_id = ((host_no + 1) * 2000) +
1426 				 (devip->target * 1000) - 3;
1427 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1428 		if (0 == cmd[2]) { /* supported vital product data pages */
1429 			arr[1] = cmd[2];	/*sanity */
1430 			n = 4;
1431 			arr[n++] = 0x0;   /* this page */
1432 			arr[n++] = 0x80;  /* unit serial number */
1433 			arr[n++] = 0x83;  /* device identification */
1434 			arr[n++] = 0x84;  /* software interface ident. */
1435 			arr[n++] = 0x85;  /* management network addresses */
1436 			arr[n++] = 0x86;  /* extended inquiry */
1437 			arr[n++] = 0x87;  /* mode page policy */
1438 			arr[n++] = 0x88;  /* SCSI ports */
1439 			if (is_disk) {	  /* SBC only */
1440 				arr[n++] = 0x89;  /* ATA information */
1441 				arr[n++] = 0xb0;  /* Block limits */
1442 				arr[n++] = 0xb1;  /* Block characteristics */
1443 				arr[n++] = 0xb2;  /* Logical Block Prov */
1444 			}
1445 			arr[3] = n - 4;	  /* number of supported VPD pages */
1446 		} else if (0x80 == cmd[2]) { /* unit serial number */
1447 			arr[1] = cmd[2];	/*sanity */
1448 			arr[3] = len;
1449 			memcpy(&arr[4], lu_id_str, len);
1450 		} else if (0x83 == cmd[2]) { /* device identification */
1451 			arr[1] = cmd[2];	/*sanity */
1452 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1453 						target_dev_id, lu_id_num,
1454 						lu_id_str, len,
1455 						&devip->lu_name);
1456 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1457 			arr[1] = cmd[2];	/*sanity */
1458 			arr[3] = inquiry_vpd_84(&arr[4]);
1459 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1460 			arr[1] = cmd[2];	/*sanity */
1461 			arr[3] = inquiry_vpd_85(&arr[4]);
1462 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1463 			arr[1] = cmd[2];	/*sanity */
1464 			arr[3] = 0x3c;	/* number of following entries */
1465 			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1466 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1467 			else if (have_dif_prot)
1468 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1469 			else
1470 				arr[4] = 0x0;   /* no protection stuff */
1471 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1472 		} else if (0x87 == cmd[2]) { /* mode page policy */
1473 			arr[1] = cmd[2];	/* sanity */
1474 			arr[3] = 0x8;	/* number of following entries */
1475 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1476 			arr[6] = 0x80;	/* mlus, shared */
1477 			arr[8] = 0x18;	 /* protocol specific lu */
1478 			arr[10] = 0x82;	 /* mlus, per initiator port */
1479 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1480 			arr[1] = cmd[2];	/* sanity */
1481 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1482 		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1483 			arr[1] = cmd[2];        /* sanity */
1484 			n = inquiry_vpd_89(&arr[4]);
1485 			put_unaligned_be16(n, arr + 2);
1486 		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1487 			arr[1] = cmd[2];        /* sanity */
1488 			arr[3] = inquiry_vpd_b0(&arr[4]);
1489 		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1490 			arr[1] = cmd[2];        /* sanity */
1491 			arr[3] = inquiry_vpd_b1(&arr[4]);
1492 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1493 			arr[1] = cmd[2];        /* sanity */
1494 			arr[3] = inquiry_vpd_b2(&arr[4]);
1495 		} else {
1496 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1497 			kfree(arr);
1498 			return check_condition_result;
1499 		}
1500 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1501 		ret = fill_from_dev_buffer(scp, arr,
1502 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1503 		kfree(arr);
1504 		return ret;
1505 	}
1506 	/* drops through here for a standard inquiry */
1507 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1508 	arr[2] = sdebug_scsi_level;
1509 	arr[3] = 2;    /* response_data_format==2 */
1510 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1511 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1512 	if (sdebug_vpd_use_hostno == 0)
1513 		arr[5] |= 0x10; /* claim: implicit TPGS */
1514 	arr[6] = 0x10; /* claim: MultiP */
1515 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1516 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1517 	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1518 	memcpy(&arr[16], sdebug_inq_product_id, 16);
1519 	memcpy(&arr[32], sdebug_inq_product_rev, 4);
1520 	/* Use Vendor Specific area to place the driver date in ASCII */
1521 	memcpy(&arr[36], sdebug_version_date, 8);
1522 	/* version descriptors (2 bytes each) follow */
1523 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1524 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1525 	n = 62;
1526 	if (is_disk) {		/* SBC-4 no version claimed */
1527 		put_unaligned_be16(0x600, arr + n);
1528 		n += 2;
1529 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1530 		put_unaligned_be16(0x525, arr + n);
1531 		n += 2;
1532 	}
1533 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1534 	ret = fill_from_dev_buffer(scp, arr,
1535 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1536 	kfree(arr);
1537 	return ret;
1538 }
1539 
1540 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1541 				   0, 0, 0x0, 0x0};
1542 
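/*
 * Respond to REQUEST SENSE. Returns either fixed (0x70) or descriptor
 * (0x72) format sense data depending on the DESC bit in the CDB,
 * converting the stored sense when the formats differ. A special reply
 * is generated when the IEC mode page has TEST=1 and MRIE=6.
 */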
1543 static int resp_requests(struct scsi_cmnd *scp,
1544 			 struct sdebug_dev_info *devip)
1545 {
1546 	unsigned char *sbuff;
1547 	unsigned char *cmd = scp->cmnd;
1548 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1549 	bool dsense;
1550 	int len = 18;
1551 
1552 	memset(arr, 0, sizeof(arr));
1553 	dsense = !!(cmd[1] & 1);
1554 	sbuff = scp->sense_buffer;
1555 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1556 		if (dsense) {
1557 			arr[0] = 0x72;
1558 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1559 			arr[2] = THRESHOLD_EXCEEDED;
1560 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1561 			len = 8;
1562 		} else {
1563 			arr[0] = 0x70;
1564 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1565 			arr[7] = 0xa;	/* 18-byte sense buffer */
1566 			arr[12] = THRESHOLD_EXCEEDED;
1567 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1568 		}
1569 	} else {
1570 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1571 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1572 			;	/* have sense and formats match */
1573 		else if (arr[0] <= 0x70) {
1574 			if (dsense) {
1575 				memset(arr, 0, 8);
1576 				arr[0] = 0x72;
1577 				len = 8;
1578 			} else {
1579 				memset(arr, 0, 18);
1580 				arr[0] = 0x70;
1581 				arr[7] = 0xa;
1582 			}
1583 		} else if (dsense) {
1584 			memset(arr, 0, 8);
1585 			arr[0] = 0x72;
1586 			arr[1] = sbuff[2];     /* sense key */
1587 			arr[2] = sbuff[12];    /* asc */
1588 			arr[3] = sbuff[13];    /* ascq */
1589 			len = 8;
1590 		} else {
1591 			memset(arr, 0, 18);
1592 			arr[0] = 0x70;
1593 			arr[2] = sbuff[1];     /* sense key */
1594 			arr[7] = 0xa;
1595 			arr[12] = sbuff[2];    /* asc */
1596 			arr[13] = sbuff[3];    /* ascq */
1597 		}
1599 	}
1600 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1601 	return fill_from_dev_buffer(scp, arr, len);
1602 }
1603 
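/*
 * Respond to START STOP UNIT. Only a POWER CONDITION field of 0 is
 * accepted. Returns the immediate-response mask when the state is
 * unchanged or the IMMED bit is set, so the reply is not delayed.
 */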
1604 static int resp_start_stop(struct scsi_cmnd *scp,
1605 			   struct sdebug_dev_info *devip)
1606 {
1607 	unsigned char *cmd = scp->cmnd;
1608 	int power_cond, stop;
1609 	bool changing;
1610 
1611 	power_cond = (cmd[4] & 0xf0) >> 4;
1612 	if (power_cond) {
1613 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1614 		return check_condition_result;
1615 	}
1616 	stop = !(cmd[4] & 1);
1617 	changing = atomic_read(&devip->stopped) == !stop;
1618 	atomic_xchg(&devip->stopped, stop);
1619 	if (!changing || (cmd[1] & 0x1))	/* state unchanged or IMMED set */
1620 		return SDEG_RES_IMMED_MASK;
1621 	else
1622 		return 0;
1623 }
1624 
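/*
 * The advertised capacity: sdebug_virtual_gb GiB when set, otherwise
 * the size of the backing store. A virtual capacity larger than the
 * store is handled by wrapping in do_device_access().
 */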
1625 static sector_t get_sdebug_capacity(void)
1626 {
1627 	static const unsigned int gibibyte = 1073741824;
1628 
1629 	if (sdebug_virtual_gb > 0)
1630 		return (sector_t)sdebug_virtual_gb *
1631 			(gibibyte / sdebug_sector_size);
1632 	else
1633 		return sdebug_store_sectors;
1634 }
1635 
1636 #define SDEBUG_READCAP_ARR_SZ 8
1637 static int resp_readcap(struct scsi_cmnd *scp,
1638 			struct sdebug_dev_info *devip)
1639 {
1640 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1641 	unsigned int capac;
1642 
1643 	/* following just in case virtual_gb changed */
1644 	sdebug_capacity = get_sdebug_capacity();
1645 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1646 	if (sdebug_capacity < 0xffffffff) {
1647 		capac = (unsigned int)sdebug_capacity - 1;
1648 		put_unaligned_be32(capac, arr + 0);
1649 	} else
1650 		put_unaligned_be32(0xffffffff, arr + 0);
1651 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1652 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1653 }
1654 
1655 #define SDEBUG_READCAP16_ARR_SZ 32
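/*
 * Respond to READ CAPACITY(16). Beyond the last LBA and block size,
 * the reply carries the physical block exponent, lowest aligned LBA,
 * logical block provisioning (LBPME/LBPRZ) and protection (PROT_EN,
 * P_TYPE) fields.
 */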
1656 static int resp_readcap16(struct scsi_cmnd *scp,
1657 			  struct sdebug_dev_info *devip)
1658 {
1659 	unsigned char *cmd = scp->cmnd;
1660 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1661 	int alloc_len;
1662 
1663 	alloc_len = get_unaligned_be32(cmd + 10);
1664 	/* following just in case virtual_gb changed */
1665 	sdebug_capacity = get_sdebug_capacity();
1666 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1667 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1668 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1669 	arr[13] = sdebug_physblk_exp & 0xf;
1670 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1671 
1672 	if (scsi_debug_lbp()) {
1673 		arr[14] |= 0x80; /* LBPME */
1674 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1675 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1676 		 * in the wider field maps to 0 in this field.
1677 		 */
1678 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1679 			arr[14] |= 0x40;
1680 	}
1681 
1682 	arr[15] = sdebug_lowest_aligned & 0xff;
1683 
1684 	if (have_dif_prot) {
1685 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1686 		arr[12] |= 1; /* PROT_EN */
1687 	}
1688 
1689 	return fill_from_dev_buffer(scp, arr,
1690 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1691 }
1692 
1693 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1694 
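/* Respond to REPORT TARGET PORT GROUPS, the ALUA state query (SPC-4) */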
1695 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1696 			      struct sdebug_dev_info *devip)
1697 {
1698 	unsigned char *cmd = scp->cmnd;
1699 	unsigned char *arr;
1700 	int host_no = devip->sdbg_host->shost->host_no;
1701 	int n, ret, alen, rlen;
1702 	int port_group_a, port_group_b, port_a, port_b;
1703 
1704 	alen = get_unaligned_be32(cmd + 6);
1705 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1706 	if (!arr)
1707 		return DID_REQUEUE << 16;
1708 	/*
1709 	 * EVPD page 0x88 states we have two ports, one
1710 	 * real and a fake port with no device connected.
1711 	 * So we create two port groups with one port each
1712 	 * and set the group with port B to unavailable.
1713 	 */
1714 	port_a = 0x1; /* relative port A */
1715 	port_b = 0x2; /* relative port B */
1716 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1717 			(devip->channel & 0x7f);
1718 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1719 			(devip->channel & 0x7f) + 0x80;
1720 
1721 	/*
1722 	 * The asymmetric access state is cycled according to the host_id.
1723 	 */
1724 	n = 4;
1725 	if (sdebug_vpd_use_hostno == 0) {
1726 		arr[n++] = host_no % 3; /* Asymm access state */
1727 		arr[n++] = 0x0F; /* claim: all states are supported */
1728 	} else {
1729 		arr[n++] = 0x0; /* Active/Optimized path */
1730 		arr[n++] = 0x01; /* only support active/optimized paths */
1731 	}
1732 	put_unaligned_be16(port_group_a, arr + n);
1733 	n += 2;
1734 	arr[n++] = 0;    /* Reserved */
1735 	arr[n++] = 0;    /* Status code */
1736 	arr[n++] = 0;    /* Vendor unique */
1737 	arr[n++] = 0x1;  /* One port per group */
1738 	arr[n++] = 0;    /* Reserved */
1739 	arr[n++] = 0;    /* Reserved */
1740 	put_unaligned_be16(port_a, arr + n);
1741 	n += 2;
1742 	arr[n++] = 3;    /* Port unavailable */
1743 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1744 	put_unaligned_be16(port_group_b, arr + n);
1745 	n += 2;
1746 	arr[n++] = 0;    /* Reserved */
1747 	arr[n++] = 0;    /* Status code */
1748 	arr[n++] = 0;    /* Vendor unique */
1749 	arr[n++] = 0x1;  /* One port per group */
1750 	arr[n++] = 0;    /* Reserved */
1751 	arr[n++] = 0;    /* Reserved */
1752 	put_unaligned_be16(port_b, arr + n);
1753 	n += 2;
1754 
1755 	rlen = n - 4;
1756 	put_unaligned_be32(rlen, arr + 0);
1757 
1758 	/*
1759 	 * Return the smallest of:
1760 	 * - the allocation length from the CDB (alen)
1761 	 * - the constructed response length (n)
1762 	 * - the maximum array size
1763 	 */
1764 	rlen = min(alen, n);
1765 	ret = fill_from_dev_buffer(scp, arr,
1766 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1767 	kfree(arr);
1768 	return ret;
1769 }
1770 
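/*
 * Respond to REPORT SUPPORTED OPERATION CODES. reporting_opts 0 lists
 * every supported command; 1 to 3 report on a single opcode, optionally
 * qualified by a service action. When RCTD is set, a command timeouts
 * descriptor is appended to each entry.
 */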
1771 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1772 			     struct sdebug_dev_info *devip)
1773 {
1774 	bool rctd;
1775 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1776 	u16 req_sa, u;
1777 	u32 alloc_len, a_len;
1778 	int k, offset, len, errsts, count, bump, na;
1779 	const struct opcode_info_t *oip;
1780 	const struct opcode_info_t *r_oip;
1781 	u8 *arr;
1782 	u8 *cmd = scp->cmnd;
1783 
1784 	rctd = !!(cmd[2] & 0x80);
1785 	reporting_opts = cmd[2] & 0x7;
1786 	req_opcode = cmd[3];
1787 	req_sa = get_unaligned_be16(cmd + 4);
1788 	alloc_len = get_unaligned_be32(cmd + 6);
1789 	if (alloc_len < 4 || alloc_len > 0xffff) {
1790 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1791 		return check_condition_result;
1792 	}
1793 	if (alloc_len > 8192)
1794 		a_len = 8192;
1795 	else
1796 		a_len = alloc_len;
1797 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1798 	if (NULL == arr) {
1799 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1800 				INSUFF_RES_ASCQ);
1801 		return check_condition_result;
1802 	}
1803 	switch (reporting_opts) {
1804 	case 0:	/* all commands */
1805 		/* count number of commands */
1806 		for (count = 0, oip = opcode_info_arr;
1807 		     oip->num_attached != 0xff; ++oip) {
1808 			if (F_INV_OP & oip->flags)
1809 				continue;
1810 			count += (oip->num_attached + 1);
1811 		}
1812 		bump = rctd ? 20 : 8;
1813 		put_unaligned_be32(count * bump, arr);
1814 		for (offset = 4, oip = opcode_info_arr;
1815 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
1816 			if (F_INV_OP & oip->flags)
1817 				continue;
1818 			na = oip->num_attached;
1819 			arr[offset] = oip->opcode;
1820 			put_unaligned_be16(oip->sa, arr + offset + 2);
1821 			if (rctd)
1822 				arr[offset + 5] |= 0x2;
1823 			if (FF_SA & oip->flags)
1824 				arr[offset + 5] |= 0x1;
1825 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1826 			if (rctd)
1827 				put_unaligned_be16(0xa, arr + offset + 8);
1828 			r_oip = oip;
1829 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1830 				if (F_INV_OP & oip->flags)
1831 					continue;
1832 				offset += bump;
1833 				arr[offset] = oip->opcode;
1834 				put_unaligned_be16(oip->sa, arr + offset + 2);
1835 				if (rctd)
1836 					arr[offset + 5] |= 0x2;
1837 				if (FF_SA & oip->flags)
1838 					arr[offset + 5] |= 0x1;
1839 				put_unaligned_be16(oip->len_mask[0],
1840 						   arr + offset + 6);
1841 				if (rctd)
1842 					put_unaligned_be16(0xa,
1843 							   arr + offset + 8);
1844 			}
1845 			oip = r_oip;
1846 			offset += bump;
1847 		}
1848 		break;
1849 	case 1:	/* one command: opcode only */
1850 	case 2:	/* one command: opcode plus service action */
1851 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
1852 		sdeb_i = opcode_ind_arr[req_opcode];
1853 		oip = &opcode_info_arr[sdeb_i];
1854 		if (F_INV_OP & oip->flags) {
1855 			supp = 1;
1856 			offset = 4;
1857 		} else {
1858 			if (1 == reporting_opts) {
1859 				if (FF_SA & oip->flags) {
1860 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1861 							     2, 2);
1862 					kfree(arr);
1863 					return check_condition_result;
1864 				}
1865 				req_sa = 0;
1866 			} else if (2 == reporting_opts &&
1867 				   0 == (FF_SA & oip->flags)) {
1868 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1869 				kfree(arr);	/* sense above points at requested sa */
1870 				return check_condition_result;
1871 			}
1872 			if (0 == (FF_SA & oip->flags) &&
1873 			    req_opcode == oip->opcode)
1874 				supp = 3;
1875 			else if (0 == (FF_SA & oip->flags)) {
1876 				na = oip->num_attached;
1877 				for (k = 0, oip = oip->arrp; k < na;
1878 				     ++k, ++oip) {
1879 					if (req_opcode == oip->opcode)
1880 						break;
1881 				}
1882 				supp = (k >= na) ? 1 : 3;
1883 			} else if (req_sa != oip->sa) {
1884 				na = oip->num_attached;
1885 				for (k = 0, oip = oip->arrp; k < na;
1886 				     ++k, ++oip) {
1887 					if (req_sa == oip->sa)
1888 						break;
1889 				}
1890 				supp = (k >= na) ? 1 : 3;
1891 			} else
1892 				supp = 3;
1893 			if (3 == supp) {
1894 				u = oip->len_mask[0];
1895 				put_unaligned_be16(u, arr + 2);
1896 				arr[4] = oip->opcode;
1897 				for (k = 1; k < u; ++k)
1898 					arr[4 + k] = (k < 16) ?
1899 						 oip->len_mask[k] : 0xff;
1900 				offset = 4 + u;
1901 			} else
1902 				offset = 4;
1903 		}
1904 		arr[1] = (rctd ? 0x80 : 0) | supp;
1905 		if (rctd) {
1906 			put_unaligned_be16(0xa, arr + offset);
1907 			offset += 12;
1908 		}
1909 		break;
1910 	default:
1911 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1912 		kfree(arr);
1913 		return check_condition_result;
1914 	}
1915 	offset = (offset < a_len) ? offset : a_len;
1916 	len = (offset < alloc_len) ? offset : alloc_len;
1917 	errsts = fill_from_dev_buffer(scp, arr, len);
1918 	kfree(arr);
1919 	return errsts;
1920 }
1921 
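/*
 * Respond to REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS. Claims support
 * for ABORT TASK, ABORT TASK SET, LU RESET and I_T NEXUS RESET.
 */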
1922 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1923 			  struct sdebug_dev_info *devip)
1924 {
1925 	bool repd;
1926 	u32 alloc_len, len;
1927 	u8 arr[16];
1928 	u8 *cmd = scp->cmnd;
1929 
1930 	memset(arr, 0, sizeof(arr));
1931 	repd = !!(cmd[2] & 0x80);
1932 	alloc_len = get_unaligned_be32(cmd + 6);
1933 	if (alloc_len < 4) {
1934 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1935 		return check_condition_result;
1936 	}
1937 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1938 	arr[1] = 0x1;		/* ITNRS */
1939 	if (repd) {
1940 		arr[3] = 0xc;
1941 		len = 16;
1942 	} else
1943 		len = 4;
1944 
1945 	len = (len < alloc_len) ? len : alloc_len;
1946 	return fill_from_dev_buffer(scp, arr, len);
1947 }
1948 
1949 /* <<Following mode page info copied from ST318451LW>> */
1950 
1951 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
1952 {	/* Read-Write Error Recovery page for mode_sense */
1953 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1954 					5, 0, 0xff, 0xff};
1955 
1956 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1957 	if (1 == pcontrol)
1958 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1959 	return sizeof(err_recov_pg);
1960 }
1961 
1962 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
1963 { 	/* Disconnect-Reconnect page for mode_sense */
1964 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1965 					 0, 0, 0, 0, 0, 0, 0, 0};
1966 
1967 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1968 	if (1 == pcontrol)
1969 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1970 	return sizeof(disconnect_pg);
1971 }
1972 
1973 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
1974 {       /* Format device page for mode_sense */
1975 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1976 				     0, 0, 0, 0, 0, 0, 0, 0,
1977 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1978 
1979 	memcpy(p, format_pg, sizeof(format_pg));
1980 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1981 	put_unaligned_be16(sdebug_sector_size, p + 12);
1982 	if (sdebug_removable)
1983 		p[20] |= 0x20; /* should agree with INQUIRY */
1984 	if (1 == pcontrol)
1985 		memset(p + 2, 0, sizeof(format_pg) - 2);
1986 	return sizeof(format_pg);
1987 }
1988 
1989 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1990 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1991 				     0, 0, 0, 0};
1992 
1993 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
1994 { 	/* Caching page for mode_sense */
1995 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1996 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1997 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1998 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1999 
2000 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
2001 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
2002 	memcpy(p, caching_pg, sizeof(caching_pg));
2003 	if (1 == pcontrol)
2004 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2005 	else if (2 == pcontrol)
2006 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2007 	return sizeof(caching_pg);
2008 }
2009 
2010 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2011 				    0, 0, 0x2, 0x4b};
2012 
2013 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2014 { 	/* Control mode page for mode_sense */
2015 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2016 					0, 0, 0, 0};
2017 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2018 				     0, 0, 0x2, 0x4b};
2019 
2020 	if (sdebug_dsense)
2021 		ctrl_m_pg[2] |= 0x4;
2022 	else
2023 		ctrl_m_pg[2] &= ~0x4;
2024 
2025 	if (sdebug_ato)
2026 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2027 
2028 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2029 	if (1 == pcontrol)
2030 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2031 	else if (2 == pcontrol)
2032 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2033 	return sizeof(ctrl_m_pg);
2034 }
2035 
2036 
2037 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2038 {	/* Informational Exceptions control mode page for mode_sense */
2039 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2040 				       0, 0, 0x0, 0x0};
2041 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2042 				      0, 0, 0x0, 0x0};
2043 
2044 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2045 	if (1 == pcontrol)
2046 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2047 	else if (2 == pcontrol)
2048 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2049 	return sizeof(iec_m_pg);
2050 }
2051 
2052 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2053 {	/* SAS SSP mode page - short format for mode_sense */
2054 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2055 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2056 
2057 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2058 	if (1 == pcontrol)
2059 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2060 	return sizeof(sas_sf_m_pg);
2061 }
2062 
2063 
2064 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2065 			      int target_dev_id)
2066 {	/* SAS phy control and discover mode page for mode_sense */
2067 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2068 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2069 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2070 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2071 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2072 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2073 		    0, 0, 0, 0, 0, 0, 0, 0,
2074 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2075 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2076 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2077 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2078 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2079 		    0, 0, 0, 0, 0, 0, 0, 0,
2080 		};
2081 	int port_a, port_b;
2082 
2083 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2084 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2085 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2086 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2087 	port_a = target_dev_id + 1;
2088 	port_b = port_a + 1;
2089 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2090 	put_unaligned_be32(port_a, p + 20);
2091 	put_unaligned_be32(port_b, p + 48 + 20);
2092 	if (1 == pcontrol)
2093 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2094 	return sizeof(sas_pcd_m_pg);
2095 }
2096 
2097 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2098 {	/* SAS SSP shared protocol specific port mode subpage */
2099 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2100 		    0, 0, 0, 0, 0, 0, 0, 0,
2101 		};
2102 
2103 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2104 	if (1 == pcontrol)
2105 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2106 	return sizeof(sas_sha_m_pg);
2107 }
2108 
2109 #define SDEBUG_MAX_MSENSE_SZ 256
2110 
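/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). A block descriptor (8
 * bytes, or 16 when LLBAA is set) is prepended unless DBD is set, then
 * the requested page(s) follow. pcontrol 1 returns the changeable mask,
 * 2 the defaults; saved values (3) are not supported.
 */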
2111 static int resp_mode_sense(struct scsi_cmnd *scp,
2112 			   struct sdebug_dev_info *devip)
2113 {
2114 	int pcontrol, pcode, subpcode, bd_len;
2115 	unsigned char dev_spec;
2116 	int alloc_len, offset, len, target_dev_id;
2117 	int target = scp->device->id;
2118 	unsigned char *ap;
2119 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2120 	unsigned char *cmd = scp->cmnd;
2121 	bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2122 
2123 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
2124 	pcontrol = (cmd[2] & 0xc0) >> 6;
2125 	pcode = cmd[2] & 0x3f;
2126 	subpcode = cmd[3];
2127 	msense_6 = (MODE_SENSE == cmd[0]);
2128 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2129 	is_disk = (sdebug_ptype == TYPE_DISK);
2130 	if (is_disk && !dbd)
2131 		bd_len = llbaa ? 16 : 8;
2132 	else
2133 		bd_len = 0;
2134 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2135 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2136 	if (0x3 == pcontrol) {  /* Saving values not supported */
2137 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2138 		return check_condition_result;
2139 	}
2140 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2141 			(devip->target * 1000) - 3;
2142 	/* for disks, set DPOFUA bit; set WP bit when sdebug_wp is true */
2143 	if (is_disk) {
2144 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
2145 		if (sdebug_wp)
2146 			dev_spec |= 0x80;
2147 	} else
2148 		dev_spec = 0x0;
2149 	if (msense_6) {
2150 		arr[2] = dev_spec;
2151 		arr[3] = bd_len;
2152 		offset = 4;
2153 	} else {
2154 		arr[3] = dev_spec;
2155 		if (16 == bd_len)
2156 			arr[4] = 0x1;	/* set LONGLBA bit */
2157 		arr[7] = bd_len;	/* assume 255 or less */
2158 		offset = 8;
2159 	}
2160 	ap = arr + offset;
2161 	if ((bd_len > 0) && (!sdebug_capacity))
2162 		sdebug_capacity = get_sdebug_capacity();
2163 
2164 	if (8 == bd_len) {
2165 		if (sdebug_capacity > 0xfffffffe)
2166 			put_unaligned_be32(0xffffffff, ap + 0);
2167 		else
2168 			put_unaligned_be32(sdebug_capacity, ap + 0);
2169 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2170 		offset += bd_len;
2171 		ap = arr + offset;
2172 	} else if (16 == bd_len) {
2173 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2174 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2175 		offset += bd_len;
2176 		ap = arr + offset;
2177 	}
2178 
2179 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2180 		/* TODO: Control Extension page */
2181 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2182 		return check_condition_result;
2183 	}
2184 	bad_pcode = false;
2185 
2186 	switch (pcode) {
2187 	case 0x1:	/* Read-Write error recovery page, direct access */
2188 		len = resp_err_recov_pg(ap, pcontrol, target);
2189 		offset += len;
2190 		break;
2191 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2192 		len = resp_disconnect_pg(ap, pcontrol, target);
2193 		offset += len;
2194 		break;
2195 	case 0x3:       /* Format device page, direct access */
2196 		if (is_disk) {
2197 			len = resp_format_pg(ap, pcontrol, target);
2198 			offset += len;
2199 		} else
2200 			bad_pcode = true;
2201 		break;
2202 	case 0x8:	/* Caching page, direct access */
2203 		if (is_disk) {
2204 			len = resp_caching_pg(ap, pcontrol, target);
2205 			offset += len;
2206 		} else
2207 			bad_pcode = true;
2208 		break;
2209 	case 0xa:	/* Control Mode page, all devices */
2210 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2211 		offset += len;
2212 		break;
2213 	case 0x19:	/* if subpc==1 then SAS phy, control+discover */
2214 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2215 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2216 			return check_condition_result;
2217 		}
2218 		len = 0;
2219 		if ((0x0 == subpcode) || (0xff == subpcode))
2220 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2221 		if ((0x1 == subpcode) || (0xff == subpcode))
2222 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2223 						  target_dev_id);
2224 		if ((0x2 == subpcode) || (0xff == subpcode))
2225 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2226 		offset += len;
2227 		break;
2228 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2229 		len = resp_iec_m_pg(ap, pcontrol, target);
2230 		offset += len;
2231 		break;
2232 	case 0x3f:	/* Read all Mode pages */
2233 		if ((0 == subpcode) || (0xff == subpcode)) {
2234 			len = resp_err_recov_pg(ap, pcontrol, target);
2235 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2236 			if (is_disk) {
2237 				len += resp_format_pg(ap + len, pcontrol,
2238 						      target);
2239 				len += resp_caching_pg(ap + len, pcontrol,
2240 						       target);
2241 			}
2242 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2243 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2244 			if (0xff == subpcode) {
2245 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2246 						  target, target_dev_id);
2247 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2248 			}
2249 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2250 			offset += len;
2251 		} else {
2252 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2253 			return check_condition_result;
2254 		}
2255 		break;
2256 	default:
2257 		bad_pcode = true;
2258 		break;
2259 	}
2260 	if (bad_pcode) {
2261 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2262 		return check_condition_result;
2263 	}
2264 	if (msense_6)
2265 		arr[0] = offset - 1;
2266 	else
2267 		put_unaligned_be16((offset - 2), arr + 0);
2268 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2269 }
2270 
2271 #define SDEBUG_MAX_MSELECT_SZ 512
2272 
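/*
 * Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching,
 * Control and Informational Exceptions mode pages may be changed; a
 * successful change raises a MODE PARAMETERS CHANGED unit attention.
 */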
2273 static int resp_mode_select(struct scsi_cmnd *scp,
2274 			    struct sdebug_dev_info *devip)
2275 {
2276 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2277 	int param_len, res, mpage;
2278 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2279 	unsigned char *cmd = scp->cmnd;
2280 	int mselect6 = (MODE_SELECT == cmd[0]);
2281 
2282 	memset(arr, 0, sizeof(arr));
2283 	pf = cmd[1] & 0x10;
2284 	sp = cmd[1] & 0x1;
2285 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2286 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2287 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2288 		return check_condition_result;
2289 	}
2290 	res = fetch_to_dev_buffer(scp, arr, param_len);
2291 	if (-1 == res)
2292 		return DID_ERROR << 16;
2293 	else if (sdebug_verbose && (res < param_len))
2294 		sdev_printk(KERN_INFO, scp->device,
2295 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2296 			    __func__, param_len, res);
2297 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2298 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
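	/* MODE DATA LENGTH field is reserved for MODE SELECT, expect 0 */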
2299 	if (md_len > 2) {
2300 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2301 		return check_condition_result;
2302 	}
2303 	off = bd_len + (mselect6 ? 4 : 8);
2304 	mpage = arr[off] & 0x3f;
2305 	ps = !!(arr[off] & 0x80);
2306 	if (ps) {
2307 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2308 		return check_condition_result;
2309 	}
2310 	spf = !!(arr[off] & 0x40);
2311 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2312 		       (arr[off + 1] + 2);
2313 	if ((pg_len + off) > param_len) {
2314 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2315 				PARAMETER_LIST_LENGTH_ERR, 0);
2316 		return check_condition_result;
2317 	}
2318 	switch (mpage) {
2319 	case 0x8:      /* Caching Mode page */
2320 		if (caching_pg[1] == arr[off + 1]) {
2321 			memcpy(caching_pg + 2, arr + off + 2,
2322 			       sizeof(caching_pg) - 2);
2323 			goto set_mode_changed_ua;
2324 		}
2325 		break;
2326 	case 0xa:      /* Control Mode page */
2327 		if (ctrl_m_pg[1] == arr[off + 1]) {
2328 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2329 			       sizeof(ctrl_m_pg) - 2);
2330 			if (ctrl_m_pg[4] & 0x8)
2331 				sdebug_wp = true;
2332 			else
2333 				sdebug_wp = false;
2334 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2335 			goto set_mode_changed_ua;
2336 		}
2337 		break;
2338 	case 0x1c:      /* Informational Exceptions Mode page */
2339 		if (iec_m_pg[1] == arr[off + 1]) {
2340 			memcpy(iec_m_pg + 2, arr + off + 2,
2341 			       sizeof(iec_m_pg) - 2);
2342 			goto set_mode_changed_ua;
2343 		}
2344 		break;
2345 	default:
2346 		break;
2347 	}
2348 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2349 	return check_condition_result;
2350 set_mode_changed_ua:
2351 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2352 	return 0;
2353 }
2354 
2355 static int resp_temp_l_pg(unsigned char *arr)
2356 {
2357 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2358 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2359 		};
2360 
2361 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2362 	return sizeof(temp_l_pg);
2363 }
2364 
2365 static int resp_ie_l_pg(unsigned char *arr)
2366 {
2367 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2368 		};
2369 
2370 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2371 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2372 		arr[4] = THRESHOLD_EXCEEDED;
2373 		arr[5] = 0xff;
2374 	}
2375 	return sizeof(ie_l_pg);
2376 }
2377 
2378 #define SDEBUG_MAX_LSENSE_SZ 512
2379 
2380 static int resp_log_sense(struct scsi_cmnd *scp,
2381 			  struct sdebug_dev_info *devip)
2382 {
2383 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2384 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2385 	unsigned char *cmd = scp->cmnd;
2386 
2387 	memset(arr, 0, sizeof(arr));
2388 	ppc = cmd[1] & 0x2;
2389 	sp = cmd[1] & 0x1;
2390 	if (ppc || sp) {
2391 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2392 		return check_condition_result;
2393 	}
2394 	pcode = cmd[2] & 0x3f;
2395 	subpcode = cmd[3] & 0xff;
2396 	alloc_len = get_unaligned_be16(cmd + 7);
2397 	arr[0] = pcode;
2398 	if (0 == subpcode) {
2399 		switch (pcode) {
2400 		case 0x0:	/* Supported log pages log page */
2401 			n = 4;
2402 			arr[n++] = 0x0;		/* this page */
2403 			arr[n++] = 0xd;		/* Temperature */
2404 			arr[n++] = 0x2f;	/* Informational exceptions */
2405 			arr[3] = n - 4;
2406 			break;
2407 		case 0xd:	/* Temperature log page */
2408 			arr[3] = resp_temp_l_pg(arr + 4);
2409 			break;
2410 		case 0x2f:	/* Informational exceptions log page */
2411 			arr[3] = resp_ie_l_pg(arr + 4);
2412 			break;
2413 		default:
2414 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2415 			return check_condition_result;
2416 		}
2417 	} else if (0xff == subpcode) {
2418 		arr[0] |= 0x40;
2419 		arr[1] = subpcode;
2420 		switch (pcode) {
2421 		case 0x0:	/* Supported log pages and subpages log page */
2422 			n = 4;
2423 			arr[n++] = 0x0;
2424 			arr[n++] = 0x0;		/* 0,0 page */
2425 			arr[n++] = 0x0;
2426 			arr[n++] = 0xff;	/* this page */
2427 			arr[n++] = 0xd;
2428 			arr[n++] = 0x0;		/* Temperature */
2429 			arr[n++] = 0x2f;
2430 			arr[n++] = 0x0;	/* Informational exceptions */
2431 			arr[3] = n - 4;
2432 			break;
2433 		case 0xd:	/* Temperature subpages */
2434 			n = 4;
2435 			arr[n++] = 0xd;
2436 			arr[n++] = 0x0;		/* Temperature */
2437 			arr[3] = n - 4;
2438 			break;
2439 		case 0x2f:	/* Informational exceptions subpages */
2440 			n = 4;
2441 			arr[n++] = 0x2f;
2442 			arr[n++] = 0x0;		/* Informational exceptions */
2443 			arr[3] = n - 4;
2444 			break;
2445 		default:
2446 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2447 			return check_condition_result;
2448 		}
2449 	} else {
2450 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2451 		return check_condition_result;
2452 	}
2453 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2454 	return fill_from_dev_buffer(scp, arr,
2455 		    min(len, SDEBUG_MAX_LSENSE_SZ));
2456 }
2457 
2458 static inline int check_device_access_params(struct scsi_cmnd *scp,
2459 	unsigned long long lba, unsigned int num, bool write)
2460 {
2461 	if (lba + num > sdebug_capacity) {
2462 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2463 		return check_condition_result;
2464 	}
2465 	/* transfer length excessive (tie in to block limits VPD page) */
2466 	if (num > sdebug_store_sectors) {
2467 		/* needs work to find which cdb byte 'num' comes from */
2468 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2469 		return check_condition_result;
2470 	}
2471 	if (write && unlikely(sdebug_wp)) {
2472 		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2473 		return check_condition_result;
2474 	}
2475 	return 0;
2476 }
2477 
2478 /* Returns number of bytes copied or -1 if error. */
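/*
 * The fake store can be smaller than the advertised capacity (when
 * virtual_gb > 0), so the lba is reduced modulo sdebug_store_sectors
 * and a long transfer may wrap around to the start of the store.
 */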
2479 static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2480 			    u32 num, bool do_write)
2481 {
2482 	int ret;
2483 	u64 block, rest = 0;
2484 	struct scsi_data_buffer *sdb = &scmd->sdb;
2485 	enum dma_data_direction dir;
2486 
2487 	if (do_write) {
2488 		dir = DMA_TO_DEVICE;
2489 		write_since_sync = true;
2490 	} else {
2491 		dir = DMA_FROM_DEVICE;
2492 	}
2493 
2494 	if (!sdb->length)
2495 		return 0;
2496 	if (scmd->sc_data_direction != dir)
2497 		return -1;
2498 
2499 	block = do_div(lba, sdebug_store_sectors);
2500 	if (block + num > sdebug_store_sectors)
2501 		rest = block + num - sdebug_store_sectors;
2502 
2503 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2504 		   fake_storep + (block * sdebug_sector_size),
2505 		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
2506 	if (ret != (num - rest) * sdebug_sector_size)
2507 		return ret;
2508 
2509 	if (rest) {
2510 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2511 			    fake_storep, rest * sdebug_sector_size,
2512 			    sg_skip + ((num - rest) * sdebug_sector_size),
2513 			    do_write);
2514 	}
2515 
2516 	return ret;
2517 }
2518 
2519 /* If lba2fake_store(lba, num) compares equal to the first half of arr, then
2520  * copy the second half of arr into lba2fake_store(lba, num) and return true.
2521  * If the comparison fails then return false. */
2522 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2523 {
2524 	bool res;
2525 	u64 block, rest = 0;
2526 	u32 store_blks = sdebug_store_sectors;
2527 	u32 lb_size = sdebug_sector_size;
2528 
2529 	block = do_div(lba, store_blks);
2530 	if (block + num > store_blks)
2531 		rest = block + num - store_blks;
2532 
2533 	res = !memcmp(fake_storep + (block * lb_size), arr,
2534 		      (num - rest) * lb_size);
2535 	if (!res)
2536 		return res;
2537 	if (rest)
2538 		res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2539 			     rest * lb_size);
2540 	if (!res)
2541 		return res;
2542 	arr += num * lb_size;
2543 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2544 	if (rest)
2545 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2546 		       rest * lb_size);
2547 	return res;
2548 }
2549 
2550 static __be16 dif_compute_csum(const void *buf, int len)
2551 {
2552 	__be16 csum;
2553 
2554 	if (sdebug_guard)
2555 		csum = (__force __be16)ip_compute_csum(buf, len);
2556 	else
2557 		csum = cpu_to_be16(crc_t10dif(buf, len));
2558 
2559 	return csum;
2560 }
2561 
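/*
 * Verify one protection information tuple against the data in a sector.
 * Type 1 requires the reference tag to match the low 32 bits of the
 * LBA; type 2 checks it against the expected initial LBA; type 3 does
 * no reference tag check. Returns 0, or the ASCQ for ASC 0x10.
 */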
2562 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2563 		      sector_t sector, u32 ei_lba)
2564 {
2565 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2566 
2567 	if (sdt->guard_tag != csum) {
2568 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2569 			(unsigned long)sector,
2570 			be16_to_cpu(sdt->guard_tag),
2571 			be16_to_cpu(csum));
2572 		return 0x01;
2573 	}
2574 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2575 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2576 		pr_err("REF check failed on sector %lu\n",
2577 			(unsigned long)sector);
2578 		return 0x03;
2579 	}
2580 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2581 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2582 		pr_err("REF check failed on sector %lu\n",
2583 			(unsigned long)sector);
2584 		return 0x03;
2585 	}
2586 	return 0;
2587 }
2588 
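/*
 * Copy protection information for a run of sectors between the
 * command's protection sgl and dif_storep, handling wrap-around at the
 * end of the store. read==true copies out to the sgl, else in from it.
 */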
2589 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2590 			  unsigned int sectors, bool read)
2591 {
2592 	size_t resid;
2593 	void *paddr;
2594 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
2595 	struct sg_mapping_iter miter;
2596 
2597 	/* Bytes of protection data to copy into sgl */
2598 	resid = sectors * sizeof(*dif_storep);
2599 
2600 	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2601 			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2602 			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2603 
2604 	while (sg_miter_next(&miter) && resid > 0) {
2605 		size_t len = min(miter.length, resid);
2606 		void *start = dif_store(sector);
2607 		size_t rest = 0;
2608 
2609 		if (dif_store_end < start + len)
2610 			rest = start + len - dif_store_end;
2611 
2612 		paddr = miter.addr;
2613 
2614 		if (read)
2615 			memcpy(paddr, start, len - rest);
2616 		else
2617 			memcpy(start, paddr, len - rest);
2618 
2619 		if (rest) {
2620 			if (read)
2621 				memcpy(paddr + len - rest, dif_storep, rest);
2622 			else
2623 				memcpy(dif_storep, paddr + len - rest, rest);
2624 		}
2625 
2626 		sector += len / sizeof(*dif_storep);
2627 		resid -= len;
2628 	}
2629 	sg_miter_stop(&miter);
2630 }
2631 
2632 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2633 			    unsigned int sectors, u32 ei_lba)
2634 {
2635 	unsigned int i;
2636 	struct t10_pi_tuple *sdt;
2637 	sector_t sector;
2638 
2639 	for (i = 0; i < sectors; i++, ei_lba++) {
2640 		int ret;
2641 
2642 		sector = start_sec + i;
2643 		sdt = dif_store(sector);
2644 
2645 		if (sdt->app_tag == cpu_to_be16(0xffff))
2646 			continue;
2647 
2648 		ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2649 		if (ret) {
2650 			dif_errors++;
2651 			return ret;
2652 		}
2653 	}
2654 
2655 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2656 	dix_reads++;
2657 
2658 	return 0;
2659 }
2660 
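/*
 * Handle the READ family (plus the read half of XDWRITEREAD(10)).
 * Optionally injects a MEDIUM ERROR inside the configured LBA window,
 * verifies protection information when DIX is active, then copies data
 * out of the fake store under the atomic_rw read lock.
 */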
2661 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2662 {
2663 	u8 *cmd = scp->cmnd;
2664 	struct sdebug_queued_cmd *sqcp;
2665 	u64 lba;
2666 	u32 num;
2667 	u32 ei_lba;
2668 	unsigned long iflags;
2669 	int ret;
2670 	bool check_prot;
2671 
2672 	switch (cmd[0]) {
2673 	case READ_16:
2674 		ei_lba = 0;
2675 		lba = get_unaligned_be64(cmd + 2);
2676 		num = get_unaligned_be32(cmd + 10);
2677 		check_prot = true;
2678 		break;
2679 	case READ_10:
2680 		ei_lba = 0;
2681 		lba = get_unaligned_be32(cmd + 2);
2682 		num = get_unaligned_be16(cmd + 7);
2683 		check_prot = true;
2684 		break;
2685 	case READ_6:
2686 		ei_lba = 0;
2687 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2688 		      (u32)(cmd[1] & 0x1f) << 16;
2689 		num = (0 == cmd[4]) ? 256 : cmd[4];
2690 		check_prot = true;
2691 		break;
2692 	case READ_12:
2693 		ei_lba = 0;
2694 		lba = get_unaligned_be32(cmd + 2);
2695 		num = get_unaligned_be32(cmd + 6);
2696 		check_prot = true;
2697 		break;
2698 	case XDWRITEREAD_10:
2699 		ei_lba = 0;
2700 		lba = get_unaligned_be32(cmd + 2);
2701 		num = get_unaligned_be16(cmd + 7);
2702 		check_prot = false;
2703 		break;
2704 	default:	/* assume READ(32) */
2705 		lba = get_unaligned_be64(cmd + 12);
2706 		ei_lba = get_unaligned_be32(cmd + 20);
2707 		num = get_unaligned_be32(cmd + 28);
2708 		check_prot = false;
2709 		break;
2710 	}
2711 	if (unlikely(have_dif_prot && check_prot)) {
2712 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2713 		    (cmd[1] & 0xe0)) {
2714 			mk_sense_invalid_opcode(scp);
2715 			return check_condition_result;
2716 		}
2717 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2718 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2719 		    (cmd[1] & 0xe0) == 0)
2720 			sdev_printk(KERN_ERR, scp->device,
2721 				    "Unprotected RD to DIF device\n");
2722 	}
2723 	if (unlikely(sdebug_any_injecting_opt)) {
2724 		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2725 
2726 		if (sqcp && sqcp->inj_short)
2727 			num /= 2;
2730 	} else
2731 		sqcp = NULL;
2732 
2733 	ret = check_device_access_params(scp, lba, num, false);
2734 	if (ret)
2735 		return ret;
2736 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2737 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
2738 		     ((lba + num) > sdebug_medium_error_start))) {
2739 		/* claim unrecoverable read error */
2740 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2741 		/* set info field and valid bit for fixed descriptor */
2742 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2743 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
2744 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
2745 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2746 			put_unaligned_be32(ret, scp->sense_buffer + 3);
2747 		}
2748 		scsi_set_resid(scp, scsi_bufflen(scp));
2749 		return check_condition_result;
2750 	}
2751 
2752 	read_lock_irqsave(&atomic_rw, iflags);
2753 
2754 	/* DIX + T10 DIF */
2755 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2756 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2757 
2758 		if (prot_ret) {
2759 			read_unlock_irqrestore(&atomic_rw, iflags);
2760 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2761 			return illegal_condition_result;
2762 		}
2763 	}
2764 
2765 	ret = do_device_access(scp, 0, lba, num, false);
2766 	read_unlock_irqrestore(&atomic_rw, iflags);
2767 	if (unlikely(ret == -1))
2768 		return DID_ERROR << 16;
2769 
2770 	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
2771 
2772 	if (unlikely(sqcp)) {
2773 		if (sqcp->inj_recovered) {
2774 			mk_sense_buffer(scp, RECOVERED_ERROR,
2775 					THRESHOLD_EXCEEDED, 0);
2776 			return check_condition_result;
2777 		} else if (sqcp->inj_transport) {
2778 			mk_sense_buffer(scp, ABORTED_COMMAND,
2779 					TRANSPORT_PROBLEM, ACK_NAK_TO);
2780 			return check_condition_result;
2781 		} else if (sqcp->inj_dif) {
2782 			/* Logical block guard check failed */
2783 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2784 			return illegal_condition_result;
2785 		} else if (sqcp->inj_dix) {
2786 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2787 			return illegal_condition_result;
2788 		}
2789 	}
2790 	return 0;
2791 }
2792 
2793 static void dump_sector(unsigned char *buf, int len)
2794 {
2795 	int i, j, n;
2796 
2797 	pr_err(">>> Sector Dump <<<\n");
2798 	for (i = 0; i < len; i += 16) {
2799 		char b[128];
2800 
2801 		for (j = 0, n = 0; j < 16; j++) {
2802 			unsigned char c = buf[i+j];
2803 
2804 			if (c >= 0x20 && c < 0x7e)
2805 				n += scnprintf(b + n, sizeof(b) - n,
2806 					       " %c ", c);
2807 			else
2808 				n += scnprintf(b + n, sizeof(b) - n,
2809 					       "%02x ", c);
2810 		}
2811 		pr_err("%04d: %s\n", i, b);
2812 	}
2813 }
2814 
2815 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2816 			     unsigned int sectors, u32 ei_lba)
2817 {
2818 	int ret;
2819 	struct t10_pi_tuple *sdt;
2820 	void *daddr;
2821 	sector_t sector = start_sec;
2822 	int ppage_offset;
2823 	int dpage_offset;
2824 	struct sg_mapping_iter diter;
2825 	struct sg_mapping_iter piter;
2826 
2827 	BUG_ON(scsi_sg_count(SCpnt) == 0);
2828 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2829 
2830 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2831 			scsi_prot_sg_count(SCpnt),
2832 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2833 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2834 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2835 
2836 	/* For each protection page */
2837 	while (sg_miter_next(&piter)) {
2838 		dpage_offset = 0;
2839 		if (WARN_ON(!sg_miter_next(&diter))) {
2840 			ret = 0x01;
2841 			goto out;
2842 		}
2843 
2844 		for (ppage_offset = 0; ppage_offset < piter.length;
2845 		     ppage_offset += sizeof(struct t10_pi_tuple)) {
2846 			/* If we're at the end of the current
2847 			 * data page advance to the next one
2848 			 */
2849 			if (dpage_offset >= diter.length) {
2850 				if (WARN_ON(!sg_miter_next(&diter))) {
2851 					ret = 0x01;
2852 					goto out;
2853 				}
2854 				dpage_offset = 0;
2855 			}
2856 
2857 			sdt = piter.addr + ppage_offset;
2858 			daddr = diter.addr + dpage_offset;
2859 
2860 			ret = dif_verify(sdt, daddr, sector, ei_lba);
2861 			if (ret) {
2862 				dump_sector(daddr, sdebug_sector_size);
2863 				goto out;
2864 			}
2865 
2866 			sector++;
2867 			ei_lba++;
2868 			dpage_offset += sdebug_sector_size;
2869 		}
2870 		diter.consumed = dpage_offset;
2871 		sg_miter_stop(&diter);
2872 	}
2873 	sg_miter_stop(&piter);
2874 
2875 	dif_copy_prot(SCpnt, start_sec, sectors, false);
2876 	dix_writes++;
2877 
2878 	return 0;
2879 
2880 out:
2881 	dif_errors++;
2882 	sg_miter_stop(&diter);
2883 	sg_miter_stop(&piter);
2884 	return ret;
2885 }
2886 
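/*
 * Logical block provisioning state is kept in the map_storep bitmap:
 * one bit per unmap granularity's worth of LBAs, offset by the unmap
 * alignment. A set bit means the corresponding region is mapped.
 */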
2887 static unsigned long lba_to_map_index(sector_t lba)
2888 {
2889 	if (sdebug_unmap_alignment)
2890 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2891 	sector_div(lba, sdebug_unmap_granularity);
2892 	return lba;
2893 }
2894 
2895 static sector_t map_index_to_lba(unsigned long index)
2896 {
2897 	sector_t lba = index * sdebug_unmap_granularity;
2898 
2899 	if (sdebug_unmap_alignment)
2900 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2901 	return lba;
2902 }
2903 
2904 static unsigned int map_state(sector_t lba, unsigned int *num)
2905 {
2906 	sector_t end;
2907 	unsigned int mapped;
2908 	unsigned long index;
2909 	unsigned long next;
2910 
2911 	index = lba_to_map_index(lba);
2912 	mapped = test_bit(index, map_storep);
2913 
2914 	if (mapped)
2915 		next = find_next_zero_bit(map_storep, map_size, index);
2916 	else
2917 		next = find_next_bit(map_storep, map_size, index);
2918 
2919 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2920 	*num = end - lba;
2921 	return mapped;
2922 }
2923 
2924 static void map_region(sector_t lba, unsigned int len)
2925 {
2926 	sector_t end = lba + len;
2927 
2928 	while (lba < end) {
2929 		unsigned long index = lba_to_map_index(lba);
2930 
2931 		if (index < map_size)
2932 			set_bit(index, map_storep);
2933 
2934 		lba = map_index_to_lba(index + 1);
2935 	}
2936 }
2937 
2938 static void unmap_region(sector_t lba, unsigned int len)
2939 {
2940 	sector_t end = lba + len;
2941 
2942 	while (lba < end) {
2943 		unsigned long index = lba_to_map_index(lba);
2944 
2945 		if (lba == map_index_to_lba(index) &&
2946 		    lba + sdebug_unmap_granularity <= end &&
2947 		    index < map_size) {
2948 			clear_bit(index, map_storep);
2949 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff bytes */
2950 				memset(fake_storep +
2951 				       lba * sdebug_sector_size,
2952 				       (sdebug_lbprz & 1) ? 0 : 0xff,
2953 				       sdebug_sector_size *
2954 				       sdebug_unmap_granularity);
2955 			}
2956 			if (dif_storep) {
2957 				memset(dif_storep + lba, 0xff,
2958 				       sizeof(*dif_storep) *
2959 				       sdebug_unmap_granularity);
2960 			}
2961 		}
2962 		lba = map_index_to_lba(index + 1);
2963 	}
2964 }
2965 
2966 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2967 {
2968 	u8 *cmd = scp->cmnd;
2969 	u64 lba;
2970 	u32 num;
2971 	u32 ei_lba;
2972 	unsigned long iflags;
2973 	int ret;
2974 	bool check_prot;
2975 
2976 	switch (cmd[0]) {
2977 	case WRITE_16:
2978 		ei_lba = 0;
2979 		lba = get_unaligned_be64(cmd + 2);
2980 		num = get_unaligned_be32(cmd + 10);
2981 		check_prot = true;
2982 		break;
2983 	case WRITE_10:
2984 		ei_lba = 0;
2985 		lba = get_unaligned_be32(cmd + 2);
2986 		num = get_unaligned_be16(cmd + 7);
2987 		check_prot = true;
2988 		break;
2989 	case WRITE_6:
2990 		ei_lba = 0;
2991 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2992 		      (u32)(cmd[1] & 0x1f) << 16;
2993 		num = (0 == cmd[4]) ? 256 : cmd[4];
2994 		check_prot = true;
2995 		break;
2996 	case WRITE_12:
2997 		ei_lba = 0;
2998 		lba = get_unaligned_be32(cmd + 2);
2999 		num = get_unaligned_be32(cmd + 6);
3000 		check_prot = true;
3001 		break;
3002 	case XDWRITEREAD_10:	/* 0x53 */
3003 		ei_lba = 0;
3004 		lba = get_unaligned_be32(cmd + 2);
3005 		num = get_unaligned_be16(cmd + 7);
3006 		check_prot = false;
3007 		break;
3008 	default:	/* assume WRITE(32) */
3009 		lba = get_unaligned_be64(cmd + 12);
3010 		ei_lba = get_unaligned_be32(cmd + 20);
3011 		num = get_unaligned_be32(cmd + 28);
3012 		check_prot = false;
3013 		break;
3014 	}
3015 	if (unlikely(have_dif_prot && check_prot)) {
3016 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3017 		    (cmd[1] & 0xe0)) {
3018 			mk_sense_invalid_opcode(scp);
3019 			return check_condition_result;
3020 		}
3021 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3022 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3023 		    (cmd[1] & 0xe0) == 0)
3024 			sdev_printk(KERN_ERR, scp->device,
3025 				    "Unprotected WR to DIF device\n");
3026 	}
3027 	ret = check_device_access_params(scp, lba, num, true);
3028 	if (ret)
3029 		return ret;
3030 	write_lock_irqsave(&atomic_rw, iflags);
3031 
3032 	/* DIX + T10 DIF */
3033 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3034 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3035 
3036 		if (prot_ret) {
3037 			write_unlock_irqrestore(&atomic_rw, iflags);
3038 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3039 			return illegal_condition_result;
3040 		}
3041 	}
3042 
3043 	ret = do_device_access(scp, 0, lba, num, true);
3044 	if (unlikely(scsi_debug_lbp()))
3045 		map_region(lba, num);
3046 	write_unlock_irqrestore(&atomic_rw, iflags);
3047 	if (unlikely(-1 == ret))
3048 		return DID_ERROR << 16;
3049 	else if (unlikely(sdebug_verbose &&
3050 			  (ret < (num * sdebug_sector_size))))
3051 		sdev_printk(KERN_INFO, scp->device,
3052 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3053 			    my_name, num * sdebug_sector_size, ret);
3054 
3055 	if (unlikely(sdebug_any_injecting_opt)) {
3056 		struct sdebug_queued_cmd *sqcp =
3057 				(struct sdebug_queued_cmd *)scp->host_scribble;
3058 
3059 		if (sqcp) {
3060 			if (sqcp->inj_recovered) {
3061 				mk_sense_buffer(scp, RECOVERED_ERROR,
3062 						THRESHOLD_EXCEEDED, 0);
3063 				return check_condition_result;
3064 			} else if (sqcp->inj_dif) {
3065 				/* Logical block guard check failed */
3066 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3067 				return illegal_condition_result;
3068 			} else if (sqcp->inj_dix) {
3069 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3070 				return illegal_condition_result;
3071 			}
3072 		}
3073 	}
3074 	return 0;
3075 }
3076 
3077 /*
3078  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3079  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3080  */
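/*
 * The data-out buffer starts with lbdof logical blocks holding a 32 byte
 * header plus one 32 byte LBA range descriptor per range; the write data
 * for the ranges follows, packed in descriptor order.
 */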
3081 static int resp_write_scat(struct scsi_cmnd *scp,
3082 			   struct sdebug_dev_info *devip)
3083 {
3084 	u8 *cmd = scp->cmnd;
3085 	u8 *lrdp = NULL;
3086 	u8 *up;
3087 	u8 wrprotect;
3088 	u16 lbdof, num_lrd, k;
3089 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3090 	u32 lb_size = sdebug_sector_size;
3091 	u32 ei_lba;
3092 	u64 lba;
3093 	unsigned long iflags;
3094 	int ret, res;
3095 	bool is_16;
3096 	static const u32 lrd_size = 32; /* LBA range descriptor and header size */
3097 
3098 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3099 		is_16 = false;
3100 		wrprotect = (cmd[10] >> 5) & 0x7;
3101 		lbdof = get_unaligned_be16(cmd + 12);
3102 		num_lrd = get_unaligned_be16(cmd + 16);
3103 		bt_len = get_unaligned_be32(cmd + 28);
3104 	} else {        /* that leaves WRITE SCATTERED(16) */
3105 		is_16 = true;
3106 		wrprotect = (cmd[2] >> 5) & 0x7;
3107 		lbdof = get_unaligned_be16(cmd + 4);
3108 		num_lrd = get_unaligned_be16(cmd + 8);
3109 		bt_len = get_unaligned_be32(cmd + 10);
3110 		if (unlikely(have_dif_prot)) {
3111 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3112 			    wrprotect) {
3113 				mk_sense_invalid_opcode(scp);
3114 				return illegal_condition_result;
3115 			}
3116 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3117 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3118 			     wrprotect == 0)
3119 				sdev_printk(KERN_ERR, scp->device,
3120 					    "Unprotected WR to DIF device\n");
3121 		}
3122 	}
3123 	if ((num_lrd == 0) || (bt_len == 0))
3124 		return 0;       /* T10 says these do-nothings are not errors */
3125 	if (lbdof == 0) {
3126 		if (sdebug_verbose)
3127 			sdev_printk(KERN_INFO, scp->device,
3128 				"%s: %s: LB Data Offset field bad\n",
3129 				my_name, __func__);
3130 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3131 		return illegal_condition_result;
3132 	}
3133 	lbdof_blen = lbdof * lb_size;
3134 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3135 		if (sdebug_verbose)
3136 			sdev_printk(KERN_INFO, scp->device,
3137 				"%s: %s: LBA range descriptors don't fit\n",
3138 				my_name, __func__);
3139 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3140 		return illegal_condition_result;
3141 	}
3142 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3143 	if (lrdp == NULL)
3144 		return SCSI_MLQUEUE_HOST_BUSY;
3145 	if (sdebug_verbose)
3146 		sdev_printk(KERN_INFO, scp->device,
3147 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3148 			my_name, __func__, lbdof_blen);
3149 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3150 	if (res == -1) {
3151 		ret = DID_ERROR << 16;
3152 		goto err_out;
3153 	}
3154 
3155 	write_lock_irqsave(&atomic_rw, iflags);
3156 	sg_off = lbdof_blen;
3157 	/* Spec says the Buffer Transfer Length field counts LBs in dout */
3158 	cum_lb = 0;
3159 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3160 		lba = get_unaligned_be64(up + 0);
3161 		num = get_unaligned_be32(up + 8);
3162 		if (sdebug_verbose)
3163 			sdev_printk(KERN_INFO, scp->device,
3164 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3165 				my_name, __func__, k, lba, num, sg_off);
3166 		if (num == 0)
3167 			continue;
3168 		ret = check_device_access_params(scp, lba, num, true);
3169 		if (ret)
3170 			goto err_out_unlock;
3171 		num_by = num * lb_size;
3172 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3173 
3174 		if ((cum_lb + num) > bt_len) {
3175 			if (sdebug_verbose)
3176 				sdev_printk(KERN_INFO, scp->device,
3177 				    "%s: %s: sum of blocks > data provided\n",
3178 				    my_name, __func__);
3179 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3180 					0);
3181 			ret = illegal_condition_result;
3182 			goto err_out_unlock;
3183 		}
3184 
3185 		/* DIX + T10 DIF */
3186 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3187 			int prot_ret = prot_verify_write(scp, lba, num,
3188 							 ei_lba);
3189 
3190 			if (prot_ret) {
3191 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3192 						prot_ret);
3193 				ret = illegal_condition_result;
3194 				goto err_out_unlock;
3195 			}
3196 		}
3197 
3198 		ret = do_device_access(scp, sg_off, lba, num, true);
3199 		if (unlikely(scsi_debug_lbp()))
3200 			map_region(lba, num);
3201 		if (unlikely(-1 == ret)) {
3202 			ret = DID_ERROR << 16;
3203 			goto err_out_unlock;
3204 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3205 			sdev_printk(KERN_INFO, scp->device,
3206 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3207 			    my_name, num_by, ret);
3208 
3209 		if (unlikely(sdebug_any_injecting_opt)) {
3210 			struct sdebug_queued_cmd *sqcp =
3211 				(struct sdebug_queued_cmd *)scp->host_scribble;
3212 
3213 			if (sqcp) {
3214 				if (sqcp->inj_recovered) {
3215 					mk_sense_buffer(scp, RECOVERED_ERROR,
3216 							THRESHOLD_EXCEEDED, 0);
3217 					ret = illegal_condition_result;
3218 					goto err_out_unlock;
3219 				} else if (sqcp->inj_dif) {
3220 					/* Logical block guard check failed */
3221 					mk_sense_buffer(scp, ABORTED_COMMAND,
3222 							0x10, 1);
3223 					ret = illegal_condition_result;
3224 					goto err_out_unlock;
3225 				} else if (sqcp->inj_dix) {
3226 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3227 							0x10, 1);
3228 					ret = illegal_condition_result;
3229 					goto err_out_unlock;
3230 				}
3231 			}
3232 		}
3233 		sg_off += num_by;
3234 		cum_lb += num;
3235 	}
3236 	ret = 0;
3237 err_out_unlock:
3238 	write_unlock_irqrestore(&atomic_rw, iflags);
3239 err_out:
3240 	kfree(lrdp);
3241 	return ret;
3242 }
3243 
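/*
 * Illustrative sketch, not part of the driver: the layout of one 32 byte
 * LBA range descriptor that the loop in resp_write_scat() above decodes.
 * The struct and helper names are hypothetical; the driver reads the
 * fields directly with the get_unaligned_be*() accessors instead.
 */
struct sdeb_lrd_sketch {
	__be64 lba;		/* bytes 0..7: starting LBA */
	__be32 num_blocks;	/* bytes 8..11: blocks in this range */
	__be32 exp_ilbrt;	/* bytes 12..15: ei_lba above (32 bit variant) */
	u8 rest[16];		/* bytes 16..31: tags/reserved, unused here */
};

static inline void sdeb_decode_lrd_sketch(const u8 *up, u64 *lba, u32 *num)
{
	*lba = get_unaligned_be64(up + 0);
	*num = get_unaligned_be32(up + 8);
}
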
3244 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3245 			   u32 ei_lba, bool unmap, bool ndob)
3246 {
3247 	int ret;
3248 	unsigned long iflags;
3249 	unsigned long long i;
3250 	u32 lb_size = sdebug_sector_size;
3251 	u64 block, lbaa;
3252 	u8 *fs1p;
3253 
3254 	ret = check_device_access_params(scp, lba, num, true);
3255 	if (ret)
3256 		return ret;
3257 
3258 	write_lock_irqsave(&atomic_rw, iflags);
3259 
3260 	if (unmap && scsi_debug_lbp()) {
3261 		unmap_region(lba, num);
3262 		goto out;
3263 	}
3264 	lbaa = lba;
3265 	block = do_div(lbaa, sdebug_store_sectors);
3266 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
3267 	fs1p = fake_storep + (block * lb_size);
3268 	if (ndob) {
3269 		memset(fs1p, 0, lb_size);
3270 		ret = 0;
3271 	} else
3272 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3273 
3274 	if (-1 == ret) {
3275 		write_unlock_irqrestore(&atomic_rw, iflags);
3276 		return DID_ERROR << 16;
3277 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
3278 		sdev_printk(KERN_INFO, scp->device,
3279 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
3280 			    my_name, "write same", lb_size, ret);
3281 
3282 	/* Copy first sector to remaining blocks */
3283 	for (i = 1 ; i < num ; i++) {
3284 		lbaa = lba + i;
3285 		block = do_div(lbaa, sdebug_store_sectors);
3286 		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3287 	}
3288 	if (scsi_debug_lbp())
3289 		map_region(lba, num);
3290 out:
3291 	write_unlock_irqrestore(&atomic_rw, iflags);
3292 
3293 	return 0;
3294 }
3295 
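/*
 * Illustrative sketch, not part of the driver: do_div() divides its first
 * argument in place and returns the remainder, so the idiom used above maps
 * any virtual LBA onto the (possibly smaller) backing store by wrapping.
 * The helper name is hypothetical.
 */
static inline u32 sdeb_store_block_sketch(u64 lba)
{
	u64 tmp = lba;

	return do_div(tmp, sdebug_store_sectors);	/* lba % store sectors */
}
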
3296 static int resp_write_same_10(struct scsi_cmnd *scp,
3297 			      struct sdebug_dev_info *devip)
3298 {
3299 	u8 *cmd = scp->cmnd;
3300 	u32 lba;
3301 	u16 num;
3302 	u32 ei_lba = 0;
3303 	bool unmap = false;
3304 
3305 	if (cmd[1] & 0x8) {
3306 		if (sdebug_lbpws10 == 0) {
3307 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3308 			return check_condition_result;
3309 		} else
3310 			unmap = true;
3311 	}
3312 	lba = get_unaligned_be32(cmd + 2);
3313 	num = get_unaligned_be16(cmd + 7);
3314 	if (num > sdebug_write_same_length) {
3315 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3316 		return check_condition_result;
3317 	}
3318 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3319 }
3320 
3321 static int resp_write_same_16(struct scsi_cmnd *scp,
3322 			      struct sdebug_dev_info *devip)
3323 {
3324 	u8 *cmd = scp->cmnd;
3325 	u64 lba;
3326 	u32 num;
3327 	u32 ei_lba = 0;
3328 	bool unmap = false;
3329 	bool ndob = false;
3330 
3331 	if (cmd[1] & 0x8) {	/* UNMAP */
3332 		if (sdebug_lbpws == 0) {
3333 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3334 			return check_condition_result;
3335 		} else
3336 			unmap = true;
3337 	}
3338 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3339 		ndob = true;
3340 	lba = get_unaligned_be64(cmd + 2);
3341 	num = get_unaligned_be32(cmd + 10);
3342 	if (num > sdebug_write_same_length) {
3343 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3344 		return check_condition_result;
3345 	}
3346 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3347 }
3348 
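/*
 * Illustrative sketch, not part of the driver: the WRITE SAME(16) CDB
 * fields picked apart in resp_write_same_16() above (opcode 0x93; UNMAP is
 * bit 3 and NDOB bit 0 of byte 1, LBA in bytes 2..9, block count in bytes
 * 10..13). The helper name is hypothetical.
 */
static inline void sdeb_decode_ws16_sketch(const u8 *cdb, u64 *lba, u32 *num,
					   bool *unmap, bool *ndob)
{
	*unmap = !!(cdb[1] & 0x8);
	*ndob = !!(cdb[1] & 0x1);
	*lba = get_unaligned_be64(cdb + 2);
	*num = get_unaligned_be32(cdb + 10);
}
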
3349 /* Note the mode field is in the same position as the (lower) service action
3350  * field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4 suggests
3351  * each mode of this command be reported separately; that is future work. */
3352 static int resp_write_buffer(struct scsi_cmnd *scp,
3353 			     struct sdebug_dev_info *devip)
3354 {
3355 	u8 *cmd = scp->cmnd;
3356 	struct scsi_device *sdp = scp->device;
3357 	struct sdebug_dev_info *dp;
3358 	u8 mode;
3359 
3360 	mode = cmd[1] & 0x1f;
3361 	switch (mode) {
3362 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3363 		/* set UAs on this device only */
3364 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3365 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3366 		break;
3367 	case 0x5:	/* download MC, save and ACT */
3368 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3369 		break;
3370 	case 0x6:	/* download MC with offsets and ACT */
3371 		/* set UAs on most devices (LUs) in this target */
3372 		list_for_each_entry(dp,
3373 				    &devip->sdbg_host->dev_info_list,
3374 				    dev_list)
3375 			if (dp->target == sdp->id) {
3376 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3377 				if (devip != dp)
3378 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3379 						dp->uas_bm);
3380 			}
3381 		break;
3382 	case 0x7:	/* download MC with offsets, save, and ACT */
3383 		/* set UA on all devices (LUs) in this target */
3384 		list_for_each_entry(dp,
3385 				    &devip->sdbg_host->dev_info_list,
3386 				    dev_list)
3387 			if (dp->target == sdp->id)
3388 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3389 					dp->uas_bm);
3390 		break;
3391 	default:
3392 		/* do nothing for this command for other mode values */
3393 		break;
3394 	}
3395 	return 0;
3396 }
3397 
3398 static int resp_comp_write(struct scsi_cmnd *scp,
3399 			   struct sdebug_dev_info *devip)
3400 {
3401 	u8 *cmd = scp->cmnd;
3402 	u8 *arr;
3403 	u8 *fake_storep_hold;
3404 	u64 lba;
3405 	u32 dnum;
3406 	u32 lb_size = sdebug_sector_size;
3407 	u8 num;
3408 	unsigned long iflags;
3409 	int ret;
3410 	int retval = 0;
3411 
3412 	lba = get_unaligned_be64(cmd + 2);
3413 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3414 	if (0 == num)
3415 		return 0;	/* degenerate case, not an error */
3416 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3417 	    (cmd[1] & 0xe0)) {
3418 		mk_sense_invalid_opcode(scp);
3419 		return check_condition_result;
3420 	}
3421 	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3422 	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3423 	    (cmd[1] & 0xe0) == 0)
3424 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3425 			    "to DIF device\n");
3426 	ret = check_device_access_params(scp, lba, num, false);
3427 	if (ret)
3428 		return ret;
3429 	dnum = 2 * num;
3430 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3431 	if (NULL == arr) {
3432 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3433 				INSUFF_RES_ASCQ);
3434 		return check_condition_result;
3435 	}
3436 
3437 	write_lock_irqsave(&atomic_rw, iflags);
3438 
3439 	/* trick do_device_access() to fetch both compare and write buffers
3440 	 * from data-in into arr. Safe (atomic) since write_lock held. */
3441 	fake_storep_hold = fake_storep;
3442 	fake_storep = arr;
3443 	ret = do_device_access(scp, 0, 0, dnum, true);
3444 	fake_storep = fake_storep_hold;
3445 	if (ret == -1) {
3446 		retval = DID_ERROR << 16;
3447 		goto cleanup;
3448 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3449 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3450 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3451 			    dnum * lb_size, ret);
3452 	if (!comp_write_worker(lba, num, arr)) {
3453 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3454 		retval = check_condition_result;
3455 		goto cleanup;
3456 	}
3457 	if (scsi_debug_lbp())
3458 		map_region(lba, num);
3459 cleanup:
3460 	write_unlock_irqrestore(&atomic_rw, iflags);
3461 	kfree(arr);
3462 	return retval;
3463 }
3464 
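/*
 * Illustrative sketch, not part of the driver: the essence of what
 * comp_write_worker() is expected to do with the doubled buffer fetched
 * above: the first num blocks of arr carry the verify data and the second
 * num blocks the new data. Names are hypothetical.
 */
static bool sdeb_comp_write_sketch(u8 *store, const u8 *arr, u32 num,
				   u32 lb_size)
{
	u32 half = num * lb_size;

	if (memcmp(store, arr, half))		/* miscompare */
		return false;
	memcpy(store, arr + half, half);	/* matched, so write new data */
	return true;
}
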
3465 struct unmap_block_desc {
3466 	__be64	lba;
3467 	__be32	blocks;
3468 	__be32	__reserved;
3469 };
3470 
3471 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3472 {
3473 	unsigned char *buf;
3474 	struct unmap_block_desc *desc;
3475 	unsigned int i, payload_len, descriptors;
3476 	int ret;
3477 	unsigned long iflags;
3478 
3479 
3480 	if (!scsi_debug_lbp())
3481 		return 0;	/* fib and say it's done */
3482 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3483 	BUG_ON(scsi_bufflen(scp) != payload_len);
3484 
3485 	descriptors = (payload_len - 8) / 16;
3486 	if (descriptors > sdebug_unmap_max_desc) {
3487 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3488 		return check_condition_result;
3489 	}
3490 
3491 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3492 	if (!buf) {
3493 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3494 				INSUFF_RES_ASCQ);
3495 		return check_condition_result;
3496 	}
3497 
3498 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3499 
3500 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3501 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3502 
3503 	desc = (void *)&buf[8];
3504 
3505 	write_lock_irqsave(&atomic_rw, iflags);
3506 
3507 	for (i = 0 ; i < descriptors ; i++) {
3508 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3509 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3510 
3511 		ret = check_device_access_params(scp, lba, num, true);
3512 		if (ret)
3513 			goto out;
3514 
3515 		unmap_region(lba, num);
3516 	}
3517 
3518 	ret = 0;
3519 
3520 out:
3521 	write_unlock_irqrestore(&atomic_rw, iflags);
3522 	kfree(buf);
3523 
3524 	return ret;
3525 }
3526 
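/*
 * Illustrative sketch, not part of the driver: building the UNMAP
 * parameter list that resp_unmap() parses above: an 8 byte header followed
 * by one or more 16 byte block descriptors. The helper name is
 * hypothetical; this variant emits a single descriptor.
 */
static void sdeb_fill_unmap_pl_sketch(u8 *buf, u64 lba, u32 blocks)
{
	put_unaligned_be16(2 + 4 + 16, buf);	/* UNMAP data length */
	put_unaligned_be16(16, buf + 2);	/* block descriptor data length */
	/* bytes 4..7 of the header are reserved */
	put_unaligned_be64(lba, buf + 8);	/* descriptor 0: starting LBA */
	put_unaligned_be32(blocks, buf + 16);	/* descriptor 0: block count */
	/* bytes 20..23 of the descriptor are reserved */
}
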
3527 #define SDEBUG_GET_LBA_STATUS_LEN 32
3528 
3529 static int resp_get_lba_status(struct scsi_cmnd *scp,
3530 			       struct sdebug_dev_info *devip)
3531 {
3532 	u8 *cmd = scp->cmnd;
3533 	u64 lba;
3534 	u32 alloc_len, mapped, num;
3535 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3536 	int ret;
3537 
3538 	lba = get_unaligned_be64(cmd + 2);
3539 	alloc_len = get_unaligned_be32(cmd + 10);
3540 
3541 	if (alloc_len < 24)
3542 		return 0;
3543 
3544 	ret = check_device_access_params(scp, lba, 1, false);
3545 	if (ret)
3546 		return ret;
3547 
3548 	if (scsi_debug_lbp())
3549 		mapped = map_state(lba, &num);
3550 	else {
3551 		mapped = 1;
3552 		/* following just in case virtual_gb changed */
3553 		sdebug_capacity = get_sdebug_capacity();
3554 		if (sdebug_capacity - lba <= 0xffffffff)
3555 			num = sdebug_capacity - lba;
3556 		else
3557 			num = 0xffffffff;
3558 	}
3559 
3560 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3561 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3562 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3563 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3564 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3565 
3566 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3567 }
3568 
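/*
 * Illustrative sketch, not part of the driver: decoding the single LBA
 * status descriptor that resp_get_lba_status() builds above. The helper
 * name is hypothetical.
 */
static void sdeb_parse_lba_status_sketch(const u8 *arr, u64 *lba, u32 *num,
					 bool *mapped)
{
	/* bytes 0..3: parameter data length (20 here); bytes 4..7 reserved */
	*lba = get_unaligned_be64(arr + 8);	/* descriptor: starting LBA */
	*num = get_unaligned_be32(arr + 16);	/* blocks with this status */
	*mapped = (arr[20] & 0xf) == 0;		/* prov status 0 -> mapped */
}
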
3569 static int resp_sync_cache(struct scsi_cmnd *scp,
3570 			   struct sdebug_dev_info *devip)
3571 {
3572 	int res = 0;
3573 	u64 lba;
3574 	u32 num_blocks;
3575 	u8 *cmd = scp->cmnd;
3576 
3577 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3578 		lba = get_unaligned_be32(cmd + 2);
3579 		num_blocks = get_unaligned_be16(cmd + 7);
3580 	} else {				/* SYNCHRONIZE_CACHE(16) */
3581 		lba = get_unaligned_be64(cmd + 2);
3582 		num_blocks = get_unaligned_be32(cmd + 10);
3583 	}
3584 	if (lba + num_blocks > sdebug_capacity) {
3585 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3586 		return check_condition_result;
3587 	}
3588 	if (!write_since_sync || cmd[1] & 0x2)
3589 		res = SDEG_RES_IMMED_MASK;
3590 	else		/* delay if write_since_sync and IMMED clear */
3591 		write_since_sync = false;
3592 	return res;
3593 }
3594 
3595 #define RL_BUCKET_ELEMS 8
3596 
3597 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3598  * (W-LUN), the normal Linux scanning logic does not associate it with a
3599  * device (e.g. /dev/sg7). The following magic will make that association:
3600  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3601  * where <n> is a host number. If there are multiple targets in a host then
3602  * the above will associate a W-LUN with each target. To get a W-LUN for
3603  * target 2 only, use "echo '- 2 49409' > scan".
3604  */
3605 static int resp_report_luns(struct scsi_cmnd *scp,
3606 			    struct sdebug_dev_info *devip)
3607 {
3608 	unsigned char *cmd = scp->cmnd;
3609 	unsigned int alloc_len;
3610 	unsigned char select_report;
3611 	u64 lun;
3612 	struct scsi_lun *lun_p;
3613 	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3614 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
3615 	unsigned int wlun_cnt;	/* report luns W-LUN count */
3616 	unsigned int tlun_cnt;	/* total LUN count */
3617 	unsigned int rlen;	/* response length (in bytes) */
3618 	int k, j, n, res;
3619 	unsigned int off_rsp = 0;
3620 	const int sz_lun = sizeof(struct scsi_lun);
3621 
3622 	clear_luns_changed_on_target(devip);
3623 
3624 	select_report = cmd[2];
3625 	alloc_len = get_unaligned_be32(cmd + 6);
3626 
3627 	if (alloc_len < 4) {
3628 		pr_err("alloc len too small %d\n", alloc_len);
3629 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3630 		return check_condition_result;
3631 	}
3632 
3633 	switch (select_report) {
3634 	case 0:		/* all LUNs apart from W-LUNs */
3635 		lun_cnt = sdebug_max_luns;
3636 		wlun_cnt = 0;
3637 		break;
3638 	case 1:		/* only W-LUNs */
3639 		lun_cnt = 0;
3640 		wlun_cnt = 1;
3641 		break;
3642 	case 2:		/* all LUNs */
3643 		lun_cnt = sdebug_max_luns;
3644 		wlun_cnt = 1;
3645 		break;
3646 	case 0x10:	/* only administrative LUs */
3647 	case 0x11:	/* see SPC-5 */
3648 	case 0x12:	/* only subsidiary LUs owned by referenced LU */
3649 	default:
3650 		pr_debug("select report invalid %d\n", select_report);
3651 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3652 		return check_condition_result;
3653 	}
3654 
3655 	if (sdebug_no_lun_0 && (lun_cnt > 0))
3656 		--lun_cnt;
3657 
3658 	tlun_cnt = lun_cnt + wlun_cnt;
3659 	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
3660 	scsi_set_resid(scp, scsi_bufflen(scp));
3661 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3662 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3663 
3664 	/* loops rely on response header and LUN entry being the same size (8) */
3665 	lun = sdebug_no_lun_0 ? 1 : 0;
3666 	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3667 		memset(arr, 0, sizeof(arr));
3668 		lun_p = (struct scsi_lun *)&arr[0];
3669 		if (k == 0) {
3670 			put_unaligned_be32(rlen, &arr[0]);
3671 			++lun_p;
3672 			j = 1;
3673 		}
3674 		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3675 			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3676 				break;
3677 			int_to_scsilun(lun++, lun_p);
3678 		}
3679 		if (j < RL_BUCKET_ELEMS)
3680 			break;
3681 		n = j * sz_lun;
3682 		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3683 		if (res)
3684 			return res;
3685 		off_rsp += n;
3686 	}
3687 	if (wlun_cnt) {
3688 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3689 		++j;
3690 	}
3691 	if (j > 0)
3692 		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3693 	return res;
3694 }
3695 
3696 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3697 {
3698 	u32 tag = blk_mq_unique_tag(cmnd->request);
3699 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3700 
3701 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3702 	if (WARN_ON_ONCE(hwq >= submit_queues))
3703 		hwq = 0;
3704 	return sdebug_q_arr + hwq;
3705 }
3706 
3707 /* Queued (deferred) command completions converge here. */
3708 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3709 {
3710 	bool aborted = sd_dp->aborted;
3711 	int qc_idx;
3712 	int retiring = 0;
3713 	unsigned long iflags;
3714 	struct sdebug_queue *sqp;
3715 	struct sdebug_queued_cmd *sqcp;
3716 	struct scsi_cmnd *scp;
3717 	struct sdebug_dev_info *devip;
3718 
3719 	sd_dp->defer_t = SDEB_DEFER_NONE;
3720 	if (unlikely(aborted))
3721 		sd_dp->aborted = false;
3722 	qc_idx = sd_dp->qc_idx;
3723 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
3724 	if (sdebug_statistics) {
3725 		atomic_inc(&sdebug_completions);
3726 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3727 			atomic_inc(&sdebug_miss_cpus);
3728 	}
3729 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3730 		pr_err("wild qc_idx=%d\n", qc_idx);
3731 		return;
3732 	}
3733 	spin_lock_irqsave(&sqp->qc_lock, iflags);
3734 	sqcp = &sqp->qc_arr[qc_idx];
3735 	scp = sqcp->a_cmnd;
3736 	if (unlikely(scp == NULL)) {
3737 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3738 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3739 		       sd_dp->sqa_idx, qc_idx);
3740 		return;
3741 	}
3742 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3743 	if (likely(devip))
3744 		atomic_dec(&devip->num_in_q);
3745 	else
3746 		pr_err("devip=NULL\n");
3747 	if (unlikely(atomic_read(&retired_max_queue) > 0))
3748 		retiring = 1;
3749 
3750 	sqcp->a_cmnd = NULL;
3751 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3752 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3753 		pr_err("Unexpected completion\n");
3754 		return;
3755 	}
3756 
3757 	if (unlikely(retiring)) {	/* user has reduced max_queue */
3758 		int k, retval;
3759 
3760 		retval = atomic_read(&retired_max_queue);
3761 		if (qc_idx >= retval) {
3762 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3763 			pr_err("index %d too large\n", retval);
3764 			return;
3765 		}
3766 		k = find_last_bit(sqp->in_use_bm, retval);
3767 		if ((k < sdebug_max_queue) || (k == retval))
3768 			atomic_set(&retired_max_queue, 0);
3769 		else
3770 			atomic_set(&retired_max_queue, k + 1);
3771 	}
3772 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3773 	if (unlikely(aborted)) {
3774 		if (sdebug_verbose)
3775 			pr_info("bypassing scsi_done() due to aborted cmd\n");
3776 		return;
3777 	}
3778 	scp->scsi_done(scp); /* callback to mid level */
3779 }
3780 
3781 /* When high resolution timer goes off this function is called. */
3782 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3783 {
3784 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3785 						  hrt);
3786 	sdebug_q_cmd_complete(sd_dp);
3787 	return HRTIMER_NORESTART;
3788 }
3789 
3790 /* When work queue schedules work, it calls this function. */
3791 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3792 {
3793 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3794 						  ew.work);
3795 	sdebug_q_cmd_complete(sd_dp);
3796 }
3797 
3798 static bool got_shared_uuid;
3799 static uuid_t shared_uuid;
3800 
3801 static struct sdebug_dev_info *sdebug_device_create(
3802 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3803 {
3804 	struct sdebug_dev_info *devip;
3805 
3806 	devip = kzalloc(sizeof(*devip), flags);
3807 	if (devip) {
3808 		if (sdebug_uuid_ctl == 1)
3809 			uuid_gen(&devip->lu_name);
3810 		else if (sdebug_uuid_ctl == 2) {
3811 			if (got_shared_uuid)
3812 				devip->lu_name = shared_uuid;
3813 			else {
3814 				uuid_gen(&shared_uuid);
3815 				got_shared_uuid = true;
3816 				devip->lu_name = shared_uuid;
3817 			}
3818 		}
3819 		devip->sdbg_host = sdbg_host;
3820 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3821 	}
3822 	return devip;
3823 }
3824 
3825 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3826 {
3827 	struct sdebug_host_info *sdbg_host;
3828 	struct sdebug_dev_info *open_devip = NULL;
3829 	struct sdebug_dev_info *devip;
3830 
3831 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3832 	if (!sdbg_host) {
3833 		pr_err("Host info NULL\n");
3834 		return NULL;
3835 	}
3836 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3837 		if ((devip->used) && (devip->channel == sdev->channel) &&
3838 		    (devip->target == sdev->id) &&
3839 		    (devip->lun == sdev->lun))
3840 			return devip;
3841 		else {
3842 			if ((!devip->used) && (!open_devip))
3843 				open_devip = devip;
3844 		}
3845 	}
3846 	if (!open_devip) { /* try and make a new one */
3847 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3848 		if (!open_devip) {
3849 			pr_err("out of memory at line %d\n", __LINE__);
3850 			return NULL;
3851 		}
3852 	}
3853 
3854 	open_devip->channel = sdev->channel;
3855 	open_devip->target = sdev->id;
3856 	open_devip->lun = sdev->lun;
3857 	open_devip->sdbg_host = sdbg_host;
3858 	atomic_set(&open_devip->num_in_q, 0);
3859 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3860 	open_devip->used = true;
3861 	return open_devip;
3862 }
3863 
3864 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3865 {
3866 	if (sdebug_verbose)
3867 		pr_info("slave_alloc <%u %u %u %llu>\n",
3868 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3869 	return 0;
3870 }
3871 
3872 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3873 {
3874 	struct sdebug_dev_info *devip =
3875 			(struct sdebug_dev_info *)sdp->hostdata;
3876 
3877 	if (sdebug_verbose)
3878 		pr_info("slave_configure <%u %u %u %llu>\n",
3879 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3880 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3881 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3882 	if (devip == NULL) {
3883 		devip = find_build_dev_info(sdp);
3884 		if (devip == NULL)
3885 			return 1;  /* no resources, will be marked offline */
3886 	}
3887 	sdp->hostdata = devip;
3888 	if (sdebug_no_uld)
3889 		sdp->no_uld_attach = 1;
3890 	config_cdb_len(sdp);
3891 	return 0;
3892 }
3893 
3894 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3895 {
3896 	struct sdebug_dev_info *devip =
3897 		(struct sdebug_dev_info *)sdp->hostdata;
3898 
3899 	if (sdebug_verbose)
3900 		pr_info("slave_destroy <%u %u %u %llu>\n",
3901 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3902 	if (devip) {
3903 		/* make this slot available for re-use */
3904 		devip->used = false;
3905 		sdp->hostdata = NULL;
3906 	}
3907 }
3908 
3909 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3910 			   enum sdeb_defer_type defer_t)
3911 {
3912 	if (!sd_dp)
3913 		return;
3914 	if (defer_t == SDEB_DEFER_HRT)
3915 		hrtimer_cancel(&sd_dp->hrt);
3916 	else if (defer_t == SDEB_DEFER_WQ)
3917 		cancel_work_sync(&sd_dp->ew.work);
3918 }
3919 
3920 /* If @cmnd is found, deletes its timer or work queue and returns true; else
3921    returns false */
3922 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3923 {
3924 	unsigned long iflags;
3925 	int j, k, qmax, r_qmax;
3926 	enum sdeb_defer_type l_defer_t;
3927 	struct sdebug_queue *sqp;
3928 	struct sdebug_queued_cmd *sqcp;
3929 	struct sdebug_dev_info *devip;
3930 	struct sdebug_defer *sd_dp;
3931 
3932 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3933 		spin_lock_irqsave(&sqp->qc_lock, iflags);
3934 		qmax = sdebug_max_queue;
3935 		r_qmax = atomic_read(&retired_max_queue);
3936 		if (r_qmax > qmax)
3937 			qmax = r_qmax;
3938 		for (k = 0; k < qmax; ++k) {
3939 			if (test_bit(k, sqp->in_use_bm)) {
3940 				sqcp = &sqp->qc_arr[k];
3941 				if (cmnd != sqcp->a_cmnd)
3942 					continue;
3943 				/* found */
3944 				devip = (struct sdebug_dev_info *)
3945 						cmnd->device->hostdata;
3946 				if (devip)
3947 					atomic_dec(&devip->num_in_q);
3948 				sqcp->a_cmnd = NULL;
3949 				sd_dp = sqcp->sd_dp;
3950 				if (sd_dp) {
3951 					l_defer_t = sd_dp->defer_t;
3952 					sd_dp->defer_t = SDEB_DEFER_NONE;
3953 				} else
3954 					l_defer_t = SDEB_DEFER_NONE;
3955 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3956 				stop_qc_helper(sd_dp, l_defer_t);
3957 				clear_bit(k, sqp->in_use_bm);
3958 				return true;
3959 			}
3960 		}
3961 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3962 	}
3963 	return false;
3964 }
3965 
3966 /* Deletes (stops) timers or work queues of all queued commands */
3967 static void stop_all_queued(void)
3968 {
3969 	unsigned long iflags;
3970 	int j, k;
3971 	enum sdeb_defer_type l_defer_t;
3972 	struct sdebug_queue *sqp;
3973 	struct sdebug_queued_cmd *sqcp;
3974 	struct sdebug_dev_info *devip;
3975 	struct sdebug_defer *sd_dp;
3976 
3977 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3978 		spin_lock_irqsave(&sqp->qc_lock, iflags);
3979 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3980 			if (test_bit(k, sqp->in_use_bm)) {
3981 				sqcp = &sqp->qc_arr[k];
3982 				if (sqcp->a_cmnd == NULL)
3983 					continue;
3984 				devip = (struct sdebug_dev_info *)
3985 					sqcp->a_cmnd->device->hostdata;
3986 				if (devip)
3987 					atomic_dec(&devip->num_in_q);
3988 				sqcp->a_cmnd = NULL;
3989 				sd_dp = sqcp->sd_dp;
3990 				if (sd_dp) {
3991 					l_defer_t = sd_dp->defer_t;
3992 					sd_dp->defer_t = SDEB_DEFER_NONE;
3993 				} else
3994 					l_defer_t = SDEB_DEFER_NONE;
3995 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3996 				stop_qc_helper(sd_dp, l_defer_t);
3997 				clear_bit(k, sqp->in_use_bm);
3998 				spin_lock_irqsave(&sqp->qc_lock, iflags);
3999 			}
4000 		}
4001 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4002 	}
4003 }
4004 
4005 /* Free queued command memory on heap */
4006 static void free_all_queued(void)
4007 {
4008 	int j, k;
4009 	struct sdebug_queue *sqp;
4010 	struct sdebug_queued_cmd *sqcp;
4011 
4012 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4013 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4014 			sqcp = &sqp->qc_arr[k];
4015 			kfree(sqcp->sd_dp);
4016 			sqcp->sd_dp = NULL;
4017 		}
4018 	}
4019 }
4020 
4021 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4022 {
4023 	bool ok;
4024 
4025 	++num_aborts;
4026 	if (SCpnt) {
4027 		ok = stop_queued_cmnd(SCpnt);
4028 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4029 			sdev_printk(KERN_INFO, SCpnt->device,
4030 				    "%s: command%s found\n", __func__,
4031 				    ok ? "" : " not");
4032 	}
4033 	return SUCCESS;
4034 }
4035 
4036 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4037 {
4038 	++num_dev_resets;
4039 	if (SCpnt && SCpnt->device) {
4040 		struct scsi_device *sdp = SCpnt->device;
4041 		struct sdebug_dev_info *devip =
4042 				(struct sdebug_dev_info *)sdp->hostdata;
4043 
4044 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4045 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4046 		if (devip)
4047 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4048 	}
4049 	return SUCCESS;
4050 }
4051 
4052 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4053 {
4054 	struct sdebug_host_info *sdbg_host;
4055 	struct sdebug_dev_info *devip;
4056 	struct scsi_device *sdp;
4057 	struct Scsi_Host *hp;
4058 	int k = 0;
4059 
4060 	++num_target_resets;
4061 	if (!SCpnt)
4062 		goto lie;
4063 	sdp = SCpnt->device;
4064 	if (!sdp)
4065 		goto lie;
4066 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4067 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4068 	hp = sdp->host;
4069 	if (!hp)
4070 		goto lie;
4071 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4072 	if (sdbg_host) {
4073 		list_for_each_entry(devip,
4074 				    &sdbg_host->dev_info_list,
4075 				    dev_list)
4076 			if (devip->target == sdp->id) {
4077 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4078 				++k;
4079 			}
4080 	}
4081 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4082 		sdev_printk(KERN_INFO, sdp,
4083 			    "%s: %d device(s) found in target\n", __func__, k);
4084 lie:
4085 	return SUCCESS;
4086 }
4087 
4088 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4089 {
4090 	struct sdebug_host_info *sdbg_host;
4091 	struct sdebug_dev_info *devip;
4092 	struct scsi_device *sdp;
4093 	struct Scsi_Host *hp;
4094 	int k = 0;
4095 
4096 	++num_bus_resets;
4097 	if (!(SCpnt && SCpnt->device))
4098 		goto lie;
4099 	sdp = SCpnt->device;
4100 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4101 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4102 	hp = sdp->host;
4103 	if (hp) {
4104 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4105 		if (sdbg_host) {
4106 			list_for_each_entry(devip,
4107 					    &sdbg_host->dev_info_list,
4108 					    dev_list) {
4109 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4110 				++k;
4111 			}
4112 		}
4113 	}
4114 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4115 		sdev_printk(KERN_INFO, sdp,
4116 			    "%s: %d device(s) found in host\n", __func__, k);
4117 lie:
4118 	return SUCCESS;
4119 }
4120 
4121 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4122 {
4123 	struct sdebug_host_info *sdbg_host;
4124 	struct sdebug_dev_info *devip;
4125 	int k = 0;
4126 
4127 	++num_host_resets;
4128 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4129 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4130 	spin_lock(&sdebug_host_list_lock);
4131 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4132 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4133 				    dev_list) {
4134 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4135 			++k;
4136 		}
4137 	}
4138 	spin_unlock(&sdebug_host_list_lock);
4139 	stop_all_queued();
4140 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4141 		sdev_printk(KERN_INFO, SCpnt->device,
4142 			    "%s: %d device(s) found\n", __func__, k);
4143 	return SUCCESS;
4144 }
4145 
4146 static void __init sdebug_build_parts(unsigned char *ramp,
4147 				      unsigned long store_size)
4148 {
4149 	struct partition *pp;
4150 	int starts[SDEBUG_MAX_PARTS + 2];
4151 	int sectors_per_part, num_sectors, k;
4152 	int heads_by_sects, start_sec, end_sec;
4153 
4154 	/* assume partition table already zeroed */
4155 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
4156 		return;
4157 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4158 		sdebug_num_parts = SDEBUG_MAX_PARTS;
4159 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4160 	}
4161 	num_sectors = (int)sdebug_store_sectors;
4162 	sectors_per_part = (num_sectors - sdebug_sectors_per)
4163 			   / sdebug_num_parts;
4164 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
4165 	starts[0] = sdebug_sectors_per;
4166 	for (k = 1; k < sdebug_num_parts; ++k)
4167 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
4168 			    * heads_by_sects;
4169 	starts[sdebug_num_parts] = num_sectors;
4170 	starts[sdebug_num_parts + 1] = 0;
4171 
4172 	ramp[510] = 0x55;	/* magic partition markings */
4173 	ramp[511] = 0xAA;
4174 	pp = (struct partition *)(ramp + 0x1be);
4175 	for (k = 0; starts[k + 1]; ++k, ++pp) {
4176 		start_sec = starts[k];
4177 		end_sec = starts[k + 1] - 1;
4178 		pp->boot_ind = 0;
4179 
4180 		pp->cyl = start_sec / heads_by_sects;
4181 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
4182 			   / sdebug_sectors_per;
4183 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
4184 
4185 		pp->end_cyl = end_sec / heads_by_sects;
4186 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4187 			       / sdebug_sectors_per;
4188 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4189 
4190 		pp->start_sect = cpu_to_le32(start_sec);
4191 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4192 		pp->sys_ind = 0x83;	/* plain Linux partition */
4193 	}
4194 }
4195 
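/*
 * Illustrative sketch, not part of the driver: the absolute sector to CHS
 * conversion that sdebug_build_parts() above applies to each partition
 * boundary. The struct and helper names are hypothetical.
 */
struct sdeb_chs_sketch {
	int cyl, head, sector;
};

static struct sdeb_chs_sketch sdeb_sect_to_chs_sketch(int sect)
{
	struct sdeb_chs_sketch chs;
	int heads_by_sects = sdebug_heads * sdebug_sectors_per;

	chs.cyl = sect / heads_by_sects;
	chs.head = (sect - (chs.cyl * heads_by_sects)) / sdebug_sectors_per;
	chs.sector = (sect % sdebug_sectors_per) + 1;	/* sectors are 1 based */
	return chs;
}
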
4196 static void block_unblock_all_queues(bool block)
4197 {
4198 	int j;
4199 	struct sdebug_queue *sqp;
4200 
4201 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4202 		atomic_set(&sqp->blocked, (int)block);
4203 }
4204 
4205 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4206  * commands will be processed normally before triggers occur.
4207  */
4208 static void tweak_cmnd_count(void)
4209 {
4210 	int count, modulo;
4211 
4212 	modulo = abs(sdebug_every_nth);
4213 	if (modulo < 2)
4214 		return;
4215 	block_unblock_all_queues(true);
4216 	count = atomic_read(&sdebug_cmnd_count);
4217 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4218 	block_unblock_all_queues(false);
4219 }
4220 
4221 static void clear_queue_stats(void)
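/*
 * Worked example for tweak_cmnd_count() above, assuming every_nth=100:
 * a cmnd_count of 1057 is rounded down to 1000, the nearest multiple of
 * 100, restarting the every_nth cycle so that abs(every_nth)-1 commands
 * are processed normally before the next trigger.
 */
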
4222 {
4223 	atomic_set(&sdebug_cmnd_count, 0);
4224 	atomic_set(&sdebug_completions, 0);
4225 	atomic_set(&sdebug_miss_cpus, 0);
4226 	atomic_set(&sdebug_a_tsf, 0);
4227 }
4228 
4229 static void setup_inject(struct sdebug_queue *sqp,
4230 			 struct sdebug_queued_cmd *sqcp)
4231 {
4232 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4233 		if (sdebug_every_nth > 0)
4234 			sqcp->inj_recovered = sqcp->inj_transport
4235 				= sqcp->inj_dif
4236 				= sqcp->inj_dix = sqcp->inj_short
4237 				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
4238 		return;
4239 	}
4240 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4241 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4242 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4243 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4244 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4245 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4246 	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
4247 }
4248 
4249 /* Complete the processing of the thread that queued a SCSI command to this
4250  * driver. It either completes the command by calling scsi_done() or
4251  * schedules an hrtimer or work queue and then returns 0. Returns
4252  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4253  */
4254 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4255 			 int scsi_result,
4256 			 int (*pfp)(struct scsi_cmnd *,
4257 				    struct sdebug_dev_info *),
4258 			 int delta_jiff, int ndelay)
4259 {
4260 	unsigned long iflags;
4261 	int k, num_in_q, qdepth, inject;
4262 	struct sdebug_queue *sqp;
4263 	struct sdebug_queued_cmd *sqcp;
4264 	struct scsi_device *sdp;
4265 	struct sdebug_defer *sd_dp;
4266 
4267 	if (unlikely(devip == NULL)) {
4268 		if (scsi_result == 0)
4269 			scsi_result = DID_NO_CONNECT << 16;
4270 		goto respond_in_thread;
4271 	}
4272 	sdp = cmnd->device;
4273 
4274 	if (delta_jiff == 0)
4275 		goto respond_in_thread;
4276 
4277 	/* schedule the response at a later time if resources permit */
4278 	sqp = get_queue(cmnd);
4279 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4280 	if (unlikely(atomic_read(&sqp->blocked))) {
4281 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4282 		return SCSI_MLQUEUE_HOST_BUSY;
4283 	}
4284 	num_in_q = atomic_read(&devip->num_in_q);
4285 	qdepth = cmnd->device->queue_depth;
4286 	inject = 0;
4287 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4288 		if (scsi_result) {
4289 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4290 			goto respond_in_thread;
4291 		} else
4292 			scsi_result = device_qfull_result;
4293 	} else if (unlikely(sdebug_every_nth &&
4294 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4295 			    (scsi_result == 0))) {
4296 		if ((num_in_q == (qdepth - 1)) &&
4297 		    (atomic_inc_return(&sdebug_a_tsf) >=
4298 		     abs(sdebug_every_nth))) {
4299 			atomic_set(&sdebug_a_tsf, 0);
4300 			inject = 1;
4301 			scsi_result = device_qfull_result;
4302 		}
4303 	}
4304 
4305 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4306 	if (unlikely(k >= sdebug_max_queue)) {
4307 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4308 		if (scsi_result)
4309 			goto respond_in_thread;
4310 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4311 			scsi_result = device_qfull_result;
4312 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4313 			sdev_printk(KERN_INFO, sdp,
4314 				    "%s: max_queue=%d exceeded, %s\n",
4315 				    __func__, sdebug_max_queue,
4316 				    (scsi_result ?  "status: TASK SET FULL" :
4317 						    "report: host busy"));
4318 		if (scsi_result)
4319 			goto respond_in_thread;
4320 		else
4321 			return SCSI_MLQUEUE_HOST_BUSY;
4322 	}
4323 	__set_bit(k, sqp->in_use_bm);
4324 	atomic_inc(&devip->num_in_q);
4325 	sqcp = &sqp->qc_arr[k];
4326 	sqcp->a_cmnd = cmnd;
4327 	cmnd->host_scribble = (unsigned char *)sqcp;
4328 	sd_dp = sqcp->sd_dp;
4329 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4330 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4331 		setup_inject(sqp, sqcp);
4332 	if (sd_dp == NULL) {
4333 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4334 		if (sd_dp == NULL)
4335 			return SCSI_MLQUEUE_HOST_BUSY;
4336 	}
4337 
4338 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4339 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
4340 		/*
4341 		 * This is the F_DELAY_OVERR case. No delay.
4342 		 */
4343 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
4344 		delta_jiff = ndelay = 0;
4345 	}
4346 	if (cmnd->result == 0 && scsi_result != 0)
4347 		cmnd->result = scsi_result;
4348 
4349 	if (unlikely(sdebug_verbose && cmnd->result))
4350 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4351 			    __func__, cmnd->result);
4352 
4353 	if (delta_jiff > 0 || ndelay > 0) {
4354 		ktime_t kt;
4355 
4356 		if (delta_jiff > 0) {
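			/* one jiffy lasts NSEC_PER_SEC / HZ ns (e.g. 4 ms at HZ=250) */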
4357 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4358 		} else
4359 			kt = ndelay;
4360 		if (!sd_dp->init_hrt) {
4361 			sd_dp->init_hrt = true;
4362 			sqcp->sd_dp = sd_dp;
4363 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4364 				     HRTIMER_MODE_REL_PINNED);
4365 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4366 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4367 			sd_dp->qc_idx = k;
4368 		}
4369 		if (sdebug_statistics)
4370 			sd_dp->issuing_cpu = raw_smp_processor_id();
4371 		sd_dp->defer_t = SDEB_DEFER_HRT;
4372 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4373 	} else {	/* jdelay < 0, use work queue */
4374 		if (!sd_dp->init_wq) {
4375 			sd_dp->init_wq = true;
4376 			sqcp->sd_dp = sd_dp;
4377 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4378 			sd_dp->qc_idx = k;
4379 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4380 		}
4381 		if (sdebug_statistics)
4382 			sd_dp->issuing_cpu = raw_smp_processor_id();
4383 		sd_dp->defer_t = SDEB_DEFER_WQ;
4384 		if (unlikely(sqcp->inj_cmd_abort))
4385 			sd_dp->aborted = true;
4386 		schedule_work(&sd_dp->ew.work);
4387 		if (unlikely(sqcp->inj_cmd_abort)) {
4388 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
4389 				    cmnd->request->tag);
4390 			blk_abort_request(cmnd->request);
4391 		}
4392 	}
4393 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4394 		     (scsi_result == device_qfull_result)))
4395 		sdev_printk(KERN_INFO, sdp,
4396 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4397 			    num_in_q, (inject ? "<inject> " : ""),
4398 			    "status: TASK SET FULL");
4399 	return 0;
4400 
4401 respond_in_thread:	/* call back to mid-layer using invocation thread */
4402 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4403 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
4404 	if (cmnd->result == 0 && scsi_result != 0)
4405 		cmnd->result = scsi_result;
4406 	cmnd->scsi_done(cmnd);
4407 	return 0;
4408 }
4409 
4410 /* Note: The following macros create attribute files in the
4411    /sys/module/scsi_debug/parameters directory. Unfortunately this
4412    driver is not notified when one of them is changed, so it cannot
4413    trigger the auxiliary actions it performs when the corresponding
4414    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4415  */
4416 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4417 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4418 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4419 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4420 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4421 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4422 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4423 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4424 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4425 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4426 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4427 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4428 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4429 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4430 		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4431 module_param_string(inq_product, sdebug_inq_product_id,
4432 		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4433 module_param_string(inq_rev, sdebug_inq_product_rev,
4434 		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4435 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4436 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4437 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4438 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4439 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4440 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4441 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4442 module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
4443 module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
4444 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4445 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4446 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4447 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4448 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4449 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4450 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4451 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4452 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4453 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4454 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4455 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4456 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4457 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4458 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4459 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4460 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4461 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4462 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4463 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4464 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4465 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4466 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4467 		   S_IRUGO | S_IWUSR);
4468 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
4469 module_param_named(write_same_length, sdebug_write_same_length, int,
4470 		   S_IRUGO | S_IWUSR);
4471 
4472 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4473 MODULE_DESCRIPTION("SCSI debug adapter driver");
4474 MODULE_LICENSE("GPL");
4475 MODULE_VERSION(SDEBUG_VERSION);
4476 
4477 MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4478 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4479 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4480 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4481 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4482 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
4483 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4484 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4485 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4486 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4487 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4488 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4489 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4490 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4491 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4492 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4493 		 SDEBUG_VERSION "\")");
4494 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4495 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4496 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4497 MODULE_PARM_DESC(lbprz,
4498 	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4499 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4500 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4501 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4502 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
4503 MODULE_PARM_DESC(medium_error_count, "number of sectors after medium_error_start that return MEDIUM error");
4504 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4505 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4506 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4507 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4508 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4509 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4510 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4511 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4512 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4513 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4514 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4515 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
4516 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4517 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4518 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4519 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4520 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4521 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4522 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
4523 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4524 MODULE_PARM_DESC(uuid_ctl,
4525 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4526 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4527 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4528 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
4529 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4530 
4531 #define SDEBUG_INFO_LEN 256
4532 static char sdebug_info[SDEBUG_INFO_LEN];
4533 
4534 static const char *scsi_debug_info(struct Scsi_Host *shp)
4535 {
4536 	int k;
4537 
4538 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4539 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4540 	if (k >= (SDEBUG_INFO_LEN - 1))
4541 		return sdebug_info;
4542 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4543 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4544 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4545 		  "statistics", (int)sdebug_statistics);
4546 	return sdebug_info;
4547 }
4548 
4549 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4550 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4551 				 int length)
4552 {
4553 	char arr[16];
4554 	int opts;
4555 	int minLen = length > 15 ? 15 : length;
4556 
4557 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4558 		return -EACCES;
4559 	memcpy(arr, buffer, minLen);
4560 	arr[minLen] = '\0';
4561 	if (1 != sscanf(arr, "%d", &opts))
4562 		return -EINVAL;
4563 	sdebug_opts = opts;
4564 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4565 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4566 	if (sdebug_every_nth != 0)
4567 		tweak_cmnd_count();
4568 	return length;
4569 }
4570 
4571 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4572  * same for each scsi_debug host (if more than one). Some of the counters
4573  * output are not atomic, so they may be inaccurate on a busy system. */
4574 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4575 {
4576 	int f, j, l;
4577 	struct sdebug_queue *sqp;
4578 
4579 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4580 		   SDEBUG_VERSION, sdebug_version_date);
4581 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4582 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4583 		   sdebug_opts, sdebug_every_nth);
4584 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4585 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4586 		   sdebug_sector_size, "bytes");
4587 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4588 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4589 		   num_aborts);
4590 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4591 		   num_dev_resets, num_target_resets, num_bus_resets,
4592 		   num_host_resets);
4593 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4594 		   dix_reads, dix_writes, dif_errors);
4595 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4596 		   sdebug_statistics);
4597 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4598 		   atomic_read(&sdebug_cmnd_count),
4599 		   atomic_read(&sdebug_completions),
4600 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4601 		   atomic_read(&sdebug_a_tsf));
4602 
4603 	seq_printf(m, "submit_queues=%d\n", submit_queues);
4604 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4605 		seq_printf(m, "  queue %d:\n", j);
4606 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4607 		if (f != sdebug_max_queue) {
4608 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4609 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4610 				   "first,last bits", f, l);
4611 		}
4612 	}
4613 	return 0;
4614 }
4615 
4616 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4617 {
4618 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4619 }
4620 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4621  * of delay is jiffies.
4622  */
4623 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4624 			   size_t count)
4625 {
4626 	int jdelay, res;
4627 
4628 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4629 		res = count;
4630 		if (sdebug_jdelay != jdelay) {
4631 			int j, k;
4632 			struct sdebug_queue *sqp;
4633 
4634 			block_unblock_all_queues(true);
4635 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4636 			     ++j, ++sqp) {
4637 				k = find_first_bit(sqp->in_use_bm,
4638 						   sdebug_max_queue);
4639 				if (k != sdebug_max_queue) {
4640 					res = -EBUSY;   /* queued commands */
4641 					break;
4642 				}
4643 			}
4644 			if (res > 0) {
4645 				sdebug_jdelay = jdelay;
4646 				sdebug_ndelay = 0;
4647 			}
4648 			block_unblock_all_queues(false);
4649 		}
4650 		return res;
4651 	}
4652 	return -EINVAL;
4653 }
4654 static DRIVER_ATTR_RW(delay);
4655 
4656 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4657 {
4658 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4659 }
4660 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4661 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4662 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4663 			    size_t count)
4664 {
4665 	int ndelay, res;
4666 
4667 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4668 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4669 		res = count;
4670 		if (sdebug_ndelay != ndelay) {
4671 			int j, k;
4672 			struct sdebug_queue *sqp;
4673 
4674 			block_unblock_all_queues(true);
4675 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4676 			     ++j, ++sqp) {
4677 				k = find_first_bit(sqp->in_use_bm,
4678 						   sdebug_max_queue);
4679 				if (k != sdebug_max_queue) {
4680 					res = -EBUSY;   /* queued commands */
4681 					break;
4682 				}
4683 			}
4684 			if (res > 0) {
4685 				sdebug_ndelay = ndelay;
4686 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4687 							: DEF_JDELAY;
4688 			}
4689 			block_unblock_all_queues(false);
4690 		}
4691 		return res;
4692 	}
4693 	return -EINVAL;
4694 }
4695 static DRIVER_ATTR_RW(ndelay);
4696 
4697 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4698 {
4699 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4700 }
4701 
4702 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4703 			  size_t count)
4704 {
4705 	int opts;
4706 	char work[20];
4707 
4708 	if (sscanf(buf, "%10s", work) == 1) {
4709 		if (strncasecmp(work, "0x", 2) == 0) {
4710 			if (kstrtoint(work + 2, 16, &opts) == 0)
4711 				goto opts_done;
4712 		} else {
4713 			if (kstrtoint(work, 10, &opts) == 0)
4714 				goto opts_done;
4715 		}
4716 	}
4717 	return -EINVAL;
4718 opts_done:
4719 	sdebug_opts = opts;
4720 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4721 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4722 	tweak_cmnd_count();
4723 	return count;
4724 }
4725 static DRIVER_ATTR_RW(opts);
4726 
4727 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4728 {
4729 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4730 }
4731 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4732 			   size_t count)
4733 {
4734 	int n;
4735 
4736 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4737 		sdebug_ptype = n;
4738 		return count;
4739 	}
4740 	return -EINVAL;
4741 }
4742 static DRIVER_ATTR_RW(ptype);
4743 
4744 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4745 {
4746 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4747 }
4748 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4749 			    size_t count)
4750 {
4751 	int n;
4752 
4753 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4754 		sdebug_dsense = n;
4755 		return count;
4756 	}
4757 	return -EINVAL;
4758 }
4759 static DRIVER_ATTR_RW(dsense);
4760 
4761 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4762 {
4763 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4764 }
4765 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4766 			     size_t count)
4767 {
4768 	int n;
4769 
4770 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4771 		n = (n > 0);
4772 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4773 		if (sdebug_fake_rw != n) {
4774 			if ((0 == n) && (NULL == fake_storep)) {
4775 				unsigned long sz =
4776 					(unsigned long)sdebug_dev_size_mb *
4777 					1048576;
4778 
4779 				fake_storep = vzalloc(sz);
4780 				if (NULL == fake_storep) {
4781 					pr_err("out of memory, 9\n");
4782 					return -ENOMEM;
4783 				}
4784 			}
4785 			sdebug_fake_rw = n;
4786 		}
4787 		return count;
4788 	}
4789 	return -EINVAL;
4790 }
4791 static DRIVER_ATTR_RW(fake_rw);
4792 
4793 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4794 {
4795 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4796 }
4797 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4798 			      size_t count)
4799 {
4800 	int n;
4801 
4802 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4803 		sdebug_no_lun_0 = n;
4804 		return count;
4805 	}
4806 	return -EINVAL;
4807 }
4808 static DRIVER_ATTR_RW(no_lun_0);
4809 
4810 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4811 {
4812 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4813 }
4814 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4815 			      size_t count)
4816 {
4817 	int n;
4818 
4819 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4820 		sdebug_num_tgts = n;
4821 		sdebug_max_tgts_luns();
4822 		return count;
4823 	}
4824 	return -EINVAL;
4825 }
4826 static DRIVER_ATTR_RW(num_tgts);
4827 
4828 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4829 {
4830 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4831 }
4832 static DRIVER_ATTR_RO(dev_size_mb);
4833 
4834 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4835 {
4836 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4837 }
4838 static DRIVER_ATTR_RO(num_parts);
4839 
4840 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4841 {
4842 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4843 }
4844 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4845 			       size_t count)
4846 {
4847 	int nth;
4848 
4849 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4850 		sdebug_every_nth = nth;
4851 		if (nth && !sdebug_statistics) {
4852 			pr_info("every_nth needs statistics=1, setting it\n");
4853 			sdebug_statistics = true;
4854 		}
4855 		tweak_cmnd_count();
4856 		return count;
4857 	}
4858 	return -EINVAL;
4859 }
4860 static DRIVER_ATTR_RW(every_nth);
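/*
 * Writing a non-zero every_nth implicitly turns statistics on, since the
 * command counter that every_nth is tested against is only maintained
 * when sdebug_statistics is true. Illustrative: "echo 100 > every_nth"
 * arms an action on every 100th command; which action depends on opts.
 */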
4861 
4862 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4863 {
4864 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4865 }
4866 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4867 			      size_t count)
4868 {
4869 	int n;
4870 	bool changed;
4871 
4872 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4873 		if (n > 256) {
4874 			pr_warn("max_luns can be no more than 256\n");
4875 			return -EINVAL;
4876 		}
4877 		changed = (sdebug_max_luns != n);
4878 		sdebug_max_luns = n;
4879 		sdebug_max_tgts_luns();
4880 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4881 			struct sdebug_host_info *sdhp;
4882 			struct sdebug_dev_info *dp;
4883 
4884 			spin_lock(&sdebug_host_list_lock);
4885 			list_for_each_entry(sdhp, &sdebug_host_list,
4886 					    host_list) {
4887 				list_for_each_entry(dp, &sdhp->dev_info_list,
4888 						    dev_list) {
4889 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4890 						dp->uas_bm);
4891 				}
4892 			}
4893 			spin_unlock(&sdebug_host_list_lock);
4894 		}
4895 		return count;
4896 	}
4897 	return -EINVAL;
4898 }
4899 static DRIVER_ATTR_RW(max_luns);
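/*
 * Changing max_luns is made visible to initiators: when scsi_level is
 * SPC-3 or later, a REPORTED LUNS DATA HAS CHANGED unit attention
 * (SDEBUG_UA_LUNS_CHANGED) is queued on every existing device above.
 */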
4900 
4901 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4902 {
4903 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4904 }
4905 /* N.B. max_queue can be changed while there are queued commands. In-flight
4906  * commands beyond the new max_queue will be completed. */
4907 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4908 			       size_t count)
4909 {
4910 	int j, n, k, a;
4911 	struct sdebug_queue *sqp;
4912 
4913 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4914 	    (n <= SDEBUG_CANQUEUE)) {
4915 		block_unblock_all_queues(true);
4916 		k = 0;
4917 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4918 		     ++j, ++sqp) {
4919 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4920 			if (a > k)
4921 				k = a;
4922 		}
4923 		sdebug_max_queue = n;
4924 		if (k == SDEBUG_CANQUEUE)
4925 			atomic_set(&retired_max_queue, 0);
4926 		else if (k >= n)
4927 			atomic_set(&retired_max_queue, k + 1);
4928 		else
4929 			atomic_set(&retired_max_queue, 0);
4930 		block_unblock_all_queues(false);
4931 		return count;
4932 	}
4933 	return -EINVAL;
4934 }
4935 static DRIVER_ATTR_RW(max_queue);
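/*
 * Worked example of the retirement logic above (illustrative): with one
 * submit queue whose highest in-use tag is 20, lowering max_queue from
 * 64 to 8 sets retired_max_queue to 21, so commands already in flight
 * with tags 8..20 can still be found and completed.
 */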
4936 
4937 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4938 {
4939 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4940 }
4941 static DRIVER_ATTR_RO(no_uld);
4942 
4943 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4944 {
4945 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4946 }
4947 static DRIVER_ATTR_RO(scsi_level);
4948 
4949 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4950 {
4951 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4952 }
4953 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4954 				size_t count)
4955 {
4956 	int n;
4957 	bool changed;
4958 
4959 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4960 		changed = (sdebug_virtual_gb != n);
4961 		sdebug_virtual_gb = n;
4962 		sdebug_capacity = get_sdebug_capacity();
4963 		if (changed) {
4964 			struct sdebug_host_info *sdhp;
4965 			struct sdebug_dev_info *dp;
4966 
4967 			spin_lock(&sdebug_host_list_lock);
4968 			list_for_each_entry(sdhp, &sdebug_host_list,
4969 					    host_list) {
4970 				list_for_each_entry(dp, &sdhp->dev_info_list,
4971 						    dev_list) {
4972 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4973 						dp->uas_bm);
4974 				}
4975 			}
4976 			spin_unlock(&sdebug_host_list_lock);
4977 		}
4978 		return count;
4979 	}
4980 	return -EINVAL;
4981 }
4982 static DRIVER_ATTR_RW(virtual_gb);
4983 
4984 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4985 {
4986 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4987 }
4988 
4989 static int sdebug_add_adapter(void);
4990 static void sdebug_remove_adapter(void);
4991 
4992 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4993 			      size_t count)
4994 {
4995 	int delta_hosts;
4996 
4997 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4998 		return -EINVAL;
4999 	if (delta_hosts > 0) {
5000 		do {
5001 			sdebug_add_adapter();
5002 		} while (--delta_hosts);
5003 	} else if (delta_hosts < 0) {
5004 		do {
5005 			sdebug_remove_adapter();
5006 		} while (++delta_hosts);
5007 	}
5008 	return count;
5009 }
5010 static DRIVER_ATTR_RW(add_host);
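/*
 * add_host takes a signed delta (illustrative):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host   # add two hosts
 *   echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host  # remove newest
 *
 * Removal pops the most recently added adapter (the tail of
 * sdebug_host_list).
 */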
5011 
5012 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5013 {
5014 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5015 }
5016 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5017 				    size_t count)
5018 {
5019 	int n;
5020 
5021 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5022 		sdebug_vpd_use_hostno = n;
5023 		return count;
5024 	}
5025 	return -EINVAL;
5026 }
5027 static DRIVER_ATTR_RW(vpd_use_hostno);
5028 
5029 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5030 {
5031 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5032 }
5033 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5034 				size_t count)
5035 {
5036 	int n;
5037 
5038 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5039 		if (n > 0)
5040 			sdebug_statistics = true;
5041 		else {
5042 			clear_queue_stats();
5043 			sdebug_statistics = false;
5044 		}
5045 		return count;
5046 	}
5047 	return -EINVAL;
5048 }
5049 static DRIVER_ATTR_RW(statistics);
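/*
 * Writing 0 both disables statistics and zeroes the queue counters via
 * clear_queue_stats(), so "echo 0" followed by "echo 1" (illustrative)
 * gives a fresh baseline.
 */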
5050 
5051 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5052 {
5053 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5054 }
5055 static DRIVER_ATTR_RO(sector_size);
5056 
5057 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5058 {
5059 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5060 }
5061 static DRIVER_ATTR_RO(submit_queues);
5062 
5063 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5064 {
5065 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5066 }
5067 static DRIVER_ATTR_RO(dix);
5068 
5069 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5070 {
5071 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5072 }
5073 static DRIVER_ATTR_RO(dif);
5074 
5075 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5076 {
5077 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5078 }
5079 static DRIVER_ATTR_RO(guard);
5080 
5081 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5082 {
5083 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5084 }
5085 static DRIVER_ATTR_RO(ato);
5086 
5087 static ssize_t map_show(struct device_driver *ddp, char *buf)
5088 {
5089 	ssize_t count;
5090 
5091 	if (!scsi_debug_lbp())
5092 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5093 				 sdebug_store_sectors);
5094 
5095 	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5096 			  (int)map_size, map_storep);
5097 	buf[count++] = '\n';
5098 	buf[count] = '\0';
5099 
5100 	return count;
5101 }
5102 static DRIVER_ATTR_RO(map);
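/*
 * map is emitted with the "%pbl" bitmap-list format, so a read returns
 * ranges of provisioned blocks such as "0-1,32-47" (values illustrative).
 * With num_parts > 0, blocks 0 and 1 are mapped at init time for the
 * partition table, so a "0-1" prefix is typical.
 */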
5103 
5104 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5105 {
5106 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5107 }
5108 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5109 			       size_t count)
5110 {
5111 	int n;
5112 
5113 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5114 		sdebug_removable = (n > 0);
5115 		return count;
5116 	}
5117 	return -EINVAL;
5118 }
5119 static DRIVER_ATTR_RW(removable);
5120 
5121 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5122 {
5123 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5124 }
5125 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5126 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5127 			       size_t count)
5128 {
5129 	int n;
5130 
5131 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5132 		sdebug_host_lock = (n > 0);
5133 		return count;
5134 	}
5135 	return -EINVAL;
5136 }
5137 static DRIVER_ATTR_RW(host_lock);
5138 
5139 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5140 {
5141 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5142 }
5143 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5144 			    size_t count)
5145 {
5146 	int n;
5147 
5148 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5149 		sdebug_strict = (n > 0);
5150 		return count;
5151 	}
5152 	return -EINVAL;
5153 }
5154 static DRIVER_ATTR_RW(strict);
5155 
5156 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5157 {
5158 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5159 }
5160 static DRIVER_ATTR_RO(uuid_ctl);
5161 
5162 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5163 {
5164 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5165 }
5166 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5167 			     size_t count)
5168 {
5169 	int ret, n;
5170 
5171 	ret = kstrtoint(buf, 0, &n);
5172 	if (ret)
5173 		return ret;
5174 	sdebug_cdb_len = n;
5175 	all_config_cdb_len();
5176 	return count;
5177 }
5178 static DRIVER_ATTR_RW(cdb_len);
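/*
 * cdb_len is parsed with kstrtoint() base 0, so decimal, hex and octal
 * all work (illustrative): "32", "0x20" and "040" each set cdb_len=32.
 */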
5179 
5180 
5181 /* Note: The following array creates attribute files in the
5182  * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5183  * files (over those found in the /sys/module/scsi_debug/parameters
5184  * directory) is that auxiliary actions can be triggered when an attribute
5185  * is changed. For example see: add_host_store() above.
5186  */
5187 
5188 static struct attribute *sdebug_drv_attrs[] = {
5189 	&driver_attr_delay.attr,
5190 	&driver_attr_opts.attr,
5191 	&driver_attr_ptype.attr,
5192 	&driver_attr_dsense.attr,
5193 	&driver_attr_fake_rw.attr,
5194 	&driver_attr_no_lun_0.attr,
5195 	&driver_attr_num_tgts.attr,
5196 	&driver_attr_dev_size_mb.attr,
5197 	&driver_attr_num_parts.attr,
5198 	&driver_attr_every_nth.attr,
5199 	&driver_attr_max_luns.attr,
5200 	&driver_attr_max_queue.attr,
5201 	&driver_attr_no_uld.attr,
5202 	&driver_attr_scsi_level.attr,
5203 	&driver_attr_virtual_gb.attr,
5204 	&driver_attr_add_host.attr,
5205 	&driver_attr_vpd_use_hostno.attr,
5206 	&driver_attr_sector_size.attr,
5207 	&driver_attr_statistics.attr,
5208 	&driver_attr_submit_queues.attr,
5209 	&driver_attr_dix.attr,
5210 	&driver_attr_dif.attr,
5211 	&driver_attr_guard.attr,
5212 	&driver_attr_ato.attr,
5213 	&driver_attr_map.attr,
5214 	&driver_attr_removable.attr,
5215 	&driver_attr_host_lock.attr,
5216 	&driver_attr_ndelay.attr,
5217 	&driver_attr_strict.attr,
5218 	&driver_attr_uuid_ctl.attr,
5219 	&driver_attr_cdb_len.attr,
5220 	NULL,
5221 };
5222 ATTRIBUTE_GROUPS(sdebug_drv);
5223 
5224 static struct device *pseudo_primary;
5225 
5226 static int __init scsi_debug_init(void)
5227 {
5228 	unsigned long sz;
5229 	int host_to_add;
5230 	int k;
5231 	int ret;
5232 
5233 	atomic_set(&retired_max_queue, 0);
5234 
5235 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5236 		pr_warn("ndelay must be less than 1 second, ignored\n");
5237 		sdebug_ndelay = 0;
5238 	} else if (sdebug_ndelay > 0)
5239 		sdebug_jdelay = JDELAY_OVERRIDDEN;
5240 
5241 	switch (sdebug_sector_size) {
5242 	case  512:
5243 	case 1024:
5244 	case 2048:
5245 	case 4096:
5246 		break;
5247 	default:
5248 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
5249 		return -EINVAL;
5250 	}
5251 
5252 	switch (sdebug_dif) {
5253 	case T10_PI_TYPE0_PROTECTION:
5254 		break;
5255 	case T10_PI_TYPE1_PROTECTION:
5256 	case T10_PI_TYPE2_PROTECTION:
5257 	case T10_PI_TYPE3_PROTECTION:
5258 		have_dif_prot = true;
5259 		break;
5260 
5261 	default:
5262 		pr_err("dif must be 0, 1, 2 or 3\n");
5263 		return -EINVAL;
5264 	}
5265 
5266 	if (sdebug_guard > 1) {
5267 		pr_err("guard must be 0 or 1\n");
5268 		return -EINVAL;
5269 	}
5270 
5271 	if (sdebug_ato > 1) {
5272 		pr_err("ato must be 0 or 1\n");
5273 		return -EINVAL;
5274 	}
5275 
5276 	if (sdebug_physblk_exp > 15) {
5277 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5278 		return -EINVAL;
5279 	}
5280 	if (sdebug_max_luns > 256) {
5281 		pr_warn("max_luns can be no more than 256, use default\n");
5282 		sdebug_max_luns = DEF_MAX_LUNS;
5283 	}
5284 
5285 	if (sdebug_lowest_aligned > 0x3fff) {
5286 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5287 		return -EINVAL;
5288 	}
5289 
5290 	if (submit_queues < 1) {
5291 		pr_err("submit_queues must be 1 or more\n");
5292 		return -EINVAL;
5293 	}
5294 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5295 			       GFP_KERNEL);
5296 	if (sdebug_q_arr == NULL)
5297 		return -ENOMEM;
5298 	for (k = 0; k < submit_queues; ++k)
5299 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
5300 
5301 	if (sdebug_dev_size_mb < 1)
5302 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
5303 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5304 	sdebug_store_sectors = sz / sdebug_sector_size;
5305 	sdebug_capacity = get_sdebug_capacity();
5306 
5307 	/* play around with geometry, don't waste too much on track 0 */
5308 	sdebug_heads = 8;
5309 	sdebug_sectors_per = 32;
5310 	if (sdebug_dev_size_mb >= 256)
5311 		sdebug_heads = 64;
5312 	else if (sdebug_dev_size_mb >= 16)
5313 		sdebug_heads = 32;
5314 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5315 			       (sdebug_sectors_per * sdebug_heads);
5316 	if (sdebug_cylinders_per >= 1024) {
5317 		/* other LLDs do this; implies >= 1GB ram disk ... */
5318 		sdebug_heads = 255;
5319 		sdebug_sectors_per = 63;
5320 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5321 			       (sdebug_sectors_per * sdebug_heads);
5322 	}
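	/*
	 * Worked example (illustrative, virtual_gb left at 0): an 8 MB
	 * store with 512-byte sectors has 16384 sectors; 8 heads and 32
	 * sectors per track then give 64 cylinders, well below the
	 * 1024-cylinder fallback above.
	 */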
5323 
5324 	if (sdebug_fake_rw == 0) {
5325 		fake_storep = vzalloc(sz);
5326 		if (NULL == fake_storep) {
5327 			pr_err("out of memory, 1\n");
5328 			ret = -ENOMEM;
5329 			goto free_q_arr;
5330 		}
5331 		if (sdebug_num_parts > 0)
5332 			sdebug_build_parts(fake_storep, sz);
5333 	}
5334 
5335 	if (sdebug_dix) {
5336 		int dif_size;
5337 
5338 		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5339 		dif_storep = vmalloc(dif_size);
5340 
5341 		pr_info("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5342 
5343 		if (dif_storep == NULL) {
5344 			pr_err("out of mem. (DIX)\n");
5345 			ret = -ENOMEM;
5346 			goto free_vm;
5347 		}
5348 
5349 		memset(dif_storep, 0xff, dif_size);
5350 	}
5351 
5352 	/* Logical Block Provisioning */
5353 	if (scsi_debug_lbp()) {
5354 		sdebug_unmap_max_blocks =
5355 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5356 
5357 		sdebug_unmap_max_desc =
5358 			clamp(sdebug_unmap_max_desc, 0U, 256U);
5359 
5360 		sdebug_unmap_granularity =
5361 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5362 
5363 		if (sdebug_unmap_alignment &&
5364 		    sdebug_unmap_granularity <=
5365 		    sdebug_unmap_alignment) {
5366 			pr_err("unmap_granularity <= unmap_alignment\n");
5367 			ret = -EINVAL;
5368 			goto free_vm;
5369 		}
5370 
5371 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5372 		map_storep = vmalloc(array_size(sizeof(long),
5373 						BITS_TO_LONGS(map_size)));
5374 
5375 		pr_info("%lu provisioning blocks\n", map_size);
5376 
5377 		if (map_storep == NULL) {
5378 			pr_err("out of mem. (MAP)\n");
5379 			ret = -ENOMEM;
5380 			goto free_vm;
5381 		}
5382 
5383 		bitmap_zero(map_storep, map_size);
5384 
5385 		/* Map first 1KB for partition table */
5386 		if (sdebug_num_parts)
5387 			map_region(0, 2);
5388 	}
5389 
5390 	pseudo_primary = root_device_register("pseudo_0");
5391 	if (IS_ERR(pseudo_primary)) {
5392 		pr_warn("root_device_register() error\n");
5393 		ret = PTR_ERR(pseudo_primary);
5394 		goto free_vm;
5395 	}
5396 	ret = bus_register(&pseudo_lld_bus);
5397 	if (ret < 0) {
5398 		pr_warn("bus_register error: %d\n", ret);
5399 		goto dev_unreg;
5400 	}
5401 	ret = driver_register(&sdebug_driverfs_driver);
5402 	if (ret < 0) {
5403 		pr_warn("driver_register error: %d\n", ret);
5404 		goto bus_unreg;
5405 	}
5406 
5407 	host_to_add = sdebug_add_host;
5408 	sdebug_add_host = 0;
5409 
5410 	for (k = 0; k < host_to_add; k++) {
5411 		if (sdebug_add_adapter()) {
5412 			pr_err("sdebug_add_adapter failed k=%d\n", k);
5413 			break;
5414 		}
5415 	}
5416 
5417 	if (sdebug_verbose)
5418 		pr_info("built %d host(s)\n", sdebug_add_host);
5419 
5420 	return 0;
5421 
5422 bus_unreg:
5423 	bus_unregister(&pseudo_lld_bus);
5424 dev_unreg:
5425 	root_device_unregister(pseudo_primary);
5426 free_vm:
5427 	vfree(map_storep);
5428 	vfree(dif_storep);
5429 	vfree(fake_storep);
5430 free_q_arr:
5431 	kfree(sdebug_q_arr);
5432 	return ret;
5433 }
5434 
5435 static void __exit scsi_debug_exit(void)
5436 {
5437 	int k = sdebug_add_host;
5438 
5439 	stop_all_queued();
5440 	for (; k; k--)
5441 		sdebug_remove_adapter();
5442 	free_all_queued();
5443 	driver_unregister(&sdebug_driverfs_driver);
5444 	bus_unregister(&pseudo_lld_bus);
5445 	root_device_unregister(pseudo_primary);
5446 
5447 	vfree(map_storep);
5448 	vfree(dif_storep);
5449 	vfree(fake_storep);
5450 	kfree(sdebug_q_arr);
5451 }
5452 
5453 device_initcall(scsi_debug_init);
5454 module_exit(scsi_debug_exit);
5455 
5456 static void sdebug_release_adapter(struct device *dev)
5457 {
5458 	struct sdebug_host_info *sdbg_host;
5459 
5460 	sdbg_host = to_sdebug_host(dev);
5461 	kfree(sdbg_host);
5462 }
5463 
5464 static int sdebug_add_adapter(void)
5465 {
5466 	int k, devs_per_host;
5467 	int error = 0;
5468 	struct sdebug_host_info *sdbg_host;
5469 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5470 
5471 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5472 	if (sdbg_host == NULL) {
5473 		pr_err("out of memory at line %d\n", __LINE__);
5474 		return -ENOMEM;
5475 	}
5476 
5477 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5478 
5479 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5480 	for (k = 0; k < devs_per_host; k++) {
5481 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5482 		if (!sdbg_devinfo) {
5483 			pr_err("out of memory at line %d\n", __LINE__);
5484 			error = -ENOMEM;
5485 			goto clean;
5486 		}
5487 	}
5488 
5489 	spin_lock(&sdebug_host_list_lock);
5490 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5491 	spin_unlock(&sdebug_host_list_lock);
5492 
5493 	sdbg_host->dev.bus = &pseudo_lld_bus;
5494 	sdbg_host->dev.parent = pseudo_primary;
5495 	sdbg_host->dev.release = &sdebug_release_adapter;
5496 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5497 
5498 	error = device_register(&sdbg_host->dev);
5499 
5500 	if (error)
5501 		goto clean;
5502 
5503 	++sdebug_add_host;
5504 	return error;
5505 
5506 clean:
5507 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5508 				 dev_list) {
5509 		list_del(&sdbg_devinfo->dev_list);
5510 		kfree(sdbg_devinfo);
5511 	}
5512 
5513 	kfree(sdbg_host);
5514 	return error;
5515 }
5516 
5517 static void sdebug_remove_adapter(void)
5518 {
5519 	struct sdebug_host_info *sdbg_host = NULL;
5520 
5521 	spin_lock(&sdebug_host_list_lock);
5522 	if (!list_empty(&sdebug_host_list)) {
5523 		sdbg_host = list_entry(sdebug_host_list.prev,
5524 				       struct sdebug_host_info, host_list);
5525 		list_del(&sdbg_host->host_list);
5526 	}
5527 	spin_unlock(&sdebug_host_list_lock);
5528 
5529 	if (!sdbg_host)
5530 		return;
5531 
5532 	device_unregister(&sdbg_host->dev);
5533 	--sdebug_add_host;
5534 }
5535 
5536 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5537 {
5538 	int num_in_q = 0;
5539 	struct sdebug_dev_info *devip;
5540 
5541 	block_unblock_all_queues(true);
5542 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5543 	if (NULL == devip) {
5544 		block_unblock_all_queues(false);
5545 		return	-ENODEV;
5546 	}
5547 	num_in_q = atomic_read(&devip->num_in_q);
5548 
5549 	if (qdepth < 1)
5550 		qdepth = 1;
5551 	/* allow qdepth to exceed max host qc_arr elements for testing */
5552 	if (qdepth > SDEBUG_CANQUEUE + 10)
5553 		qdepth = SDEBUG_CANQUEUE + 10;
5554 	scsi_change_queue_depth(sdev, qdepth);
5555 
5556 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5557 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5558 			    __func__, qdepth, num_in_q);
5559 	}
5560 	block_unblock_all_queues(false);
5561 	return sdev->queue_depth;
5562 }
5563 
5564 static bool fake_timeout(struct scsi_cmnd *scp)
5565 {
5566 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5567 		if (sdebug_every_nth < -1)
5568 			sdebug_every_nth = -1;
5569 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5570 			return true; /* ignore command causing timeout */
5571 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5572 			 scsi_medium_access_command(scp))
5573 			return true; /* time out reads and writes */
5574 	}
5575 	return false;
5576 }
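/*
 * Illustrative: with every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts,
 * every 100th command gets no response, so it times out and exercises
 * the mid-level error handler. An every_nth below -1 is clamped to -1
 * by fake_timeout() once its condition first fires.
 */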
5577 
5578 static bool fake_host_busy(struct scsi_cmnd *scp)
5579 {
5580 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) && sdebug_every_nth &&
5581 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5582 }
5583 
5584 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5585 				   struct scsi_cmnd *scp)
5586 {
5587 	u8 sdeb_i;
5588 	struct scsi_device *sdp = scp->device;
5589 	const struct opcode_info_t *oip;
5590 	const struct opcode_info_t *r_oip;
5591 	struct sdebug_dev_info *devip;
5592 	u8 *cmd = scp->cmnd;
5593 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5594 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
5595 	int k, na;
5596 	int errsts = 0;
5597 	u32 flags;
5598 	u16 sa;
5599 	u8 opcode = cmd[0];
5600 	bool has_wlun_rl;
5601 
5602 	scsi_set_resid(scp, 0);
5603 	if (sdebug_statistics)
5604 		atomic_inc(&sdebug_cmnd_count);
5605 	if (unlikely(sdebug_verbose &&
5606 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5607 		char b[120];
5608 		int n, len, sb;
5609 
5610 		len = scp->cmd_len;
5611 		sb = (int)sizeof(b);
5612 		if (len > 32)
5613 			strcpy(b, "too long, over 32 bytes");
5614 		else {
5615 			for (k = 0, n = 0; k < len && n < sb; ++k)
5616 				n += scnprintf(b + n, sb - n, "%02x ",
5617 					       (u32)cmd[k]);
5618 		}
5619 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
5620 			    blk_mq_unique_tag(scp->request), b);
5621 	}
5622 	if (fake_host_busy(scp))
5623 		return SCSI_MLQUEUE_HOST_BUSY;
5624 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5625 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5626 		goto err_out;
5627 
5628 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
5629 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
5630 	devip = (struct sdebug_dev_info *)sdp->hostdata;
5631 	if (unlikely(!devip)) {
5632 		devip = find_build_dev_info(sdp);
5633 		if (NULL == devip)
5634 			goto err_out;
5635 	}
5636 	na = oip->num_attached;
5637 	r_pfp = oip->pfp;
5638 	if (na) {	/* multiple commands with this opcode */
5639 		r_oip = oip;
5640 		if (FF_SA & r_oip->flags) {
5641 			if (F_SA_LOW & oip->flags)
5642 				sa = 0x1f & cmd[1];
5643 			else
5644 				sa = get_unaligned_be16(cmd + 8);
5645 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5646 				if (opcode == oip->opcode && sa == oip->sa)
5647 					break;
5648 			}
5649 		} else {   /* since no service action only check opcode */
5650 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5651 				if (opcode == oip->opcode)
5652 					break;
5653 			}
5654 		}
5655 		if (k > na) {
5656 			if (F_SA_LOW & r_oip->flags)
5657 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5658 			else if (F_SA_HIGH & r_oip->flags)
5659 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5660 			else
5661 				mk_sense_invalid_opcode(scp);
5662 			goto check_cond;
5663 		}
5664 	}	/* else (when na==0) we assume the oip is a match */
5665 	flags = oip->flags;
5666 	if (unlikely(F_INV_OP & flags)) {
5667 		mk_sense_invalid_opcode(scp);
5668 		goto check_cond;
5669 	}
5670 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5671 		if (sdebug_verbose)
5672 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not supported for wlun\n",
5673 				    my_name, opcode);
5674 		mk_sense_invalid_opcode(scp);
5675 		goto check_cond;
5676 	}
5677 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
5678 		u8 rem;
5679 		int j;
5680 
5681 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5682 			rem = ~oip->len_mask[k] & cmd[k];
5683 			if (rem) {
5684 				for (j = 7; j >= 0; --j, rem <<= 1) {
5685 					if (0x80 & rem)
5686 						break;
5687 				}
5688 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5689 				goto check_cond;
5690 			}
5691 		}
5692 	}
5693 	if (unlikely(!(F_SKIP_UA & flags) &&
5694 		     find_first_bit(devip->uas_bm,
5695 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5696 		errsts = make_ua(scp, devip);
5697 		if (errsts)
5698 			goto check_cond;
5699 	}
5700 	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5701 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5702 		if (sdebug_verbose)
5703 			sdev_printk(KERN_INFO, sdp,
5704 				    "%s reports: Not ready: initializing command required\n",
5705 				    my_name);
5706 		errsts = check_condition_result;
5707 		goto fini;
5708 	}
5709 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
5710 		goto fini;
5711 	if (unlikely(sdebug_every_nth)) {
5712 		if (fake_timeout(scp))
5713 			return 0;	/* ignore command: make trouble */
5714 	}
5715 	if (likely(oip->pfp))
5716 		pfp = oip->pfp;	/* calls a resp_* function */
5717 	else
5718 		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */
5719 
5720 fini:
5721 	if (F_DELAY_OVERR & flags)
5722 		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5723 	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
5724 					    sdebug_ndelay > 10000)) {
5725 		/*
5726 		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
5727 		 * for Start Stop Unit (SSU) want at least 1 second delay and
5728 		 * if sdebug_jdelay>1 want a long delay of that many seconds.
5729 		 * For Synchronize Cache want 1/20 of SSU's delay.
5730 		 */
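		/*
		 * Worked example (illustrative; HZ and USER_HZ are config
		 * dependent): with HZ=250, USER_HZ=100, sdebug_jdelay=1 and
		 * an F_SYNC_DELAY command (denom=20), mult_frac(100, 250,
		 * 2000) below yields 12 jiffies, about 48 ms, i.e. roughly
		 * 1/20 of a second.
		 */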
5731 		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5732 		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5733 
5734 		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5735 		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5736 	} else
5737 		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
5738 				     sdebug_ndelay);
5739 check_cond:
5740 	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
5741 err_out:
5742 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
5743 }
5744 
5745 static struct scsi_host_template sdebug_driver_template = {
5746 	.show_info =		scsi_debug_show_info,
5747 	.write_info =		scsi_debug_write_info,
5748 	.proc_name =		sdebug_proc_name,
5749 	.name =			"SCSI DEBUG",
5750 	.info =			scsi_debug_info,
5751 	.slave_alloc =		scsi_debug_slave_alloc,
5752 	.slave_configure =	scsi_debug_slave_configure,
5753 	.slave_destroy =	scsi_debug_slave_destroy,
5754 	.ioctl =		scsi_debug_ioctl,
5755 	.queuecommand =		scsi_debug_queuecommand,
5756 	.change_queue_depth =	sdebug_change_qdepth,
5757 	.eh_abort_handler =	scsi_debug_abort,
5758 	.eh_device_reset_handler = scsi_debug_device_reset,
5759 	.eh_target_reset_handler = scsi_debug_target_reset,
5760 	.eh_bus_reset_handler = scsi_debug_bus_reset,
5761 	.eh_host_reset_handler = scsi_debug_host_reset,
5762 	.can_queue =		SDEBUG_CANQUEUE,
5763 	.this_id =		7,
5764 	.sg_tablesize =		SG_MAX_SEGMENTS,
5765 	.cmd_per_lun =		DEF_CMD_PER_LUN,
5766 	.max_sectors =		-1U,
5767 	.max_segment_size =	-1U,
5768 	.module =		THIS_MODULE,
5769 	.track_queue_depth =	1,
5770 };
5771 
5772 static int sdebug_driver_probe(struct device *dev)
5773 {
5774 	int error = 0;
5775 	struct sdebug_host_info *sdbg_host;
5776 	struct Scsi_Host *hpnt;
5777 	int hprot;
5778 
5779 	sdbg_host = to_sdebug_host(dev);
5780 
5781 	sdebug_driver_template.can_queue = sdebug_max_queue;
5782 	if (!sdebug_clustering)
5783 		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
5784 
5785 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5786 	if (NULL == hpnt) {
5787 		pr_err("scsi_host_alloc failed\n");
5788 		error = -ENODEV;
5789 		return error;
5790 	}
5791 	if (submit_queues > nr_cpu_ids) {
5792 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5793 			my_name, submit_queues, nr_cpu_ids);
5794 		submit_queues = nr_cpu_ids;
5795 	}
5796 	/* Decide whether to tell scsi subsystem that we want mq */
5797 	/* Following should give the same answer for each host */
5798 	hpnt->nr_hw_queues = submit_queues;
5799 
5800 	sdbg_host->shost = hpnt;
5801 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5802 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5803 		hpnt->max_id = sdebug_num_tgts + 1;
5804 	else
5805 		hpnt->max_id = sdebug_num_tgts;
5806 	/* allow the REPORT LUNS well known LU, rather than sdebug_max_luns */
5807 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5808 
5809 	hprot = 0;
5810 
5811 	switch (sdebug_dif) {
5812 
5813 	case T10_PI_TYPE1_PROTECTION:
5814 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5815 		if (sdebug_dix)
5816 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5817 		break;
5818 
5819 	case T10_PI_TYPE2_PROTECTION:
5820 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5821 		if (sdebug_dix)
5822 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5823 		break;
5824 
5825 	case T10_PI_TYPE3_PROTECTION:
5826 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5827 		if (sdebug_dix)
5828 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5829 		break;
5830 
5831 	default:
5832 		if (sdebug_dix)
5833 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5834 		break;
5835 	}
5836 
5837 	scsi_host_set_prot(hpnt, hprot);
5838 
5839 	if (have_dif_prot || sdebug_dix)
5840 		pr_info("host protection%s%s%s%s%s%s%s\n",
5841 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5842 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5843 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5844 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5845 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5846 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5847 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5848 
5849 	if (sdebug_guard == 1)
5850 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5851 	else
5852 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5853 
5854 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5855 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5856 	if (sdebug_every_nth)	/* need stats counters for every_nth */
5857 		sdebug_statistics = true;
5858 	error = scsi_add_host(hpnt, &sdbg_host->dev);
5859 	if (error) {
5860 		pr_err("scsi_add_host failed\n");
5861 		error = -ENODEV;
5862 		scsi_host_put(hpnt);
5863 	} else
5864 		scsi_scan_host(hpnt);
5865 
5866 	return error;
5867 }
5868 
5869 static int sdebug_driver_remove(struct device *dev)
5870 {
5871 	struct sdebug_host_info *sdbg_host;
5872 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5873 
5874 	sdbg_host = to_sdebug_host(dev);
5875 
5876 	if (!sdbg_host) {
5877 		pr_err("Unable to locate host info\n");
5878 		return -ENODEV;
5879 	}
5880 
5881 	scsi_remove_host(sdbg_host->shost);
5882 
5883 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5884 				 dev_list) {
5885 		list_del(&sdbg_devinfo->dev_list);
5886 		kfree(sdbg_devinfo);
5887 	}
5888 
5889 	scsi_host_put(sdbg_host->shost);
5890 	return 0;
5891 }
5892 
5893 static int pseudo_lld_bus_match(struct device *dev,
5894 				struct device_driver *dev_driver)
5895 {
5896 	return 1;
5897 }
5898 
5899 static struct bus_type pseudo_lld_bus = {
5900 	.name = "pseudo",
5901 	.match = pseudo_lld_bus_match,
5902 	.probe = sdebug_driver_probe,
5903 	.remove = sdebug_driver_remove,
5904 	.drv_groups = sdebug_drv_groups,
5905 };
5906