xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 98ddec80)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2018 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20180128";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
100 
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST   1
103 #define DEF_NUM_TGTS   1
104 #define DEF_MAX_LUNS   1
105 /* With these defaults, this driver will make 1 host with 1 target
106  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107  */
108 #define DEF_ATO 1
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB   8
112 #define DEF_DIF 0
113 #define DEF_DIX 0
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
147 
148 #define SDEBUG_LUN_0_VAL 0
149 
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE		1
152 #define SDEBUG_OPT_MEDIUM_ERR		2
153 #define SDEBUG_OPT_TIMEOUT		4
154 #define SDEBUG_OPT_RECOVERED_ERR	8
155 #define SDEBUG_OPT_TRANSPORT_ERR	16
156 #define SDEBUG_OPT_DIF_ERR		32
157 #define SDEBUG_OPT_DIX_ERR		64
158 #define SDEBUG_OPT_MAC_TIMEOUT		128
159 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
160 #define SDEBUG_OPT_Q_NOISE		0x200
161 #define SDEBUG_OPT_ALL_TSF		0x400
162 #define SDEBUG_OPT_RARE_TSF		0x800
163 #define SDEBUG_OPT_N_WCE		0x1000
164 #define SDEBUG_OPT_RESET_NOISE		0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
166 #define SDEBUG_OPT_HOST_BUSY		0x8000
167 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
168 			      SDEBUG_OPT_RESET_NOISE)
169 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
170 				  SDEBUG_OPT_TRANSPORT_ERR | \
171 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
172 				  SDEBUG_OPT_SHORT_TRANSFER | \
173 				  SDEBUG_OPT_HOST_BUSY)
174 /* When "every_nth" > 0 then modulo "every_nth" commands:
175  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
176  *   - a RECOVERED_ERROR is simulated on successful read and write
177  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
178  *   - a TRANSPORT_ERROR is simulated on successful read and write
179  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
180  *
181  * When "every_nth" < 0 then after "- every_nth" commands:
182  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
183  *   - a RECOVERED_ERROR is simulated on successful read and write
184  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
185  *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 * every_nth via sysfs).
 */
190  */
191 
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193  * priority order. In the subset implemented here lower numbers have higher
194  * priority. The UA numbers should be a sequence starting from 0 with
195  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
204 
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206  * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
209 
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211  * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  255
224 
225 #define F_D_IN			1
226 #define F_D_OUT			2
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10
230 #define F_SKIP_UA		0x20
231 #define F_DELAY_OVERR		0x40
232 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
234 #define F_INV_OP		0x200
235 #define F_FAKE_RW		0x400
236 #define F_M_ACCESS		0x800	/* media access */
237 #define F_SSU_DELAY		0x1000
238 #define F_SYNC_DELAY		0x2000
239 
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY		(F_SSU_DELAY | F_SYNC_DELAY)
244 
245 #define SDEBUG_MAX_PARTS 4
246 
247 #define SDEBUG_MAX_CMD_LEN 32
248 
249 
/* Per logical unit (LUN) state. One instance exists for each simulated
 * <channel, target, lun> tuple and is linked into the owning host's
 * dev_info_list via dev_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* logical unit name; reported when uuid_ctl set */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
	unsigned long uas_bm[1];	/* pending Unit Attentions; bit N is SDEBUG_UA_N */
	atomic_t num_in_q;	/* commands currently queued on this device */
	atomic_t stopped;	/* nonzero while unit is stopped (not ready) */
	bool used;
};
262 
/* One instance per simulated host (adapter), linked into the global
 * sdebug_host_list. Owns the list of simulated devices behind it.
 */
struct sdebug_host_info {
	struct list_head host_list;	/* entry in sdebug_host_list */
	struct Scsi_Host *shost;	/* associated mid-level host object */
	struct device dev;		/* embedded device; see to_sdebug_host() */
	struct list_head dev_info_list;	/* devices (LUNs) behind this host */
};
269 
270 #define to_sdebug_host(d)	\
271 	container_of(d, struct sdebug_host_info, dev)
272 
273 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
274 		      SDEB_DEFER_WQ = 2};
275 
/* Deferred-completion state for one queued command. The response may be
 * delivered later from either a high resolution timer (hrt) or a
 * workqueue (ew); defer_t records which mechanism (if any) is armed.
 */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;	/* hrt has been initialized */
	bool init_wq;	/* ew has been initialized */
	enum sdeb_defer_type defer_t;
};
286 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* the mid-level command occupying this slot */
	/* inj_* bits mark this command for error injection; presumably they
	 * mirror the matching SDEBUG_OPT_* option bits — confirm at use sites.
	 */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};
300 
/* One submit queue; sdebug_q_arr holds submit_queues of these. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit N set: qc_arr[N] busy */
	spinlock_t qc_lock;	/* guards this queue's state */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
307 
308 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
309 static atomic_t sdebug_completions;  /* count of deferred completions */
310 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
311 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
312 
/* Describes one supported SCSI opcode (or opcode + service action):
 * the cdb validation mask (len_mask), data direction and other F_* flags,
 * and the response function (pfp) that services it. Entries sharing a
 * preferred opcode chain to alternates via arrp.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
324 
325 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Index 0 (SDEB_I_INVALID_OPCODE) is the catch-all for unsupported cdbs. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
359 
360 
/* Maps the first cdb byte (the opcode) of all 256 possible commands onto
 * the SDEB_I_* index of its handler in opcode_info_arr[]. Unlisted
 * opcodes map to 0 (SDEB_I_INVALID_OPCODE).
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
403 
404 /*
405  * The following "response" functions return the SCSI mid-level's 4 byte
406  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
407  * command completion, they can mask their return value with
408  * SDEG_RES_IMMED_MASK .
409  */
410 #define SDEG_RES_IMMED_MASK 0x40000000
411 
412 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
423 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
424 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
425 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
426 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
427 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
428 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
429 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
430 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
431 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
432 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
433 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
434 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
435 
436 /*
437  * The following are overflow arrays for cdbs that "hit" the same index in
438  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
439  * should be placed in opcode_info_arr[], the others should be placed here.
440  */
/* Overflow entry for SDEB_I_MODE_SENSE: MODE SENSE(6) */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
445 
/* Overflow entry for SDEB_I_MODE_SELECT: MODE SELECT(6) */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
450 
/* Overflow entries for SDEB_I_READ; READ(16) lives in opcode_info_arr[] */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
461 
/* Overflow entries for SDEB_I_WRITE; WRITE(16) lives in opcode_info_arr[] */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
473 
/* Overflow entry for SDEB_I_SERV_ACT_IN_16 (opcode 0x9e) */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
479 
/* Overflow entries for SDEB_I_VARIABLE_LEN (opcode 0x7f, sa in byte 9) */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
488 
/* Overflow entries for SDEB_I_MAINT_IN (opcode 0xa3) */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
497 
/* Overflow entry for SDEB_I_WRITE_SAME: WRITE SAME(16) */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
503 
/* Overflow entry for SDEB_I_RESERVE: RESERVE(6) */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
508 
/* Overflow entry for SDEB_I_RELEASE: RELEASE(6) */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
513 
/* Overflow entry for SDEB_I_SYNC_CACHE: SYNCHRONIZE CACHE(16) */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
519 
520 
521 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
522  * plus the terminating elements for logic that scans this table such as
523  * REPORT SUPPORTED OPERATION CODES. */
524 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
525 /* 0 */
526 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
527 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
529 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
530 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
531 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
532 	     0, 0} },					/* REPORT LUNS */
533 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
534 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
535 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
536 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
537 /* 5 */
538 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
539 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
540 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
541 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
542 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
543 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
544 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
545 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
546 	     0, 0, 0} },
547 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
548 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
549 	     0, 0} },
550 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
551 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
552 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
553 /* 10 */
554 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
555 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
556 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
558 	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
559 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
560 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
561 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
562 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
563 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
564 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
566 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
567 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
568 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
569 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
570 				0xff, 0, 0xc7, 0, 0, 0, 0} },
571 /* 15 */
572 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
573 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
575 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
576 	     0, 0, 0, 0, 0, 0} },
577 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
578 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
579 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
580 	     0xff, 0xff} },
581 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
582 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
583 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
584 	     0} },
585 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
586 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
587 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
588 	     0} },
589 /* 20 */
590 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
591 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
593 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
594 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
595 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
596 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
597 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
598 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
599 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
600 /* 25 */
601 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
602 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
603 		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
604 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
605 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
606 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
607 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
608 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
609 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
610 		 0, 0, 0, 0, 0} },
611 	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
612 	    resp_sync_cache, sync_cache_iarr,
613 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
614 	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
615 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
616 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
617 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
618 
619 /* 30 */
620 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
621 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
622 };
623 
624 static int sdebug_add_host = DEF_NUM_HOST;
625 static int sdebug_ato = DEF_ATO;
626 static int sdebug_cdb_len = DEF_CDB_LEN;
627 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
628 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
629 static int sdebug_dif = DEF_DIF;
630 static int sdebug_dix = DEF_DIX;
631 static int sdebug_dsense = DEF_D_SENSE;
632 static int sdebug_every_nth = DEF_EVERY_NTH;
633 static int sdebug_fake_rw = DEF_FAKE_RW;
634 static unsigned int sdebug_guard = DEF_GUARD;
635 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
636 static int sdebug_max_luns = DEF_MAX_LUNS;
637 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
638 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
639 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
640 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
641 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
642 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
643 static int sdebug_no_uld;
644 static int sdebug_num_parts = DEF_NUM_PARTS;
645 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
646 static int sdebug_opt_blks = DEF_OPT_BLKS;
647 static int sdebug_opts = DEF_OPTS;
648 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
649 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
650 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
651 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
652 static int sdebug_sector_size = DEF_SECTOR_SIZE;
653 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
654 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
655 static unsigned int sdebug_lbpu = DEF_LBPU;
656 static unsigned int sdebug_lbpws = DEF_LBPWS;
657 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
658 static unsigned int sdebug_lbprz = DEF_LBPRZ;
659 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
660 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
661 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
662 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
663 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
664 static int sdebug_uuid_ctl = DEF_UUID_CTL;
665 static bool sdebug_removable = DEF_REMOVABLE;
666 static bool sdebug_clustering;
667 static bool sdebug_host_lock = DEF_HOST_LOCK;
668 static bool sdebug_strict = DEF_STRICT;
669 static bool sdebug_any_injecting_opt;
670 static bool sdebug_verbose;
671 static bool have_dif_prot;
672 static bool write_since_sync;
673 static bool sdebug_statistics = DEF_STATISTICS;
674 
675 static unsigned int sdebug_store_sectors;
676 static sector_t sdebug_capacity;	/* in sectors */
677 
678 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
679    may still need them */
680 static int sdebug_heads;		/* heads per disk */
681 static int sdebug_cylinders_per;	/* cylinders per surface */
682 static int sdebug_sectors_per;		/* sectors per cylinder */
683 
684 static LIST_HEAD(sdebug_host_list);
685 static DEFINE_SPINLOCK(sdebug_host_list_lock);
686 
687 static unsigned char *fake_storep;	/* ramdisk storage */
688 static struct t10_pi_tuple *dif_storep;	/* protection info */
689 static void *map_storep;		/* provisioning map */
690 
691 static unsigned long map_size;
692 static int num_aborts;
693 static int num_dev_resets;
694 static int num_target_resets;
695 static int num_bus_resets;
696 static int num_host_resets;
697 static int dix_writes;
698 static int dix_reads;
699 static int dif_errors;
700 
701 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
702 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
703 
704 static DEFINE_RWLOCK(atomic_rw);
705 
706 static char sdebug_proc_name[] = MY_NAME;
707 static const char *my_name = MY_NAME;
708 
709 static struct bus_type pseudo_lld_bus;
710 
711 static struct device_driver sdebug_driverfs_driver = {
712 	.name 		= sdebug_proc_name,
713 	.bus		= &pseudo_lld_bus,
714 };
715 
716 static const int check_condition_result =
717 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
718 
719 static const int illegal_condition_result =
720 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
721 
722 static const int device_qfull_result =
723 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
724 
725 
726 /* Only do the extra work involved in logical block provisioning if one or
727  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
728  * real reads and writes (i.e. not skipping them for speed).
729  */
730 static inline bool scsi_debug_lbp(void)
731 {
732 	return 0 == sdebug_fake_rw &&
733 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
734 }
735 
736 static void *fake_store(unsigned long long lba)
737 {
738 	lba = do_div(lba, sdebug_store_sectors);
739 
740 	return fake_storep + lba * sdebug_sector_size;
741 }
742 
743 static struct t10_pi_tuple *dif_store(sector_t sector)
744 {
745 	sector = sector_div(sector, sdebug_store_sectors);
746 
747 	return dif_storep + sector;
748 }
749 
750 static void sdebug_max_tgts_luns(void)
751 {
752 	struct sdebug_host_info *sdbg_host;
753 	struct Scsi_Host *hpnt;
754 
755 	spin_lock(&sdebug_host_list_lock);
756 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
757 		hpnt = sdbg_host->shost;
758 		if ((hpnt->this_id >= 0) &&
759 		    (sdebug_num_tgts > hpnt->this_id))
760 			hpnt->max_id = sdebug_num_tgts + 1;
761 		else
762 			hpnt->max_id = sdebug_num_tgts;
763 		/* sdebug_max_luns; */
764 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
765 	}
766 	spin_unlock(&sdebug_host_list_lock);
767 }
768 
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Build an ILLEGAL REQUEST sense with a SENSE KEY SPECIFIC field pointing
 * at the offending byte (in_byte) and bit (in_bit) of either the CDB
 * (c_d==SDEB_IN_CDB) or the data-out parameter list (SDEB_IN_DATA).
 * Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense key specific bytes */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key-specific descriptor
		 * and grow the additional sense length (byte 7) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* descriptor type */
		sbuff[sl + 1] = 0x6;	/* additional length */
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15-17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
811 
812 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
813 {
814 	unsigned char *sbuff;
815 
816 	sbuff = scp->sense_buffer;
817 	if (!sbuff) {
818 		sdev_printk(KERN_ERR, scp->device,
819 			    "%s: sense_buffer is NULL\n", __func__);
820 		return;
821 	}
822 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
823 
824 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
825 
826 	if (sdebug_verbose)
827 		sdev_printk(KERN_INFO, scp->device,
828 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
829 			    my_name, key, asc, asq);
830 }
831 
/* Set ILLEGAL REQUEST + INVALID COMMAND OPERATION CODE sense on scp */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
836 
837 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
838 {
839 	if (sdebug_verbose) {
840 		if (0x1261 == cmd)
841 			sdev_printk(KERN_INFO, dev,
842 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
843 		else if (0x5331 == cmd)
844 			sdev_printk(KERN_INFO, dev,
845 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
846 				    __func__);
847 		else
848 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
849 				    __func__, cmd);
850 	}
851 	return -EINVAL;
852 	/* return -ENOTTY; // correct return but upsets fdisk */
853 }
854 
/* Adjust sdev's READ/WRITE and MODE SENSE/SELECT CDB-size preferences to
 * match the sdebug_cdb_len module parameter. An unsupported value falls
 * back to 10 byte CDBs and rewrites sdebug_cdb_len accordingly.
 */
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
893 
894 static void all_config_cdb_len(void)
895 {
896 	struct sdebug_host_info *sdbg_host;
897 	struct Scsi_Host *shost;
898 	struct scsi_device *sdev;
899 
900 	spin_lock(&sdebug_host_list_lock);
901 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
902 		shost = sdbg_host->shost;
903 		shost_for_each_device(sdev, shost) {
904 			config_cdb_len(sdev);
905 		}
906 	}
907 	spin_unlock(&sdebug_host_list_lock);
908 }
909 
910 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
911 {
912 	struct sdebug_host_info *sdhp;
913 	struct sdebug_dev_info *dp;
914 
915 	spin_lock(&sdebug_host_list_lock);
916 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
917 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
918 			if ((devip->sdbg_host == dp->sdbg_host) &&
919 			    (devip->target == dp->target))
920 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
921 		}
922 	}
923 	spin_unlock(&sdebug_host_list_lock);
924 }
925 
/* Report (and clear) the lowest-numbered pending unit attention (UA)
 * condition recorded in devip->uas_bm, if any. Builds the matching sense
 * data on scp. Returns check_condition_result when a UA was reported,
 * else 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, verbose mode only */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* UA conditions are one-shot: consume the bit just reported */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1005 
1006 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1007 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1008 				int arr_len)
1009 {
1010 	int act_len;
1011 	struct scsi_data_buffer *sdb = scsi_in(scp);
1012 
1013 	if (!sdb->length)
1014 		return 0;
1015 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1016 		return DID_ERROR << 16;
1017 
1018 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1019 				      arr, arr_len);
1020 	sdb->resid = scsi_bufflen(scp) - act_len;
1021 
1022 	return 0;
1023 }
1024 
1025 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1026  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1027  * calls, not required to write in ascending offset order. Assumes resid
1028  * set to scsi_bufflen() prior to any calls.
1029  */
1030 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1031 				  int arr_len, unsigned int off_dst)
1032 {
1033 	int act_len, n;
1034 	struct scsi_data_buffer *sdb = scsi_in(scp);
1035 	off_t skip = off_dst;
1036 
1037 	if (sdb->length <= off_dst)
1038 		return 0;
1039 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
1040 		return DID_ERROR << 16;
1041 
1042 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1043 				       arr, arr_len, skip);
1044 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1045 		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
1046 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
1047 	sdb->resid = min(sdb->resid, n);
1048 	return 0;
1049 }
1050 
1051 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1052  * 'arr' or -1 if error.
1053  */
1054 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1055 			       int arr_len)
1056 {
1057 	if (!scsi_bufflen(scp))
1058 		return 0;
1059 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1060 		return -1;
1061 
1062 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1063 }
1064 
1065 
/* INQUIRY strings; each array is one byte longer than the 8/16/4 byte
 * INQUIRY field it fills, so the initializer stays NUL terminated. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1073 
/* Device identification VPD page (0x83). Returns number of bytes placed
 * in arr. Emits, in order: a T10 vendor-id designator, optionally a
 * logical-unit designator (UUID or NAA-3) plus a relative target port
 * designator (only when dev_id_num >= 0, i.e. not a well known LUN),
 * then target port / port group / target device NAA-3 designators and a
 * SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length, excluding the 4 byte header */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* 12 + 8 hex digits + 4 NUL pad bytes */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1161 
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page (0x84): copy the canned
 * identifiers into arr and report their length.
 */
static int inquiry_vpd_84(unsigned char *arr)
{
	const size_t len = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, len);
	return (int)len;
}
1174 
/* Management network addresses VPD page (0x85) */

/* Append one network-address descriptor: a 4 byte header (association/
 * service type, reserved bytes, padded length) followed by the URL, NUL
 * terminated and zero padded to a multiple of 4 bytes. Returns the number
 * of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, unsigned char assoc_svc,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* include the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[num++] = assoc_svc;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Build the page: one storage-configuration URL and one logging URL.
 * Returns number of bytes placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* 0x1: lu association, storage configuration service */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* 0x4: lu association, logging service */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1209 
1210 /* SCSI ports VPD page */
1211 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1212 {
1213 	int num = 0;
1214 	int port_a, port_b;
1215 
1216 	port_a = target_dev_id + 1;
1217 	port_b = port_a + 1;
1218 	arr[num++] = 0x0;	/* reserved */
1219 	arr[num++] = 0x0;	/* reserved */
1220 	arr[num++] = 0x0;
1221 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1222 	memset(arr + num, 0, 6);
1223 	num += 6;
1224 	arr[num++] = 0x0;
1225 	arr[num++] = 12;	/* length tp descriptor */
1226 	/* naa-5 target port identifier (A) */
1227 	arr[num++] = 0x61;	/* proto=sas, binary */
1228 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1229 	arr[num++] = 0x0;	/* reserved */
1230 	arr[num++] = 0x8;	/* length */
1231 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1232 	num += 8;
1233 	arr[num++] = 0x0;	/* reserved */
1234 	arr[num++] = 0x0;	/* reserved */
1235 	arr[num++] = 0x0;
1236 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1237 	memset(arr + num, 0, 6);
1238 	num += 6;
1239 	arr[num++] = 0x0;
1240 	arr[num++] = 12;	/* length tp descriptor */
1241 	/* naa-5 target port identifier (B) */
1242 	arr[num++] = 0x61;	/* proto=sas, binary */
1243 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1244 	arr[num++] = 0x0;	/* reserved */
1245 	arr[num++] = 0x8;	/* length */
1246 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1247 	num += 8;
1248 
1249 	return num;
1250 }
1251 
1252 
/* Canned payload for the ATA Information VPD page (0x89); includes faked
 * SAT vendor/product/revision strings followed by identify-device style
 * data. Copied verbatim by inquiry_vpd_89(). */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1296 
1297 /* ATA Information VPD page */
1298 static int inquiry_vpd_89(unsigned char *arr)
1299 {
1300 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1301 	return sizeof(vpd89_data);
1302 }
1303 
1304 
/* Default contents for the Block limits VPD page (0xb0); most fields are
 * overwritten by inquiry_vpd_b0() from the sdebug_* parameters. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1311 
1312 /* Block limits VPD page (SBC-3) */
1313 static int inquiry_vpd_b0(unsigned char *arr)
1314 {
1315 	unsigned int gran;
1316 
1317 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1318 
1319 	/* Optimal transfer length granularity */
1320 	if (sdebug_opt_xferlen_exp != 0 &&
1321 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1322 		gran = 1 << sdebug_opt_xferlen_exp;
1323 	else
1324 		gran = 1 << sdebug_physblk_exp;
1325 	put_unaligned_be16(gran, arr + 2);
1326 
1327 	/* Maximum Transfer Length */
1328 	if (sdebug_store_sectors > 0x400)
1329 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1330 
1331 	/* Optimal Transfer Length */
1332 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1333 
1334 	if (sdebug_lbpu) {
1335 		/* Maximum Unmap LBA Count */
1336 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1337 
1338 		/* Maximum Unmap Block Descriptor Count */
1339 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1340 	}
1341 
1342 	/* Unmap Granularity Alignment */
1343 	if (sdebug_unmap_alignment) {
1344 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1345 		arr[28] |= 0x80; /* UGAVALID */
1346 	}
1347 
1348 	/* Optimal Unmap Granularity */
1349 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1350 
1351 	/* Maximum WRITE SAME Length */
1352 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1353 
1354 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1355 
1356 	return sizeof(vpdb0_data);
1357 }
1358 
/* Block device characteristics VPD page (SBC-3): advertise a tiny
 * non-rotating (solid state) medium. Returns the page length (0x3c).
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
1370 
1371 /* Logical block provisioning VPD page (SBC-4) */
1372 static int inquiry_vpd_b2(unsigned char *arr)
1373 {
1374 	memset(arr, 0, 0x4);
1375 	arr[0] = 0;			/* threshold exponent */
1376 	if (sdebug_lbpu)
1377 		arr[1] = 1 << 7;
1378 	if (sdebug_lbpws)
1379 		arr[1] |= 1 << 6;
1380 	if (sdebug_lbpws10)
1381 		arr[1] |= 1 << 5;
1382 	if (sdebug_lbprz && scsi_debug_lbp())
1383 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1384 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1385 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1386 	/* threshold_percentage=0 */
1387 	return 0x4;
1388 }
1389 
1390 #define SDEBUG_LONG_INQ_SZ 96
1391 #define SDEBUG_MAX_INQ_ARR_SZ 584
1392 
/* Respond to the INQUIRY command: either one of the VPD pages advertised
 * in page 0x0 (when the EVPD bit is set) or the standard inquiry data.
 * Returns 0, check_condition_result, or DID_REQUEUE << 16 when the
 * response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* well known LUNs get no lu designator (lu_id_num == -1) */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1543 
/* Informational exceptions control mode page (0x1c). iec_m_pg[2] & 0x4
 * (TEST bit) and iec_m_pg[3] & 0xf (MRIE field) are consulted by
 * resp_requests() below. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1546 
1547 static int resp_requests(struct scsi_cmnd *scp,
1548 			 struct sdebug_dev_info *devip)
1549 {
1550 	unsigned char *sbuff;
1551 	unsigned char *cmd = scp->cmnd;
1552 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1553 	bool dsense;
1554 	int len = 18;
1555 
1556 	memset(arr, 0, sizeof(arr));
1557 	dsense = !!(cmd[1] & 1);
1558 	sbuff = scp->sense_buffer;
1559 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1560 		if (dsense) {
1561 			arr[0] = 0x72;
1562 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1563 			arr[2] = THRESHOLD_EXCEEDED;
1564 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1565 			len = 8;
1566 		} else {
1567 			arr[0] = 0x70;
1568 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1569 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1570 			arr[12] = THRESHOLD_EXCEEDED;
1571 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1572 		}
1573 	} else {
1574 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1575 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1576 			;	/* have sense and formats match */
1577 		else if (arr[0] <= 0x70) {
1578 			if (dsense) {
1579 				memset(arr, 0, 8);
1580 				arr[0] = 0x72;
1581 				len = 8;
1582 			} else {
1583 				memset(arr, 0, 18);
1584 				arr[0] = 0x70;
1585 				arr[7] = 0xa;
1586 			}
1587 		} else if (dsense) {
1588 			memset(arr, 0, 8);
1589 			arr[0] = 0x72;
1590 			arr[1] = sbuff[2];     /* sense key */
1591 			arr[2] = sbuff[12];    /* asc */
1592 			arr[3] = sbuff[13];    /* ascq */
1593 			len = 8;
1594 		} else {
1595 			memset(arr, 0, 18);
1596 			arr[0] = 0x70;
1597 			arr[2] = sbuff[1];
1598 			arr[7] = 0xa;
1599 			arr[12] = sbuff[1];
1600 			arr[13] = sbuff[3];
1601 		}
1602 
1603 	}
1604 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1605 	return fill_from_dev_buffer(scp, arr, len);
1606 }
1607 
1608 static int resp_start_stop(struct scsi_cmnd *scp,
1609 			   struct sdebug_dev_info *devip)
1610 {
1611 	unsigned char *cmd = scp->cmnd;
1612 	int power_cond, stop;
1613 	bool changing;
1614 
1615 	power_cond = (cmd[4] & 0xf0) >> 4;
1616 	if (power_cond) {
1617 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1618 		return check_condition_result;
1619 	}
1620 	stop = !(cmd[4] & 1);
1621 	changing = atomic_read(&devip->stopped) == !stop;
1622 	atomic_xchg(&devip->stopped, stop);
1623 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
1624 		return SDEG_RES_IMMED_MASK;
1625 	else
1626 		return 0;
1627 }
1628 
1629 static sector_t get_sdebug_capacity(void)
1630 {
1631 	static const unsigned int gibibyte = 1073741824;
1632 
1633 	if (sdebug_virtual_gb > 0)
1634 		return (sector_t)sdebug_virtual_gb *
1635 			(gibibyte / sdebug_sector_size);
1636 	else
1637 		return sdebug_store_sectors;
1638 }
1639 
1640 #define SDEBUG_READCAP_ARR_SZ 8
1641 static int resp_readcap(struct scsi_cmnd *scp,
1642 			struct sdebug_dev_info *devip)
1643 {
1644 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1645 	unsigned int capac;
1646 
1647 	/* following just in case virtual_gb changed */
1648 	sdebug_capacity = get_sdebug_capacity();
1649 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1650 	if (sdebug_capacity < 0xffffffff) {
1651 		capac = (unsigned int)sdebug_capacity - 1;
1652 		put_unaligned_be32(capac, arr + 0);
1653 	} else
1654 		put_unaligned_be32(0xffffffff, arr + 0);
1655 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1656 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1657 }
1658 
1659 #define SDEBUG_READCAP16_ARR_SZ 32
1660 static int resp_readcap16(struct scsi_cmnd *scp,
1661 			  struct sdebug_dev_info *devip)
1662 {
1663 	unsigned char *cmd = scp->cmnd;
1664 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1665 	int alloc_len;
1666 
1667 	alloc_len = get_unaligned_be32(cmd + 10);
1668 	/* following just in case virtual_gb changed */
1669 	sdebug_capacity = get_sdebug_capacity();
1670 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1671 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1672 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1673 	arr[13] = sdebug_physblk_exp & 0xf;
1674 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1675 
1676 	if (scsi_debug_lbp()) {
1677 		arr[14] |= 0x80; /* LBPME */
1678 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1679 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1680 		 * in the wider field maps to 0 in this field.
1681 		 */
1682 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1683 			arr[14] |= 0x40;
1684 	}
1685 
1686 	arr[15] = sdebug_lowest_aligned & 0xff;
1687 
1688 	if (have_dif_prot) {
1689 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1690 		arr[12] |= 1; /* PROT_EN */
1691 	}
1692 
1693 	return fill_from_dev_buffer(scp, arr,
1694 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1695 }
1696 
1697 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1698 
1699 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1700 			      struct sdebug_dev_info *devip)
1701 {
1702 	unsigned char *cmd = scp->cmnd;
1703 	unsigned char *arr;
1704 	int host_no = devip->sdbg_host->shost->host_no;
1705 	int n, ret, alen, rlen;
1706 	int port_group_a, port_group_b, port_a, port_b;
1707 
1708 	alen = get_unaligned_be32(cmd + 6);
1709 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1710 	if (! arr)
1711 		return DID_REQUEUE << 16;
1712 	/*
1713 	 * EVPD page 0x88 states we have two ports, one
1714 	 * real and a fake port with no device connected.
1715 	 * So we create two port groups with one port each
1716 	 * and set the group with port B to unavailable.
1717 	 */
1718 	port_a = 0x1; /* relative port A */
1719 	port_b = 0x2; /* relative port B */
1720 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1721 			(devip->channel & 0x7f);
1722 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1723 			(devip->channel & 0x7f) + 0x80;
1724 
1725 	/*
1726 	 * The asymmetric access state is cycled according to the host_id.
1727 	 */
1728 	n = 4;
1729 	if (sdebug_vpd_use_hostno == 0) {
1730 		arr[n++] = host_no % 3; /* Asymm access state */
1731 		arr[n++] = 0x0F; /* claim: all states are supported */
1732 	} else {
1733 		arr[n++] = 0x0; /* Active/Optimized path */
1734 		arr[n++] = 0x01; /* only support active/optimized paths */
1735 	}
1736 	put_unaligned_be16(port_group_a, arr + n);
1737 	n += 2;
1738 	arr[n++] = 0;    /* Reserved */
1739 	arr[n++] = 0;    /* Status code */
1740 	arr[n++] = 0;    /* Vendor unique */
1741 	arr[n++] = 0x1;  /* One port per group */
1742 	arr[n++] = 0;    /* Reserved */
1743 	arr[n++] = 0;    /* Reserved */
1744 	put_unaligned_be16(port_a, arr + n);
1745 	n += 2;
1746 	arr[n++] = 3;    /* Port unavailable */
1747 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1748 	put_unaligned_be16(port_group_b, arr + n);
1749 	n += 2;
1750 	arr[n++] = 0;    /* Reserved */
1751 	arr[n++] = 0;    /* Status code */
1752 	arr[n++] = 0;    /* Vendor unique */
1753 	arr[n++] = 0x1;  /* One port per group */
1754 	arr[n++] = 0;    /* Reserved */
1755 	arr[n++] = 0;    /* Reserved */
1756 	put_unaligned_be16(port_b, arr + n);
1757 	n += 2;
1758 
1759 	rlen = n - 4;
1760 	put_unaligned_be32(rlen, arr + 0);
1761 
1762 	/*
1763 	 * Return the smallest value of either
1764 	 * - The allocated length
1765 	 * - The constructed command length
1766 	 * - The maximum array size
1767 	 */
1768 	rlen = min(alen,n);
1769 	ret = fill_from_dev_buffer(scp, arr,
1770 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1771 	kfree(arr);
1772 	return ret;
1773 }
1774 
/* Respond to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa 0xc).
 * REPORTING OPTIONS 0 lists every supported opcode (plus attached
 * variants sharing the same opcode); options 1-3 describe one command.
 * RCTD set appends/announces 12-byte command timeout descriptors. */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeout descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap working buffer at 8 KiB */
	else
		a_len = alloc_len;
	/* a little headroom (64/320) so descriptor writes can't overrun */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* per-descriptor size */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			/* walk the attached (same opcode, other sa) entries */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer-loop cursor */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode has service actions; must use
					 * reporting option 2 or 3 */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested (but inapplicable) sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported, standard format */
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1925 
1926 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1927 			  struct sdebug_dev_info *devip)
1928 {
1929 	bool repd;
1930 	u32 alloc_len, len;
1931 	u8 arr[16];
1932 	u8 *cmd = scp->cmnd;
1933 
1934 	memset(arr, 0, sizeof(arr));
1935 	repd = !!(cmd[2] & 0x80);
1936 	alloc_len = get_unaligned_be32(cmd + 6);
1937 	if (alloc_len < 4) {
1938 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1939 		return check_condition_result;
1940 	}
1941 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1942 	arr[1] = 0x1;		/* ITNRS */
1943 	if (repd) {
1944 		arr[3] = 0xc;
1945 		len = 16;
1946 	} else
1947 		len = 4;
1948 
1949 	len = (len < alloc_len) ? len : alloc_len;
1950 	return fill_from_dev_buffer(scp, arr, len);
1951 }
1952 
1953 /* <<Following mode page info copied from ST318451LW>> */
1954 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: nothing changeable */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1965 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: nothing changeable */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1976 
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{       /* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	/* fill in the geometry fields from the module parameters */
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)	/* changeable values: nothing changeable */
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}
1992 
/* Caching mode page (0x8) "current" values; mutable via MODE SELECT and
 * the SDEBUG_OPT_N_WCE option (see resp_caching_pg/resp_mode_select) */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1996 
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	/* changeable values mask: only WCE is reported changeable */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* default values (pcontrol == 2) */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
2013 
/* Control mode page (0xa) "current" values; D_SENSE and ATO bits are
 * refreshed from module state in resp_ctrl_m_pg() */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2016 
static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	/* changeable values mask: only D_SENSE reported changeable */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	/* default values (pcontrol == 2) */
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	/* reflect current descriptor-sense setting in D_SENSE bit */
	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
2039 
2040 
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	/* changeable values mask: TEST bit and MRIE field */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	/* default values (pcontrol == 2) */
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	/* iec_m_pg is the file-scope "current" page, mutable via MODE SELECT */
	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
2055 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: nothing changeable */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2066 
2067 
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch the fabricated NAA-3 SAS addresses into both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* phy identifiers derived from the target's device id */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: nothing changeable */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2100 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: nothing changeable */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2112 
#define SDEBUG_MAX_MSENSE_SZ 256

/* Respond to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, optional block descriptor(s) and the requested mode page(s)
 * into arr[], then returns at most alloc_len bytes. */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* LLBAA only in 10-byte CDB */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	/* build mode parameter header; 6 and 10 byte variants differ */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append the (short or long) block descriptor, if requested */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	/* dispatch on the requested page code */
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in MODE DATA LENGTH now that total size is known */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2272 
#define SDEBUG_MAX_MSELECT_SZ 512

/* Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) pages may be changed;
 * a successful change raises a MODE PARAMETERS CHANGED unit attention. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* MODE DATA LENGTH is reserved in a MODE SELECT parameter list and
	 * is expected to be (near) zero here */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);	/* skip header + block descriptors */
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT data */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* accept the new page only if its length matches the current page */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* keep sense format in sync with the D_SENSE bit */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2352 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd): current 38 C, reference 65 C */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,
		0x0, 0x1, 0x3, 0x2, 0x0, 65,
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2362 
static int resp_ie_l_pg(unsigned char *arr)
{	/* Informational Exceptions log page (0x2f) */
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
		};

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	/* when the IEC mode page TEST bit is set, fake a threshold trip */
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;	/* bogus temperature */
	}
	return sizeof(ie_l_pg);
}
2375 
2376 #define SDEBUG_MAX_LSENSE_SZ 512
2377 
2378 static int resp_log_sense(struct scsi_cmnd *scp,
2379 			  struct sdebug_dev_info *devip)
2380 {
2381 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2382 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2383 	unsigned char *cmd = scp->cmnd;
2384 
2385 	memset(arr, 0, sizeof(arr));
2386 	ppc = cmd[1] & 0x2;
2387 	sp = cmd[1] & 0x1;
2388 	if (ppc || sp) {
2389 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2390 		return check_condition_result;
2391 	}
2392 	pcode = cmd[2] & 0x3f;
2393 	subpcode = cmd[3] & 0xff;
2394 	alloc_len = get_unaligned_be16(cmd + 7);
2395 	arr[0] = pcode;
2396 	if (0 == subpcode) {
2397 		switch (pcode) {
2398 		case 0x0:	/* Supported log pages log page */
2399 			n = 4;
2400 			arr[n++] = 0x0;		/* this page */
2401 			arr[n++] = 0xd;		/* Temperature */
2402 			arr[n++] = 0x2f;	/* Informational exceptions */
2403 			arr[3] = n - 4;
2404 			break;
2405 		case 0xd:	/* Temperature log page */
2406 			arr[3] = resp_temp_l_pg(arr + 4);
2407 			break;
2408 		case 0x2f:	/* Informational exceptions log page */
2409 			arr[3] = resp_ie_l_pg(arr + 4);
2410 			break;
2411 		default:
2412 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2413 			return check_condition_result;
2414 		}
2415 	} else if (0xff == subpcode) {
2416 		arr[0] |= 0x40;
2417 		arr[1] = subpcode;
2418 		switch (pcode) {
2419 		case 0x0:	/* Supported log pages and subpages log page */
2420 			n = 4;
2421 			arr[n++] = 0x0;
2422 			arr[n++] = 0x0;		/* 0,0 page */
2423 			arr[n++] = 0x0;
2424 			arr[n++] = 0xff;	/* this page */
2425 			arr[n++] = 0xd;
2426 			arr[n++] = 0x0;		/* Temperature */
2427 			arr[n++] = 0x2f;
2428 			arr[n++] = 0x0;	/* Informational exceptions */
2429 			arr[3] = n - 4;
2430 			break;
2431 		case 0xd:	/* Temperature subpages */
2432 			n = 4;
2433 			arr[n++] = 0xd;
2434 			arr[n++] = 0x0;		/* Temperature */
2435 			arr[3] = n - 4;
2436 			break;
2437 		case 0x2f:	/* Informational exceptions subpages */
2438 			n = 4;
2439 			arr[n++] = 0x2f;
2440 			arr[n++] = 0x0;		/* Informational exceptions */
2441 			arr[3] = n - 4;
2442 			break;
2443 		default:
2444 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2445 			return check_condition_result;
2446 		}
2447 	} else {
2448 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2449 		return check_condition_result;
2450 	}
2451 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2452 	return fill_from_dev_buffer(scp, arr,
2453 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2454 }
2455 
/* Validate an LBA/length pair against the (virtual) capacity and the real
 * store size. Returns 0 on success or check_condition_result with sense
 * data set on scp. */
static int check_device_access_params(struct scsi_cmnd *scp,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}
2471 
/* Returns number of bytes copied or -1 if error. */
/* Copies 'num' sectors between the command's scatter-gather list (offset
 * by sg_skip bytes) and the fake store. The store may be smaller than the
 * advertised capacity, so the access wraps modulo sdebug_store_sectors;
 * 'rest' is the portion that wraps back to the start of the store. */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* map the logical LBA onto the (possibly smaller) fake store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short copy: report what was transferred */

	if (rest) {
		/* wrapped portion starts at the beginning of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2514 
/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
 * arr into fake_store(lba,num) and return true. If comparison fails then
 * return false. */
/* Implements the COMPARE AND WRITE data phase: arr holds 2*num sectors,
 * the first half to compare, the second half to write. Accesses wrap
 * modulo sdebug_store_sectors like do_device_access(). */
static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	/* compare first (possibly split) half against the store */
	res = !memcmp(fake_storep + (block * lb_size), arr,
		      (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		/* NOTE(review): memcmp() result used without '!', unlike the
		 * first compare above — looks inverted; confirm intent */
		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
			     rest * lb_size);
	if (!res)
		return res;
	/* comparison passed: write the second half of arr to the store */
	arr += num * lb_size;
	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fake_storep, arr + ((num - rest) * lb_size),
		       rest * lb_size);
	return res;
}
2545 
2546 static __be16 dif_compute_csum(const void *buf, int len)
2547 {
2548 	__be16 csum;
2549 
2550 	if (sdebug_guard)
2551 		csum = (__force __be16)ip_compute_csum(buf, len);
2552 	else
2553 		csum = cpu_to_be16(crc_t10dif(buf, len));
2554 
2555 	return csum;
2556 }
2557 
/* Verify one sector's protection information tuple against its data.
 * Returns 0 on success, 0x01 for a guard tag mismatch or 0x03 for a
 * reference tag mismatch (values match the ASCQ reported by callers). */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: reference tag is the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: reference tag must equal the expected initial LBA */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2584 
/* Copy protection-information tuples between the command's protection
 * scatter-gather list and dif_storep, for 'sectors' sectors starting at
 * 'sector'. The store wraps at sdebug_store_sectors ('rest' handles the
 * wrapped tail). 'read' selects the copy direction (store -> sgl). */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* portion of this chunk that wraps past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part continues at the start of dif_storep */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2627 
2628 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2629 			    unsigned int sectors, u32 ei_lba)
2630 {
2631 	unsigned int i;
2632 	struct t10_pi_tuple *sdt;
2633 	sector_t sector;
2634 
2635 	for (i = 0; i < sectors; i++, ei_lba++) {
2636 		int ret;
2637 
2638 		sector = start_sec + i;
2639 		sdt = dif_store(sector);
2640 
2641 		if (sdt->app_tag == cpu_to_be16(0xffff))
2642 			continue;
2643 
2644 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2645 		if (ret) {
2646 			dif_errors++;
2647 			return ret;
2648 		}
2649 	}
2650 
2651 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2652 	dix_reads++;
2653 
2654 	return 0;
2655 }
2656 
/*
 * Respond to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, optionally injects errors, validates
 * bounds and protection info, then copies data from the fake store into
 * the command's data-in buffer under the atomic_rw read lock.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* validate RDPROTECT field against the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* optional error injection: inj_short halves the transfer length */
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* simulated medium error if the range overlaps the configured span */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* remaining injected errors are reported after the data transfer */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2797 
/*
 * Dump one sector's bytes to the log, 16 per line: printable ASCII as
 * " c ", everything else as a two digit hex value.  @len is assumed to
 * be a multiple of 16 (it is always sdebug_sector_size here).
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			/* printable ASCII is 0x20..0x7e inclusive; the old
			 * test (c < 0x7e) wrongly hex-dumped '~' */
			if (c >= 0x20 && c < 0x7f)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2819 
/*
 * Verify incoming protection tuples against the incoming data for a
 * write.  Walks the protection sgl (piter) and the data sgl (diter) in
 * lock step, one tuple per logical block.  On success the tuples are
 * copied into dif_storep.  Returns 0 or a dif_verify() error code.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* tell the data iterator how much of this page we consumed */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2891 
2892 static unsigned long lba_to_map_index(sector_t lba)
2893 {
2894 	if (sdebug_unmap_alignment)
2895 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2896 	sector_div(lba, sdebug_unmap_granularity);
2897 	return lba;
2898 }
2899 
2900 static sector_t map_index_to_lba(unsigned long index)
2901 {
2902 	sector_t lba = index * sdebug_unmap_granularity;
2903 
2904 	if (sdebug_unmap_alignment)
2905 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2906 	return lba;
2907 }
2908 
2909 static unsigned int map_state(sector_t lba, unsigned int *num)
2910 {
2911 	sector_t end;
2912 	unsigned int mapped;
2913 	unsigned long index;
2914 	unsigned long next;
2915 
2916 	index = lba_to_map_index(lba);
2917 	mapped = test_bit(index, map_storep);
2918 
2919 	if (mapped)
2920 		next = find_next_zero_bit(map_storep, map_size, index);
2921 	else
2922 		next = find_next_bit(map_storep, map_size, index);
2923 
2924 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2925 	*num = end - lba;
2926 	return mapped;
2927 }
2928 
2929 static void map_region(sector_t lba, unsigned int len)
2930 {
2931 	sector_t end = lba + len;
2932 
2933 	while (lba < end) {
2934 		unsigned long index = lba_to_map_index(lba);
2935 
2936 		if (index < map_size)
2937 			set_bit(index, map_storep);
2938 
2939 		lba = map_index_to_lba(index + 1);
2940 	}
2941 }
2942 
/*
 * Clear the mapped bit for every provisioning chunk fully contained in
 * [lba, lba + len); partially covered chunks are left mapped.  For
 * unmapped chunks the data store is filled per the LBPRZ setting and
 * any protection info is invalidated.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap chunks that are aligned and wholly inside
		 * the requested range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				/* LBPRZ=1 reads back zeroes, LBPRZ=2 0xff */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* invalidate PI (app tag 0xffff = no check) */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2970 
/*
 * Respond to WRITE(6/10/12/16/32) and XDWRITEREAD(10)'s write half.
 * Decodes lba/num from the cdb, validates bounds and protection info,
 * then copies data from the command's data-out buffer into the fake
 * store under the atomic_rw write lock.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* validate WRPROTECT field against the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	/* with thin provisioning, record the written range as mapped */
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection, reported after the data transfer */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3091 
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 *
 * The data-out buffer starts with a header of lbdof blocks holding the
 * LBA range descriptors (lrd), followed by the data for each range.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	unsigned long iflags;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {        /* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		/* validate wrprotect against the configured DIF type */
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* all descriptors (plus header) must fit in the lbdof header area */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock_irqsave(&atomic_rw, iflags);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* process each LBA range descriptor in turn */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(scp, sg_off, lba, num, true);
		/* with thin provisioning, record the written range as mapped */
		if (unlikely(scsi_debug_lbp()))
			map_region(lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* optional error injection */
		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock_irqrestore(&atomic_rw, iflags);
err_out:
	kfree(lrdp);
	return ret;
}
3258 
/*
 * Common worker for WRITE SAME(10/16).  With @unmap set (and LBP
 * enabled) the range is deallocated; otherwise one logical block is
 * zeroed (@ndob) or fetched from the data-out buffer, then replicated
 * across the remaining blocks of the range.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* with thin provisioning, record the written range as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3309 
3310 static int resp_write_same_10(struct scsi_cmnd *scp,
3311 			      struct sdebug_dev_info *devip)
3312 {
3313 	u8 *cmd = scp->cmnd;
3314 	u32 lba;
3315 	u16 num;
3316 	u32 ei_lba = 0;
3317 	bool unmap = false;
3318 
3319 	if (cmd[1] & 0x8) {
3320 		if (sdebug_lbpws10 == 0) {
3321 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3322 			return check_condition_result;
3323 		} else
3324 			unmap = true;
3325 	}
3326 	lba = get_unaligned_be32(cmd + 2);
3327 	num = get_unaligned_be16(cmd + 7);
3328 	if (num > sdebug_write_same_length) {
3329 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3330 		return check_condition_result;
3331 	}
3332 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3333 }
3334 
3335 static int resp_write_same_16(struct scsi_cmnd *scp,
3336 			      struct sdebug_dev_info *devip)
3337 {
3338 	u8 *cmd = scp->cmnd;
3339 	u64 lba;
3340 	u32 num;
3341 	u32 ei_lba = 0;
3342 	bool unmap = false;
3343 	bool ndob = false;
3344 
3345 	if (cmd[1] & 0x8) {	/* UNMAP */
3346 		if (sdebug_lbpws == 0) {
3347 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3348 			return check_condition_result;
3349 		} else
3350 			unmap = true;
3351 	}
3352 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3353 		ndob = true;
3354 	lba = get_unaligned_be64(cmd + 2);
3355 	num = get_unaligned_be32(cmd + 10);
3356 	if (num > sdebug_write_same_length) {
3357 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3358 		return check_condition_result;
3359 	}
3360 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3361 }
3362 
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future.
 *
 * Simulates microcode download by raising the appropriate unit
 * attentions; no data is actually stored.
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3411 
/*
 * COMPARE AND WRITE(0x89): atomically compare num blocks of the store
 * against the first half of the data-out buffer and, only when they
 * match, write the second half.  Atomicity comes from holding the
 * atomic_rw write lock for the whole operation.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	/* data-out carries both compare and write buffers: 2 * num blocks */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* with thin provisioning, record the written range as mapped */
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3487 
/* UNMAP block descriptor: 16 bytes each, following the 8 byte
 * parameter list header (see SBC UNMAP command). */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3493 
3494 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3495 {
3496 	unsigned char *buf;
3497 	struct unmap_block_desc *desc;
3498 	unsigned int i, payload_len, descriptors;
3499 	int ret;
3500 	unsigned long iflags;
3501 
3502 
3503 	if (!scsi_debug_lbp())
3504 		return 0;	/* fib and say its done */
3505 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3506 	BUG_ON(scsi_bufflen(scp) != payload_len);
3507 
3508 	descriptors = (payload_len - 8) / 16;
3509 	if (descriptors > sdebug_unmap_max_desc) {
3510 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3511 		return check_condition_result;
3512 	}
3513 
3514 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3515 	if (!buf) {
3516 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3517 				INSUFF_RES_ASCQ);
3518 		return check_condition_result;
3519 	}
3520 
3521 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3522 
3523 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3524 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3525 
3526 	desc = (void *)&buf[8];
3527 
3528 	write_lock_irqsave(&atomic_rw, iflags);
3529 
3530 	for (i = 0 ; i < descriptors ; i++) {
3531 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3532 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3533 
3534 		ret = check_device_access_params(scp, lba, num);
3535 		if (ret)
3536 			goto out;
3537 
3538 		unmap_region(lba, num);
3539 	}
3540 
3541 	ret = 0;
3542 
3543 out:
3544 	write_unlock_irqrestore(&atomic_rw, iflags);
3545 	kfree(buf);
3546 
3547 	return ret;
3548 }
3549 
3550 #define SDEBUG_GET_LBA_STATUS_LEN 32
3551 
3552 static int resp_get_lba_status(struct scsi_cmnd *scp,
3553 			       struct sdebug_dev_info *devip)
3554 {
3555 	u8 *cmd = scp->cmnd;
3556 	u64 lba;
3557 	u32 alloc_len, mapped, num;
3558 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3559 	int ret;
3560 
3561 	lba = get_unaligned_be64(cmd + 2);
3562 	alloc_len = get_unaligned_be32(cmd + 10);
3563 
3564 	if (alloc_len < 24)
3565 		return 0;
3566 
3567 	ret = check_device_access_params(scp, lba, 1);
3568 	if (ret)
3569 		return ret;
3570 
3571 	if (scsi_debug_lbp())
3572 		mapped = map_state(lba, &num);
3573 	else {
3574 		mapped = 1;
3575 		/* following just in case virtual_gb changed */
3576 		sdebug_capacity = get_sdebug_capacity();
3577 		if (sdebug_capacity - lba <= 0xffffffff)
3578 			num = sdebug_capacity - lba;
3579 		else
3580 			num = 0xffffffff;
3581 	}
3582 
3583 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3584 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3585 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3586 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3587 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3588 
3589 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3590 }
3591 
3592 static int resp_sync_cache(struct scsi_cmnd *scp,
3593 			   struct sdebug_dev_info *devip)
3594 {
3595 	int res = 0;
3596 	u64 lba;
3597 	u32 num_blocks;
3598 	u8 *cmd = scp->cmnd;
3599 
3600 	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
3601 		lba = get_unaligned_be32(cmd + 2);
3602 		num_blocks = get_unaligned_be16(cmd + 7);
3603 	} else {				/* SYNCHRONIZE_CACHE(16) */
3604 		lba = get_unaligned_be64(cmd + 2);
3605 		num_blocks = get_unaligned_be32(cmd + 10);
3606 	}
3607 	if (lba + num_blocks > sdebug_capacity) {
3608 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3609 		return check_condition_result;
3610 	}
3611 	if (!write_since_sync || cmd[1] & 0x2)
3612 		res = SDEG_RES_IMMED_MASK;
3613 	else		/* delay if write_since_sync and IMMED clear */
3614 		write_since_sync = false;
3615 	return res;
3616 }
3617 
3618 #define RL_BUCKET_ELEMS 8
3619 
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	/* response built bucket-by-bucket, RL_BUCKET_ELEMS LUNs at a time */
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	/* SPC-4: allocation length less than 16 is an error; this driver
	 * uses 4 as its lower bound */
	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 carries the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		/* fill remaining slots of this bucket with ascending LUNs */
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partially filled bucket: flushed below */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append W-LUN; lun_p points at first free slot of last bucket */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3718 
3719 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3720 			    unsigned int num, struct sdebug_dev_info *devip)
3721 {
3722 	int j;
3723 	unsigned char *kaddr, *buf;
3724 	unsigned int offset;
3725 	struct scsi_data_buffer *sdb = scsi_in(scp);
3726 	struct sg_mapping_iter miter;
3727 
3728 	/* better not to use temporary buffer. */
3729 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3730 	if (!buf) {
3731 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3732 				INSUFF_RES_ASCQ);
3733 		return check_condition_result;
3734 	}
3735 
3736 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3737 
3738 	offset = 0;
3739 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3740 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3741 
3742 	while (sg_miter_next(&miter)) {
3743 		kaddr = miter.addr;
3744 		for (j = 0; j < miter.length; j++)
3745 			*(kaddr + j) ^= *(buf + offset + j);
3746 
3747 		offset += miter.length;
3748 	}
3749 	sg_miter_stop(&miter);
3750 	kfree(buf);
3751 
3752 	return 0;
3753 }
3754 
3755 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3756 			       struct sdebug_dev_info *devip)
3757 {
3758 	u8 *cmd = scp->cmnd;
3759 	u64 lba;
3760 	u32 num;
3761 	int errsts;
3762 
3763 	if (!scsi_bidi_cmnd(scp)) {
3764 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3765 				INSUFF_RES_ASCQ);
3766 		return check_condition_result;
3767 	}
3768 	errsts = resp_read_dt0(scp, devip);
3769 	if (errsts)
3770 		return errsts;
3771 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3772 		errsts = resp_write_dt0(scp, devip);
3773 		if (errsts)
3774 			return errsts;
3775 	}
3776 	lba = get_unaligned_be32(cmd + 2);
3777 	num = get_unaligned_be16(cmd + 7);
3778 	return resp_xdwriteread(scp, lba, num, devip);
3779 }
3780 
3781 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3782 {
3783 	u32 tag = blk_mq_unique_tag(cmnd->request);
3784 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3785 
3786 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3787 	if (WARN_ON_ONCE(hwq >= submit_queues))
3788 		hwq = 0;
3789 	return sdebug_q_arr + hwq;
3790 }
3791 
/* Queued (deferred) command completions converge here. Called from the
 * hrtimer callback or the work queue callback. Validates the queue slot,
 * releases it under qc_lock, handles a pending max_queue reduction, then
 * completes the command to the mid level outside the lock.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions that migrated off the issuing cpu */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* command was aborted/stopped after the timer/work fired */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means user shrank max_queue while
	 * slots above the new limit were still in flight */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no in-use slot remains above the new max_queue,
		 * the retirement phase is over */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3857 
3858 /* When high resolution timer goes off this function is called. */
3859 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3860 {
3861 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3862 						  hrt);
3863 	sdebug_q_cmd_complete(sd_dp);
3864 	return HRTIMER_NORESTART;
3865 }
3866 
3867 /* When work queue schedules work, it calls this function. */
3868 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3869 {
3870 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3871 						  ew.work);
3872 	sdebug_q_cmd_complete(sd_dp);
3873 }
3874 
3875 static bool got_shared_uuid;
3876 static uuid_t shared_uuid;
3877 
3878 static struct sdebug_dev_info *sdebug_device_create(
3879 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3880 {
3881 	struct sdebug_dev_info *devip;
3882 
3883 	devip = kzalloc(sizeof(*devip), flags);
3884 	if (devip) {
3885 		if (sdebug_uuid_ctl == 1)
3886 			uuid_gen(&devip->lu_name);
3887 		else if (sdebug_uuid_ctl == 2) {
3888 			if (got_shared_uuid)
3889 				devip->lu_name = shared_uuid;
3890 			else {
3891 				uuid_gen(&shared_uuid);
3892 				got_shared_uuid = true;
3893 				devip->lu_name = shared_uuid;
3894 			}
3895 		}
3896 		devip->sdbg_host = sdbg_host;
3897 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3898 	}
3899 	return devip;
3900 }
3901 
3902 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3903 {
3904 	struct sdebug_host_info *sdbg_host;
3905 	struct sdebug_dev_info *open_devip = NULL;
3906 	struct sdebug_dev_info *devip;
3907 
3908 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3909 	if (!sdbg_host) {
3910 		pr_err("Host info NULL\n");
3911 		return NULL;
3912 	}
3913 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3914 		if ((devip->used) && (devip->channel == sdev->channel) &&
3915 		    (devip->target == sdev->id) &&
3916 		    (devip->lun == sdev->lun))
3917 			return devip;
3918 		else {
3919 			if ((!devip->used) && (!open_devip))
3920 				open_devip = devip;
3921 		}
3922 	}
3923 	if (!open_devip) { /* try and make a new one */
3924 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3925 		if (!open_devip) {
3926 			pr_err("out of memory at line %d\n", __LINE__);
3927 			return NULL;
3928 		}
3929 	}
3930 
3931 	open_devip->channel = sdev->channel;
3932 	open_devip->target = sdev->id;
3933 	open_devip->lun = sdev->lun;
3934 	open_devip->sdbg_host = sdbg_host;
3935 	atomic_set(&open_devip->num_in_q, 0);
3936 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3937 	open_devip->used = true;
3938 	return open_devip;
3939 }
3940 
/* Mid-level slave_alloc hook: mark the request queue as supporting
 * bidirectional commands (needed for XDWRITEREAD). Always succeeds.
 */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3949 
3950 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3951 {
3952 	struct sdebug_dev_info *devip =
3953 			(struct sdebug_dev_info *)sdp->hostdata;
3954 
3955 	if (sdebug_verbose)
3956 		pr_info("slave_configure <%u %u %u %llu>\n",
3957 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3958 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3959 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3960 	if (devip == NULL) {
3961 		devip = find_build_dev_info(sdp);
3962 		if (devip == NULL)
3963 			return 1;  /* no resources, will be marked offline */
3964 	}
3965 	sdp->hostdata = devip;
3966 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3967 	if (sdebug_no_uld)
3968 		sdp->no_uld_attach = 1;
3969 	config_cdb_len(sdp);
3970 	return 0;
3971 }
3972 
/* Mid-level slave_destroy hook: release the device's claim on its info
 * entry so find_build_dev_info() can recycle the slot later.
 */
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = false;
		sdp->hostdata = NULL;
	}
}
3987 
3988 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3989 			   enum sdeb_defer_type defer_t)
3990 {
3991 	if (!sd_dp)
3992 		return;
3993 	if (defer_t == SDEB_DEFER_HRT)
3994 		hrtimer_cancel(&sd_dp->hrt);
3995 	else if (defer_t == SDEB_DEFER_WQ)
3996 		cancel_work_sync(&sd_dp->ew.work);
3997 }
3998 
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* include slots above max_queue still retiring after a
		 * user reduced max_queue */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot and clear defer_t under the lock
				 * so the completion path sees NONE */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock before cancelling: the
				 * hrtimer/work callback takes qc_lock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
4044 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot and clear defer_t under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* must drop the lock around the cancel:
				 * hrtimer_cancel()/cancel_work_sync() wait
				 * for a callback that takes qc_lock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4083 
4084 /* Free queued command memory on heap */
4085 static void free_all_queued(void)
4086 {
4087 	int j, k;
4088 	struct sdebug_queue *sqp;
4089 	struct sdebug_queued_cmd *sqcp;
4090 
4091 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4092 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4093 			sqcp = &sqp->qc_arr[k];
4094 			kfree(sqcp->sd_dp);
4095 			sqcp->sd_dp = NULL;
4096 		}
4097 	}
4098 }
4099 
4100 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4101 {
4102 	bool ok;
4103 
4104 	++num_aborts;
4105 	if (SCpnt) {
4106 		ok = stop_queued_cmnd(SCpnt);
4107 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4108 			sdev_printk(KERN_INFO, SCpnt->device,
4109 				    "%s: command%s found\n", __func__,
4110 				    ok ? "" : " not");
4111 	}
4112 	return SUCCESS;
4113 }
4114 
4115 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
4116 {
4117 	++num_dev_resets;
4118 	if (SCpnt && SCpnt->device) {
4119 		struct scsi_device *sdp = SCpnt->device;
4120 		struct sdebug_dev_info *devip =
4121 				(struct sdebug_dev_info *)sdp->hostdata;
4122 
4123 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4124 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4125 		if (devip)
4126 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4127 	}
4128 	return SUCCESS;
4129 }
4130 
4131 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4132 {
4133 	struct sdebug_host_info *sdbg_host;
4134 	struct sdebug_dev_info *devip;
4135 	struct scsi_device *sdp;
4136 	struct Scsi_Host *hp;
4137 	int k = 0;
4138 
4139 	++num_target_resets;
4140 	if (!SCpnt)
4141 		goto lie;
4142 	sdp = SCpnt->device;
4143 	if (!sdp)
4144 		goto lie;
4145 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4146 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4147 	hp = sdp->host;
4148 	if (!hp)
4149 		goto lie;
4150 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4151 	if (sdbg_host) {
4152 		list_for_each_entry(devip,
4153 				    &sdbg_host->dev_info_list,
4154 				    dev_list)
4155 			if (devip->target == sdp->id) {
4156 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4157 				++k;
4158 			}
4159 	}
4160 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4161 		sdev_printk(KERN_INFO, sdp,
4162 			    "%s: %d device(s) found in target\n", __func__, k);
4163 lie:
4164 	return SUCCESS;
4165 }
4166 
4167 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
4168 {
4169 	struct sdebug_host_info *sdbg_host;
4170 	struct sdebug_dev_info *devip;
4171 	struct scsi_device *sdp;
4172 	struct Scsi_Host *hp;
4173 	int k = 0;
4174 
4175 	++num_bus_resets;
4176 	if (!(SCpnt && SCpnt->device))
4177 		goto lie;
4178 	sdp = SCpnt->device;
4179 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4180 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4181 	hp = sdp->host;
4182 	if (hp) {
4183 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4184 		if (sdbg_host) {
4185 			list_for_each_entry(devip,
4186 					    &sdbg_host->dev_info_list,
4187 					    dev_list) {
4188 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4189 				++k;
4190 			}
4191 		}
4192 	}
4193 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4194 		sdev_printk(KERN_INFO, sdp,
4195 			    "%s: %d device(s) found in host\n", __func__, k);
4196 lie:
4197 	return SUCCESS;
4198 }
4199 
4200 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
4201 {
4202 	struct sdebug_host_info *sdbg_host;
4203 	struct sdebug_dev_info *devip;
4204 	int k = 0;
4205 
4206 	++num_host_resets;
4207 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4208 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4209 	spin_lock(&sdebug_host_list_lock);
4210 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4211 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4212 				    dev_list) {
4213 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4214 			++k;
4215 		}
4216 	}
4217 	spin_unlock(&sdebug_host_list_lock);
4218 	stop_all_queued();
4219 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4220 		sdev_printk(KERN_INFO, SCpnt->device,
4221 			    "%s: %d device(s) found\n", __func__, k);
4222 	return SUCCESS;
4223 }
4224 
/* Write a legacy MBR partition table into the first sector of the ram
 * store, dividing the device into sdebug_num_parts roughly equal,
 * cylinder-aligned Linux (0x83) partitions. No-op when num_parts < 1 or
 * the store is under 1 MiB.
 */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved; remainder split evenly */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* derive CHS fields from the linear sector numbers */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4274 
4275 static void block_unblock_all_queues(bool block)
4276 {
4277 	int j;
4278 	struct sdebug_queue *sqp;
4279 
4280 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4281 		atomic_set(&sqp->blocked, (int)block);
4282 }
4283 
4284 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4285  * commands will be processed normally before triggers occur.
4286  */
4287 static void tweak_cmnd_count(void)
4288 {
4289 	int count, modulo;
4290 
4291 	modulo = abs(sdebug_every_nth);
4292 	if (modulo < 2)
4293 		return;
4294 	block_unblock_all_queues(true);
4295 	count = atomic_read(&sdebug_cmnd_count);
4296 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4297 	block_unblock_all_queues(false);
4298 }
4299 
/* Reset all global command/completion statistics counters to zero. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4307 
4308 static void setup_inject(struct sdebug_queue *sqp,
4309 			 struct sdebug_queued_cmd *sqcp)
4310 {
4311 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
4312 		if (sdebug_every_nth > 0)
4313 			sqcp->inj_recovered = sqcp->inj_transport
4314 				= sqcp->inj_dif
4315 				= sqcp->inj_dix = sqcp->inj_short = 0;
4316 		return;
4317 	}
4318 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4319 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4320 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4321 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4322 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4323 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4324 }
4325 
4326 /* Complete the processing of the thread that queued a SCSI command to this
4327  * driver. It either completes the command by calling cmnd_done() or
4328  * schedules a hr timer or work queue then returns 0. Returns
4329  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4330  */
4331 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4332 			 int scsi_result,
4333 			 int (*pfp)(struct scsi_cmnd *,
4334 				    struct sdebug_dev_info *),
4335 			 int delta_jiff, int ndelay)
4336 {
4337 	unsigned long iflags;
4338 	int k, num_in_q, qdepth, inject;
4339 	struct sdebug_queue *sqp;
4340 	struct sdebug_queued_cmd *sqcp;
4341 	struct scsi_device *sdp;
4342 	struct sdebug_defer *sd_dp;
4343 
4344 	if (unlikely(devip == NULL)) {
4345 		if (scsi_result == 0)
4346 			scsi_result = DID_NO_CONNECT << 16;
4347 		goto respond_in_thread;
4348 	}
4349 	sdp = cmnd->device;
4350 
4351 	if (delta_jiff == 0)
4352 		goto respond_in_thread;
4353 
4354 	/* schedule the response at a later time if resources permit */
4355 	sqp = get_queue(cmnd);
4356 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4357 	if (unlikely(atomic_read(&sqp->blocked))) {
4358 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4359 		return SCSI_MLQUEUE_HOST_BUSY;
4360 	}
4361 	num_in_q = atomic_read(&devip->num_in_q);
4362 	qdepth = cmnd->device->queue_depth;
4363 	inject = 0;
4364 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4365 		if (scsi_result) {
4366 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4367 			goto respond_in_thread;
4368 		} else
4369 			scsi_result = device_qfull_result;
4370 	} else if (unlikely(sdebug_every_nth &&
4371 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4372 			    (scsi_result == 0))) {
4373 		if ((num_in_q == (qdepth - 1)) &&
4374 		    (atomic_inc_return(&sdebug_a_tsf) >=
4375 		     abs(sdebug_every_nth))) {
4376 			atomic_set(&sdebug_a_tsf, 0);
4377 			inject = 1;
4378 			scsi_result = device_qfull_result;
4379 		}
4380 	}
4381 
4382 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4383 	if (unlikely(k >= sdebug_max_queue)) {
4384 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4385 		if (scsi_result)
4386 			goto respond_in_thread;
4387 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4388 			scsi_result = device_qfull_result;
4389 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4390 			sdev_printk(KERN_INFO, sdp,
4391 				    "%s: max_queue=%d exceeded, %s\n",
4392 				    __func__, sdebug_max_queue,
4393 				    (scsi_result ?  "status: TASK SET FULL" :
4394 						    "report: host busy"));
4395 		if (scsi_result)
4396 			goto respond_in_thread;
4397 		else
4398 			return SCSI_MLQUEUE_HOST_BUSY;
4399 	}
4400 	__set_bit(k, sqp->in_use_bm);
4401 	atomic_inc(&devip->num_in_q);
4402 	sqcp = &sqp->qc_arr[k];
4403 	sqcp->a_cmnd = cmnd;
4404 	cmnd->host_scribble = (unsigned char *)sqcp;
4405 	sd_dp = sqcp->sd_dp;
4406 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4407 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4408 		setup_inject(sqp, sqcp);
4409 	if (sd_dp == NULL) {
4410 		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4411 		if (sd_dp == NULL)
4412 			return SCSI_MLQUEUE_HOST_BUSY;
4413 	}
4414 
4415 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4416 	if (cmnd->result & SDEG_RES_IMMED_MASK) {
4417 		/*
4418 		 * This is the F_DELAY_OVERR case. No delay.
4419 		 */
4420 		cmnd->result &= ~SDEG_RES_IMMED_MASK;
4421 		delta_jiff = ndelay = 0;
4422 	}
4423 	if (cmnd->result == 0 && scsi_result != 0)
4424 		cmnd->result = scsi_result;
4425 
4426 	if (unlikely(sdebug_verbose && cmnd->result))
4427 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4428 			    __func__, cmnd->result);
4429 
4430 	if (delta_jiff > 0 || ndelay > 0) {
4431 		ktime_t kt;
4432 
4433 		if (delta_jiff > 0) {
4434 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4435 		} else
4436 			kt = ndelay;
4437 		if (!sd_dp->init_hrt) {
4438 			sd_dp->init_hrt = true;
4439 			sqcp->sd_dp = sd_dp;
4440 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4441 				     HRTIMER_MODE_REL_PINNED);
4442 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4443 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4444 			sd_dp->qc_idx = k;
4445 		}
4446 		if (sdebug_statistics)
4447 			sd_dp->issuing_cpu = raw_smp_processor_id();
4448 		sd_dp->defer_t = SDEB_DEFER_HRT;
4449 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4450 	} else {	/* jdelay < 0, use work queue */
4451 		if (!sd_dp->init_wq) {
4452 			sd_dp->init_wq = true;
4453 			sqcp->sd_dp = sd_dp;
4454 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4455 			sd_dp->qc_idx = k;
4456 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4457 		}
4458 		if (sdebug_statistics)
4459 			sd_dp->issuing_cpu = raw_smp_processor_id();
4460 		sd_dp->defer_t = SDEB_DEFER_WQ;
4461 		schedule_work(&sd_dp->ew.work);
4462 	}
4463 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4464 		     (scsi_result == device_qfull_result)))
4465 		sdev_printk(KERN_INFO, sdp,
4466 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4467 			    num_in_q, (inject ? "<inject> " : ""),
4468 			    "status: TASK SET FULL");
4469 	return 0;
4470 
4471 respond_in_thread:	/* call back to mid-layer using invocation thread */
4472 	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
4473 	cmnd->result &= ~SDEG_RES_IMMED_MASK;
4474 	if (cmnd->result == 0 && scsi_result != 0)
4475 		cmnd->result = scsi_result;
4476 	cmnd->scsi_done(cmnd);
4477 	return 0;
4478 }
4479 
4480 /* Note: The following macros create attribute files in the
4481    /sys/module/scsi_debug/parameters directory. Unfortunately this
4482    driver is unaware of a change and cannot trigger auxiliary actions
4483    as it can when the corresponding attribute in the
4484    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4485  */
4486 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4487 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4488 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4489 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4490 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4491 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4492 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4493 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4494 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4495 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4496 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4497 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4498 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4499 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4500 		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4501 module_param_string(inq_product, sdebug_inq_product_id,
4502 		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4503 module_param_string(inq_rev, sdebug_inq_product_rev,
4504 		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4505 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4506 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4507 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
/* Module parameters. Entries with S_IWUSR may be changed at run time via
 * /sys/module/scsi_debug/parameters/<name>; S_IRUGO-only entries are fixed
 * after module load. */
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
4545 
/* Parameter descriptions reported by modinfo(8); keep in sync with the
 * module_param_named() list above. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4598 
#define SDEBUG_INFO_LEN 256
/* Static buffer filled and returned by scsi_debug_info(); no locking is
 * visible here, so concurrent readers could see interleaved writes
 * (debug-only information, presumably acceptable). */
static char sdebug_info[SDEBUG_INFO_LEN];
4601 
4602 static const char *scsi_debug_info(struct Scsi_Host *shp)
4603 {
4604 	int k;
4605 
4606 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4607 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4608 	if (k >= (SDEBUG_INFO_LEN - 1))
4609 		return sdebug_info;
4610 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4611 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4612 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4613 		  "statistics", (int)sdebug_statistics);
4614 	return sdebug_info;
4615 }
4616 
4617 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4618 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4619 				 int length)
4620 {
4621 	char arr[16];
4622 	int opts;
4623 	int minLen = length > 15 ? 15 : length;
4624 
4625 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4626 		return -EACCES;
4627 	memcpy(arr, buffer, minLen);
4628 	arr[minLen] = '\0';
4629 	if (1 != sscanf(arr, "%d", &opts))
4630 		return -EINVAL;
4631 	sdebug_opts = opts;
4632 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4633 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4634 	if (sdebug_every_nth != 0)
4635 		tweak_cmnd_count();
4636 	return length;
4637 }
4638 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* Per submit queue: report the busy range of the in-use bitmap. */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			/* At least one command is in flight on this queue. */
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4683 
4684 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4685 {
4686 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4687 }
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Block all queues so the scan below sees a stable
			 * view of the in-use bitmaps. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* No commands in flight; safe to change.
				 * A jiffies delay overrides any ndelay. */
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4723 
4724 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4725 {
4726 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4727 }
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* ndelay must parse and be in [0, 1 second). */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Block all queues so the scan below sees a stable
			 * view of the in-use bitmaps. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* ndelay == 0 reverts to the default
				 * jiffies-based delay. */
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4764 
4765 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4766 {
4767 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4768 }
4769 
4770 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4771 			  size_t count)
4772 {
4773 	int opts;
4774 	char work[20];
4775 
4776 	if (sscanf(buf, "%10s", work) == 1) {
4777 		if (strncasecmp(work, "0x", 2) == 0) {
4778 			if (kstrtoint(work + 2, 16, &opts) == 0)
4779 				goto opts_done;
4780 		} else {
4781 			if (kstrtoint(work, 10, &opts) == 0)
4782 				goto opts_done;
4783 		}
4784 	}
4785 	return -EINVAL;
4786 opts_done:
4787 	sdebug_opts = opts;
4788 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4789 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4790 	tweak_cmnd_count();
4791 	return count;
4792 }
4793 static DRIVER_ATTR_RW(opts);
4794 
4795 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4796 {
4797 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4798 }
4799 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4800 			   size_t count)
4801 {
4802 	int n;
4803 
4804 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4805 		sdebug_ptype = n;
4806 		return count;
4807 	}
4808 	return -EINVAL;
4809 }
4810 static DRIVER_ATTR_RW(ptype);
4811 
4812 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4813 {
4814 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4815 }
4816 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4817 			    size_t count)
4818 {
4819 	int n;
4820 
4821 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4822 		sdebug_dsense = n;
4823 		return count;
4824 	}
4825 	return -EINVAL;
4826 }
4827 static DRIVER_ATTR_RW(dsense);
4828 
4829 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4830 {
4831 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4832 }
4833 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4834 			     size_t count)
4835 {
4836 	int n;
4837 
4838 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4839 		n = (n > 0);
4840 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4841 		if (sdebug_fake_rw != n) {
4842 			if ((0 == n) && (NULL == fake_storep)) {
4843 				unsigned long sz =
4844 					(unsigned long)sdebug_dev_size_mb *
4845 					1048576;
4846 
4847 				fake_storep = vmalloc(sz);
4848 				if (NULL == fake_storep) {
4849 					pr_err("out of memory, 9\n");
4850 					return -ENOMEM;
4851 				}
4852 				memset(fake_storep, 0, sz);
4853 			}
4854 			sdebug_fake_rw = n;
4855 		}
4856 		return count;
4857 	}
4858 	return -EINVAL;
4859 }
4860 static DRIVER_ATTR_RW(fake_rw);
4861 
4862 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4863 {
4864 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4865 }
4866 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4867 			      size_t count)
4868 {
4869 	int n;
4870 
4871 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4872 		sdebug_no_lun_0 = n;
4873 		return count;
4874 	}
4875 	return -EINVAL;
4876 }
4877 static DRIVER_ATTR_RW(no_lun_0);
4878 
4879 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4880 {
4881 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4882 }
4883 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4884 			      size_t count)
4885 {
4886 	int n;
4887 
4888 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4889 		sdebug_num_tgts = n;
4890 		sdebug_max_tgts_luns();
4891 		return count;
4892 	}
4893 	return -EINVAL;
4894 }
4895 static DRIVER_ATTR_RW(num_tgts);
4896 
4897 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4898 {
4899 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4900 }
4901 static DRIVER_ATTR_RO(dev_size_mb);
4902 
4903 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4904 {
4905 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4906 }
4907 static DRIVER_ATTR_RO(num_parts);
4908 
4909 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4910 {
4911 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4912 }
4913 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4914 			       size_t count)
4915 {
4916 	int nth;
4917 
4918 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4919 		sdebug_every_nth = nth;
4920 		if (nth && !sdebug_statistics) {
4921 			pr_info("every_nth needs statistics=1, set it\n");
4922 			sdebug_statistics = true;
4923 		}
4924 		tweak_cmnd_count();
4925 		return count;
4926 	}
4927 	return -EINVAL;
4928 }
4929 static DRIVER_ATTR_RW(every_nth);
4930 
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Change the number of LUNs per target (capped at 256). If the value
 * actually changed and the simulated SCSI level is >= SPC-3, raise a
 * REPORTED LUNS DATA HAS CHANGED unit attention on every device. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* Walk every device on every simulated host. */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4969 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k = highest in-use bit index across all submit queues. */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* No bits set (find_last_bit returned size). */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* Commands in flight above the new limit; remember
			 * the old high-water mark so they can drain. */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
5005 
5006 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
5007 {
5008 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
5009 }
5010 static DRIVER_ATTR_RO(no_uld);
5011 
5012 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
5013 {
5014 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
5015 }
5016 static DRIVER_ATTR_RO(scsi_level);
5017 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the virtual capacity (GiB). On an actual change, recompute the
 * reported capacity and raise a CAPACITY DATA HAS CHANGED unit attention
 * on every simulated device. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* Walk every device on every simulated host. */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
5052 
5053 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
5054 {
5055 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
5056 }
5057 
5058 static int sdebug_add_adapter(void);
5059 static void sdebug_remove_adapter(void);
5060 
5061 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
5062 			      size_t count)
5063 {
5064 	int delta_hosts;
5065 
5066 	if (sscanf(buf, "%d", &delta_hosts) != 1)
5067 		return -EINVAL;
5068 	if (delta_hosts > 0) {
5069 		do {
5070 			sdebug_add_adapter();
5071 		} while (--delta_hosts);
5072 	} else if (delta_hosts < 0) {
5073 		do {
5074 			sdebug_remove_adapter();
5075 		} while (++delta_hosts);
5076 	}
5077 	return count;
5078 }
5079 static DRIVER_ATTR_RW(add_host);
5080 
5081 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5082 {
5083 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5084 }
5085 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5086 				    size_t count)
5087 {
5088 	int n;
5089 
5090 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5091 		sdebug_vpd_use_hostno = n;
5092 		return count;
5093 	}
5094 	return -EINVAL;
5095 }
5096 static DRIVER_ATTR_RW(vpd_use_hostno);
5097 
5098 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5099 {
5100 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5101 }
5102 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5103 				size_t count)
5104 {
5105 	int n;
5106 
5107 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5108 		if (n > 0)
5109 			sdebug_statistics = true;
5110 		else {
5111 			clear_queue_stats();
5112 			sdebug_statistics = false;
5113 		}
5114 		return count;
5115 	}
5116 	return -EINVAL;
5117 }
5118 static DRIVER_ATTR_RW(statistics);
5119 
5120 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5121 {
5122 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5123 }
5124 static DRIVER_ATTR_RO(sector_size);
5125 
5126 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5127 {
5128 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5129 }
5130 static DRIVER_ATTR_RO(submit_queues);
5131 
5132 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5133 {
5134 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5135 }
5136 static DRIVER_ATTR_RO(dix);
5137 
5138 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5139 {
5140 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5141 }
5142 static DRIVER_ATTR_RO(dif);
5143 
5144 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5145 {
5146 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5147 }
5148 static DRIVER_ATTR_RO(guard);
5149 
5150 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5151 {
5152 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5153 }
5154 static DRIVER_ATTR_RO(ato);
5155 
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* Without logical block provisioning every sector is "mapped". */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* "%*pbl" prints the provisioning bitmap as a ranged bit list;
	 * the PAGE_SIZE - 1 bound leaves room for the trailing newline
	 * and NUL appended below. */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5172 
5173 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5174 {
5175 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5176 }
5177 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5178 			       size_t count)
5179 {
5180 	int n;
5181 
5182 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5183 		sdebug_removable = (n > 0);
5184 		return count;
5185 	}
5186 	return -EINVAL;
5187 }
5188 static DRIVER_ATTR_RW(removable);
5189 
5190 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5191 {
5192 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5193 }
5194 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5195 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5196 			       size_t count)
5197 {
5198 	int n;
5199 
5200 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5201 		sdebug_host_lock = (n > 0);
5202 		return count;
5203 	}
5204 	return -EINVAL;
5205 }
5206 static DRIVER_ATTR_RW(host_lock);
5207 
5208 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5209 {
5210 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5211 }
5212 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5213 			    size_t count)
5214 {
5215 	int n;
5216 
5217 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5218 		sdebug_strict = (n > 0);
5219 		return count;
5220 	}
5221 	return -EINVAL;
5222 }
5223 static DRIVER_ATTR_RW(strict);
5224 
5225 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5226 {
5227 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5228 }
5229 static DRIVER_ATTR_RO(uuid_ctl);
5230 
5231 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5232 {
5233 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5234 }
5235 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5236 			     size_t count)
5237 {
5238 	int ret, n;
5239 
5240 	ret = kstrtoint(buf, 0, &n);
5241 	if (ret)
5242 		return ret;
5243 	sdebug_cdb_len = n;
5244 	all_config_cdb_len();
5245 	return count;
5246 }
5247 static DRIVER_ATTR_RW(cdb_len);
5248 
5249 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
5256 
/* Driver attributes exported under /sys/bus/pseudo/drivers/scsi_debug;
 * the list must stay NULL terminated. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
5292 
5293 static struct device *pseudo_primary;
5294 
/* Module load: validate parameters, allocate the per-queue array and the
 * optional ramdisk/DIF/provisioning stores, register the pseudo bus and
 * driver, then instantiate the requested number of simulated hosts.
 * Error paths unwind in reverse order via the labels at the bottom. */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* Validate/normalize timing parameters. */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* Only power-of-two logical block sizes from 512 to 4096. */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	/* One sdebug_queue (with its lock) per submit queue. */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* Ramdisk backing store, unless reads/writes are faked. */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	/* Optional protection information store (one tuple per sector). */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* One bit per provisioning block. */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(array_size(sizeof(long),
						BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	/* Register the pseudo device/bus/driver triple. */
	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_host is re-incremented by sdebug_add_adapter(). */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	/* vfree(NULL) is a no-op, so unallocated stores are safe here. */
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5504 
5505 static void __exit scsi_debug_exit(void)
5506 {
5507 	int k = sdebug_add_host;
5508 
5509 	stop_all_queued();
5510 	free_all_queued();
5511 	for (; k; k--)
5512 		sdebug_remove_adapter();
5513 	driver_unregister(&sdebug_driverfs_driver);
5514 	bus_unregister(&pseudo_lld_bus);
5515 	root_device_unregister(pseudo_primary);
5516 
5517 	vfree(map_storep);
5518 	vfree(dif_storep);
5519 	vfree(fake_storep);
5520 	kfree(sdebug_q_arr);
5521 }
5522 
/*
 * device_initcall() runs scsi_debug_init() at device-initcall time when
 * built in; for a modular build it behaves like module_init().
 */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
5525 
/*
 * Device-core release callback for a pseudo adapter: frees the
 * sdebug_host_info that was kzalloc'ed in sdebug_add_adapter().
 * Runs when the embedded struct device's last reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5533 
5534 static int sdebug_add_adapter(void)
5535 {
5536 	int k, devs_per_host;
5537 	int error = 0;
5538 	struct sdebug_host_info *sdbg_host;
5539 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5540 
5541 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5542 	if (sdbg_host == NULL) {
5543 		pr_err("out of memory at line %d\n", __LINE__);
5544 		return -ENOMEM;
5545 	}
5546 
5547 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5548 
5549 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5550 	for (k = 0; k < devs_per_host; k++) {
5551 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5552 		if (!sdbg_devinfo) {
5553 			pr_err("out of memory at line %d\n", __LINE__);
5554 			error = -ENOMEM;
5555 			goto clean;
5556 		}
5557 	}
5558 
5559 	spin_lock(&sdebug_host_list_lock);
5560 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5561 	spin_unlock(&sdebug_host_list_lock);
5562 
5563 	sdbg_host->dev.bus = &pseudo_lld_bus;
5564 	sdbg_host->dev.parent = pseudo_primary;
5565 	sdbg_host->dev.release = &sdebug_release_adapter;
5566 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5567 
5568 	error = device_register(&sdbg_host->dev);
5569 
5570 	if (error)
5571 		goto clean;
5572 
5573 	++sdebug_add_host;
5574 	return error;
5575 
5576 clean:
5577 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5578 				 dev_list) {
5579 		list_del(&sdbg_devinfo->dev_list);
5580 		kfree(sdbg_devinfo);
5581 	}
5582 
5583 	kfree(sdbg_host);
5584 	return error;
5585 }
5586 
5587 static void sdebug_remove_adapter(void)
5588 {
5589 	struct sdebug_host_info *sdbg_host = NULL;
5590 
5591 	spin_lock(&sdebug_host_list_lock);
5592 	if (!list_empty(&sdebug_host_list)) {
5593 		sdbg_host = list_entry(sdebug_host_list.prev,
5594 				       struct sdebug_host_info, host_list);
5595 		list_del(&sdbg_host->host_list);
5596 	}
5597 	spin_unlock(&sdebug_host_list_lock);
5598 
5599 	if (!sdbg_host)
5600 		return;
5601 
5602 	device_unregister(&sdbg_host->dev);
5603 	--sdebug_add_host;
5604 }
5605 
5606 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5607 {
5608 	int num_in_q = 0;
5609 	struct sdebug_dev_info *devip;
5610 
5611 	block_unblock_all_queues(true);
5612 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5613 	if (NULL == devip) {
5614 		block_unblock_all_queues(false);
5615 		return	-ENODEV;
5616 	}
5617 	num_in_q = atomic_read(&devip->num_in_q);
5618 
5619 	if (qdepth < 1)
5620 		qdepth = 1;
5621 	/* allow to exceed max host qc_arr elements for testing */
5622 	if (qdepth > SDEBUG_CANQUEUE + 10)
5623 		qdepth = SDEBUG_CANQUEUE + 10;
5624 	scsi_change_queue_depth(sdev, qdepth);
5625 
5626 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5627 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5628 			    __func__, qdepth, num_in_q);
5629 	}
5630 	block_unblock_all_queues(false);
5631 	return sdev->queue_depth;
5632 }
5633 
5634 static bool fake_timeout(struct scsi_cmnd *scp)
5635 {
5636 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5637 		if (sdebug_every_nth < -1)
5638 			sdebug_every_nth = -1;
5639 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5640 			return true; /* ignore command causing timeout */
5641 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5642 			 scsi_medium_access_command(scp))
5643 			return true; /* time out reads and writes */
5644 	}
5645 	return false;
5646 }
5647 
5648 static bool fake_host_busy(struct scsi_cmnd *scp)
5649 {
5650 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5651 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5652 }
5653 
/*
 * queuecommand() entry point for the emulated host.  Looks the CDB opcode
 * up in opcode_info_arr (disambiguating by service action where needed),
 * applies the configured checks and error/delay injections, and finally
 * hands the command plus its resp_* handler to schedule_resp().  Returns
 * 0 or SCSI_MLQUEUE_HOST_BUSY as the midlayer expects.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the raw CDB bytes (at most 32 shown) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;	/* LUN out of range -> DID_NO_CONNECT */

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action: low form takes it
			 * from cdb[1] bits 4:0, high form from cdb[8..9] */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched: point sense at the
				 * field that failed to match */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* report-luns well-known LUN only accepts F_RL_WLUN_OK commands */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense
				 * key specific field */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* deliver any pending unit attention unless the command skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* medium-access command against a stopped unit -> NOT_READY */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* skip data transfer; respond directly */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
		/*
		 * If any delay is active, for F_SSU_DELAY want at least 1
		 * second and if sdebug_jdelay>0 want a long delay of that
		 * many seconds; for F_SYNC_DELAY want 1/20 of that.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
5812 
/*
 * SCSI host template for the pseudo adapter.  Note that can_queue and
 * use_clustering are adjusted at probe time from the sdebug_max_queue
 * and sdebug_clustering module parameters (see sdebug_driver_probe()).
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no per-request sector limit */
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5839 
/*
 * Bus probe callback for a pseudo adapter device: allocate a Scsi_Host,
 * configure queue count, target/LUN limits and DIF/DIX protection from
 * the sdebug_dif/sdebug_dix module parameters, then add and scan the
 * host.  Returns 0 on success or a negative errno.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	/*
	 * hostdata stores only a pointer back to sdbg_host, hence
	 * sizeof(sdbg_host) (pointer size), not sizeof(*sdbg_host) --
	 * intentional, see the assignment into hpnt->hostdata below.
	 */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	if (shost_use_blk_mq(hpnt))
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room above this_id (the initiator) for extra targets */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* translate sdebug_dif/sdebug_dix into SHOST protection flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		/* no DIF; DIX alone maps to type 0 */
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5936 
5937 static int sdebug_driver_remove(struct device *dev)
5938 {
5939 	struct sdebug_host_info *sdbg_host;
5940 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5941 
5942 	sdbg_host = to_sdebug_host(dev);
5943 
5944 	if (!sdbg_host) {
5945 		pr_err("Unable to locate host info\n");
5946 		return -ENODEV;
5947 	}
5948 
5949 	scsi_remove_host(sdbg_host->shost);
5950 
5951 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5952 				 dev_list) {
5953 		list_del(&sdbg_devinfo->dev_list);
5954 		kfree(sdbg_devinfo);
5955 	}
5956 
5957 	scsi_host_put(sdbg_host->shost);
5958 	return 0;
5959 }
5960 
/* Match unconditionally: every device on the pseudo bus uses this driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5966 
/* Pseudo bus on which the emulated adapters hang; probe/remove build and
 * tear down one Scsi_Host per adapter device. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5974