xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 481b5e5c)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2017 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0187"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20171202";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
100 
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST   1
103 #define DEF_NUM_TGTS   1
104 #define DEF_MAX_LUNS   1
105 /* With these defaults, this driver will make 1 host with 1 target
106  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107  */
108 #define DEF_ATO 1
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB   8
112 #define DEF_DIF 0
113 #define DEF_DIX 0
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
147 
148 #define SDEBUG_LUN_0_VAL 0
149 
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE		1
152 #define SDEBUG_OPT_MEDIUM_ERR		2
153 #define SDEBUG_OPT_TIMEOUT		4
154 #define SDEBUG_OPT_RECOVERED_ERR	8
155 #define SDEBUG_OPT_TRANSPORT_ERR	16
156 #define SDEBUG_OPT_DIF_ERR		32
157 #define SDEBUG_OPT_DIX_ERR		64
158 #define SDEBUG_OPT_MAC_TIMEOUT		128
159 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
160 #define SDEBUG_OPT_Q_NOISE		0x200
161 #define SDEBUG_OPT_ALL_TSF		0x400
162 #define SDEBUG_OPT_RARE_TSF		0x800
163 #define SDEBUG_OPT_N_WCE		0x1000
164 #define SDEBUG_OPT_RESET_NOISE		0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
166 #define SDEBUG_OPT_HOST_BUSY		0x8000
167 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
168 			      SDEBUG_OPT_RESET_NOISE)
169 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
170 				  SDEBUG_OPT_TRANSPORT_ERR | \
171 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
172 				  SDEBUG_OPT_SHORT_TRANSFER | \
173 				  SDEBUG_OPT_HOST_BUSY)
174 /* When "every_nth" > 0 then modulo "every_nth" commands:
175  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
176  *   - a RECOVERED_ERROR is simulated on successful read and write
177  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
178  *   - a TRANSPORT_ERROR is simulated on successful read and write
179  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
180  *
181  * When "every_nth" < 0 then after "- every_nth" commands:
182  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
183  *   - a RECOVERED_ERROR is simulated on successful read and write
184  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
185  *   - a TRANSPORT_ERROR is simulated on successful read and write
186  *     commands if _DEBUG_OPT_TRANSPORT_ERR is set.
187  * This will continue on every subsequent command until some other action
188  * occurs (e.g. the user * writing a new value (other than -1 or 1) to
189  * every_nth via sysfs).
190  */
191 
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193  * priority order. In the subset implemented here lower numbers have higher
194  * priority. The UA numbers should be a sequence starting from 0 with
195  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
204 
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206  * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
209 
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211  * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  255
224 
225 #define F_D_IN			1
226 #define F_D_OUT			2
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10
230 #define F_SKIP_UA		0x20
231 #define F_DELAY_OVERR		0x40
232 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
234 #define F_INV_OP		0x200
235 #define F_FAKE_RW		0x400
236 #define F_M_ACCESS		0x800	/* media access */
237 
238 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
239 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
240 #define FF_SA (F_SA_HIGH | F_SA_LOW)
241 
242 #define SDEBUG_MAX_PARTS 4
243 
244 #define SDEBUG_MAX_CMD_LEN 32
245 
246 
/* Per logical unit (LU) state for one simulated device.  Instances are
 * linked onto the owning sdebug_host_info's dev_info_list. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry on sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* LU name; presumably surfaced when uuid_ctl set */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning host */
	unsigned long uas_bm[1];	/* pending Unit Attentions (SDEBUG_UA_* bits) */
	atomic_t num_in_q;	/* commands currently queued for this LU */
	atomic_t stopped;	/* nonzero: LU stopped (see START STOP handling) */
	bool used;
};
259 
/* Per simulated SCSI host state; all instances live on the global
 * sdebug_host_list. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry on sdebug_host_list */
	struct Scsi_Host *shost;
	struct device dev;		/* embedded device, see to_sdebug_host() */
	struct list_head dev_info_list;	/* list of struct sdebug_dev_info */
};

/* Recover the enclosing sdebug_host_info from its embedded struct device. */
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
269 
/* State for a deferred (delayed) command response.  Carries both an
 * hrtimer and a work item; the completion path uses one of them
 * (presumably chosen by the jdelay/ndelay parameters — confirm at the
 * queuing sites). */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu of submission; cf. sdebug_miss_cpus counter */
};
277 
/* One slot in a sdebug_queue's qc_arr[]. */
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-response state */
	struct scsi_cmnd *a_cmnd;	/* the command awaiting a response */
	/* per-command error injection selections (cf. SDEBUG_OPT_* bits) */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};
291 
/* One submit queue's worth of in-flight commands; sdebug_q_arr holds
 * submit_queues of these. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* busy bits for qc_arr */
	spinlock_t qc_lock;	/* presumably guards qc_arr/in_use_bm — verify */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
298 
299 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
300 static atomic_t sdebug_completions;  /* count of deferred completions */
301 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
302 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
303 
/* Describes the handling of one SCSI opcode (or service action).  Leaf
 * entries supply the response function and a per-byte cdb mask; non-leaf
 * entries additionally point at an overflow array (arrp) of variants that
 * share the same SDEB_I_* index. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of the F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
315 
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * These values index opcode_info_arr[] below, so the two must be kept in
 * step (and SDEB_I_LAST_ELEMENT must stay last). */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
350 
351 
/* Maps cdb[0] to a SDEB_I_* index.  A value of 0 means
 * SDEB_I_INVALID_OPCODE, except for entry 0x0 itself which is explicitly
 * SDEB_I_TEST_UNIT_READY. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
394 
395 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
409 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
417 
418 /*
419  * The following are overflow arrays for cdbs that "hit" the same index in
420  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
421  * should be placed in opcode_info_arr[], the others should be placed here.
422  */
/* Overflow entry for SDEB_I_MODE_SENSE. */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
427 
/* Overflow entry for SDEB_I_MODE_SELECT. */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
432 
/* Overflow entries for SDEB_I_READ (READ(16) is the primary entry). */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
443 
/* Overflow entries for SDEB_I_WRITE (WRITE(16) is the primary entry). */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
455 
/* Overflow entry for SDEB_I_SERV_ACT_IN_16 (keyed on service action). */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
461 
/* Overflow entries for SDEB_I_VARIABLE_LEN (keyed on service action). */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
470 
/* Overflow entries for SDEB_I_MAINT_IN (keyed on service action). */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
479 
/* Overflow entry for SDEB_I_WRITE_SAME. */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
485 
/* Overflow entry for SDEB_I_RESERVE. */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
490 
/* Overflow entry for SDEB_I_RELEASE. */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
495 
496 
497 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
498  * plus the terminating elements for logic that scans this table such as
499  * REPORT SUPPORTED OPERATION CODES. */
500 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
501 /* 0 */
502 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
503 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
504 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
505 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
506 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
507 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
508 	     0, 0} },					/* REPORT LUNS */
509 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
510 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
511 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
512 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
513 /* 5 */
514 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
515 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
516 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
517 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
518 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
519 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
520 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
521 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
522 	     0, 0, 0} },
523 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
524 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
525 	     0, 0} },
526 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
527 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
528 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
529 /* 10 */
530 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
531 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
532 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
533 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },		/* WRITE(16) */
534 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
535 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
536 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
537 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
538 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
540 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
541 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
542 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
543 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
544 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
545 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
546 				0xff, 0, 0xc7, 0, 0, 0, 0} },
547 /* 15 */
548 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
549 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
550 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
551 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
552 	     0, 0, 0, 0, 0, 0} },
553 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
554 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
555 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
556 	     0xff, 0xff} },
557 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
558 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
559 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
560 	     0} },
561 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
562 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
563 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
564 	     0} },
565 /* 20 */
566 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
567 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
568 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
569 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
571 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
572 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
573 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
575 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
576 /* 25 */
577 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
578 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
579 		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
580 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
581 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
582 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
583 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
584 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
585 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
586 		 0, 0, 0, 0, 0} },
587 	{0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
588 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
589 	     0, 0, 0, 0} },
590 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
591 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
592 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
593 
594 /* 30 */
595 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
596 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
597 };
598 
599 static int sdebug_add_host = DEF_NUM_HOST;
600 static int sdebug_ato = DEF_ATO;
601 static int sdebug_cdb_len = DEF_CDB_LEN;
602 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
603 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
604 static int sdebug_dif = DEF_DIF;
605 static int sdebug_dix = DEF_DIX;
606 static int sdebug_dsense = DEF_D_SENSE;
607 static int sdebug_every_nth = DEF_EVERY_NTH;
608 static int sdebug_fake_rw = DEF_FAKE_RW;
609 static unsigned int sdebug_guard = DEF_GUARD;
610 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
611 static int sdebug_max_luns = DEF_MAX_LUNS;
612 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
613 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
614 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
615 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
616 static int sdebug_no_uld;
617 static int sdebug_num_parts = DEF_NUM_PARTS;
618 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
619 static int sdebug_opt_blks = DEF_OPT_BLKS;
620 static int sdebug_opts = DEF_OPTS;
621 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
622 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
623 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
624 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
625 static int sdebug_sector_size = DEF_SECTOR_SIZE;
626 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
627 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
628 static unsigned int sdebug_lbpu = DEF_LBPU;
629 static unsigned int sdebug_lbpws = DEF_LBPWS;
630 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
631 static unsigned int sdebug_lbprz = DEF_LBPRZ;
632 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
633 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
634 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
635 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
636 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
637 static int sdebug_uuid_ctl = DEF_UUID_CTL;
638 static bool sdebug_removable = DEF_REMOVABLE;
639 static bool sdebug_clustering;
640 static bool sdebug_host_lock = DEF_HOST_LOCK;
641 static bool sdebug_strict = DEF_STRICT;
642 static bool sdebug_any_injecting_opt;
643 static bool sdebug_verbose;
644 static bool have_dif_prot;
645 static bool sdebug_statistics = DEF_STATISTICS;
646 static bool sdebug_mq_active;
647 
648 static unsigned int sdebug_store_sectors;
649 static sector_t sdebug_capacity;	/* in sectors */
650 
651 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
652    may still need them */
653 static int sdebug_heads;		/* heads per disk */
654 static int sdebug_cylinders_per;	/* cylinders per surface */
655 static int sdebug_sectors_per;		/* sectors per cylinder */
656 
657 static LIST_HEAD(sdebug_host_list);
658 static DEFINE_SPINLOCK(sdebug_host_list_lock);
659 
660 static unsigned char *fake_storep;	/* ramdisk storage */
661 static struct t10_pi_tuple *dif_storep;	/* protection info */
662 static void *map_storep;		/* provisioning map */
663 
664 static unsigned long map_size;
665 static int num_aborts;
666 static int num_dev_resets;
667 static int num_target_resets;
668 static int num_bus_resets;
669 static int num_host_resets;
670 static int dix_writes;
671 static int dix_reads;
672 static int dif_errors;
673 
674 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
675 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
676 
677 static DEFINE_RWLOCK(atomic_rw);
678 
679 static char sdebug_proc_name[] = MY_NAME;
680 static const char *my_name = MY_NAME;
681 
682 static struct bus_type pseudo_lld_bus;
683 
684 static struct device_driver sdebug_driverfs_driver = {
685 	.name 		= sdebug_proc_name,
686 	.bus		= &pseudo_lld_bus,
687 };
688 
689 static const int check_condition_result =
690 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
691 
692 static const int illegal_condition_result =
693 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
694 
695 static const int device_qfull_result =
696 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
697 
698 
699 /* Only do the extra work involved in logical block provisioning if one or
700  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
701  * real reads and writes (i.e. not skipping them for speed).
702  */
703 static inline bool scsi_debug_lbp(void)
704 {
705 	return 0 == sdebug_fake_rw &&
706 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
707 }
708 
/* Return a pointer into the ramdisk for logical block address lba.
 * Note: do_div() divides lba in place and *returns the remainder*, so
 * the assignment below makes lba = lba % sdebug_store_sectors, i.e. the
 * backing store is indexed modulo its size. */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
715 
/* Return the T10 protection-information tuple for the given sector.
 * sector_div() divides in place and returns the remainder, so the store
 * is indexed modulo sdebug_store_sectors (cf. fake_store()). */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
722 
723 static void sdebug_max_tgts_luns(void)
724 {
725 	struct sdebug_host_info *sdbg_host;
726 	struct Scsi_Host *hpnt;
727 
728 	spin_lock(&sdebug_host_list_lock);
729 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
730 		hpnt = sdbg_host->shost;
731 		if ((hpnt->this_id >= 0) &&
732 		    (sdebug_num_tgts > hpnt->this_id))
733 			hpnt->max_id = sdebug_num_tgts + 1;
734 		else
735 			hpnt->max_id = sdebug_num_tgts;
736 		/* sdebug_max_luns; */
737 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
738 	}
739 	spin_unlock(&sdebug_host_list_lock);
740 }
741 
742 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
743 
744 /* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes (field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense-key-specific data valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* byte (field) pointer */
	if (sdebug_dsense) {
		/* descriptor format: append a sense-key-specific descriptor
		 * and grow the additional sense length accordingly */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
784 
785 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
786 {
787 	unsigned char *sbuff;
788 
789 	sbuff = scp->sense_buffer;
790 	if (!sbuff) {
791 		sdev_printk(KERN_ERR, scp->device,
792 			    "%s: sense_buffer is NULL\n", __func__);
793 		return;
794 	}
795 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
796 
797 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
798 
799 	if (sdebug_verbose)
800 		sdev_printk(KERN_INFO, scp->device,
801 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
802 			    my_name, key, asc, asq);
803 }
804 
/* Set ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE sense on scp. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
809 
810 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
811 {
812 	if (sdebug_verbose) {
813 		if (0x1261 == cmd)
814 			sdev_printk(KERN_INFO, dev,
815 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
816 		else if (0x5331 == cmd)
817 			sdev_printk(KERN_INFO, dev,
818 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
819 				    __func__);
820 		else
821 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
822 				    __func__, cmd);
823 	}
824 	return -EINVAL;
825 	/* return -ENOTTY; // correct return but upsets fdisk */
826 }
827 
/*
 * Translate the sdebug_cdb_len module parameter into the scsi_device
 * use_10_for_rw / use_16_for_rw / use_10_for_ms hints, so upper layers
 * build commands of (approximately) the requested CDB length. Unknown
 * lengths fall back to 10 and rewrite sdebug_cdb_len to match.
 */
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
866 
/* Apply config_cdb_len() to every device on every simulated host. */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
882 
/*
 * Clear the "reported LUNs data changed" unit attention on every LUN that
 * shares devip's host and target (SPC-4 reports that UA only once per
 * target; see make_ua()).
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
898 
/*
 * If a unit attention (UA) is pending on devip, build sense data for the
 * highest-priority one (lowest set bit in uas_bm), clear that bit and
 * return check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
978 
979 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* no data-in buffer: nothing to do */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* wrong transfer direction */

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	/* residual = bytes requested but not supplied */
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
997 
998 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
999  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1000  * calls, not required to write in ascending offset order. Assumes resid
1001  * set to scsi_bufflen() prior to any calls.
1002  */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond end of data-in buffer */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* wrong transfer direction */

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* resid may only shrink: calls can arrive in any offset order */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
1023 
1024 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1025  * 'arr' or -1 if error.
1026  */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;	/* no data-out buffer supplied */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;	/* wrong transfer direction */

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1037 
1038 
/* INQUIRY strings: fixed width, space padded (SPC format). */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1046 
1047 /* Device identification VPD page. Returns number of bytes placed in arr */
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits, in order: a T10 vendor-id designator; then, unless dev_id_num is
 * negative (well-known LU), a logical unit designator (locally assigned
 * UUID or NAA-3, per sdebug_uuid_ctl) plus the relative port number; then
 * NAA-3 target port, target port group and target device designators and
 * a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1134 
/* Static payload (from byte 4 onward) for the Software interface
 * identification VPD page: three 6-byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1140 
1141 /*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	/* fixed payload; page length equals sizeof(vpd84_data) */
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1147 
/* Append one network-services descriptor: a 4-byte header (association/
 * service type, reserved byte, big-endian 16-bit length) followed by the
 * URL, null terminated and zero padded to a multiple of 4 bytes.
 * Returns the number of bytes written. */
static int inquiry_vpd_85_descriptor(unsigned char *arr, int assoc_service,
				     const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;		/* allow for trailing null */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[num++] = assoc_service;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* length, MSB */
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page. Returns number of bytes placed
 * in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage configuration service */
	num += inquiry_vpd_85_descriptor(arr + num, 0x1,
					 "https://www.kernel.org/config");
	/* lu, logging service */
	num += inquiry_vpd_85_descriptor(arr + num, 0x4,
					 "http://www.kernel.org/log");
	return num;
}
1182 
1183 /* SCSI ports VPD page */
1184 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1185 {
1186 	int num = 0;
1187 	int port_a, port_b;
1188 
1189 	port_a = target_dev_id + 1;
1190 	port_b = port_a + 1;
1191 	arr[num++] = 0x0;	/* reserved */
1192 	arr[num++] = 0x0;	/* reserved */
1193 	arr[num++] = 0x0;
1194 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1195 	memset(arr + num, 0, 6);
1196 	num += 6;
1197 	arr[num++] = 0x0;
1198 	arr[num++] = 12;	/* length tp descriptor */
1199 	/* naa-5 target port identifier (A) */
1200 	arr[num++] = 0x61;	/* proto=sas, binary */
1201 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1202 	arr[num++] = 0x0;	/* reserved */
1203 	arr[num++] = 0x8;	/* length */
1204 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1205 	num += 8;
1206 	arr[num++] = 0x0;	/* reserved */
1207 	arr[num++] = 0x0;	/* reserved */
1208 	arr[num++] = 0x0;
1209 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1210 	memset(arr + num, 0, 6);
1211 	num += 6;
1212 	arr[num++] = 0x0;
1213 	arr[num++] = 12;	/* length tp descriptor */
1214 	/* naa-5 target port identifier (B) */
1215 	arr[num++] = 0x61;	/* proto=sas, binary */
1216 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1217 	arr[num++] = 0x0;	/* reserved */
1218 	arr[num++] = 0x8;	/* length */
1219 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1220 	num += 8;
1221 
1222 	return num;
1223 }
1224 
1225 
/* Static payload (from byte 4 onward) for the ATA Information VPD page;
 * copied verbatim by inquiry_vpd_89(). */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1269 
1270 /* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	/* fixed payload; page length equals sizeof(vpd89_data) */
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1276 
1277 
/* Template (from byte 4 onward) for the Block limits VPD page; most
 * fields are overwritten at runtime by inquiry_vpd_b0(). */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1284 
1285 /* Block limits VPD page (SBC-3) */
1286 static int inquiry_vpd_b0(unsigned char *arr)
1287 {
1288 	unsigned int gran;
1289 
1290 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1291 
1292 	/* Optimal transfer length granularity */
1293 	if (sdebug_opt_xferlen_exp != 0 &&
1294 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1295 		gran = 1 << sdebug_opt_xferlen_exp;
1296 	else
1297 		gran = 1 << sdebug_physblk_exp;
1298 	put_unaligned_be16(gran, arr + 2);
1299 
1300 	/* Maximum Transfer Length */
1301 	if (sdebug_store_sectors > 0x400)
1302 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1303 
1304 	/* Optimal Transfer Length */
1305 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1306 
1307 	if (sdebug_lbpu) {
1308 		/* Maximum Unmap LBA Count */
1309 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1310 
1311 		/* Maximum Unmap Block Descriptor Count */
1312 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1313 	}
1314 
1315 	/* Unmap Granularity Alignment */
1316 	if (sdebug_unmap_alignment) {
1317 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1318 		arr[28] |= 0x80; /* UGAVALID */
1319 	}
1320 
1321 	/* Optimal Unmap Granularity */
1322 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1323 
1324 	/* Maximum WRITE SAME Length */
1325 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1326 
1327 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1328 
1329 	return sizeof(vpdb0_data);
1330 }
1331 
/* Block device characteristics VPD page (SBC-3). Returns the 0x3c page
 * length. The memset() already zeroes every byte, so only the non-zero
 * fields are assigned (the redundant arr[0]/arr[2] stores were dropped). */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
1343 
1344 /* Logical block provisioning VPD page (SBC-4) */
1345 static int inquiry_vpd_b2(unsigned char *arr)
1346 {
1347 	memset(arr, 0, 0x4);
1348 	arr[0] = 0;			/* threshold exponent */
1349 	if (sdebug_lbpu)
1350 		arr[1] = 1 << 7;
1351 	if (sdebug_lbpws)
1352 		arr[1] |= 1 << 6;
1353 	if (sdebug_lbpws10)
1354 		arr[1] |= 1 << 5;
1355 	if (sdebug_lbprz && scsi_debug_lbp())
1356 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1357 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1358 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1359 	/* threshold_percentage=0 */
1360 	return 0x4;
1361 }
1362 
1363 #define SDEBUG_LONG_INQ_SZ 96
1364 #define SDEBUG_MAX_INQ_ARR_SZ 584
1365 
/*
 * Respond to INQUIRY, both the standard form and (EVPD bit set) the VPD
 * pages this driver supports. The response is built in a kzalloc'ed
 * scratch buffer, truncated to the CDB allocation length and copied to
 * the data-in buffer. Returns 0 or check_condition_result.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp response to page length and allocation length */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1516 
/* Informational exceptions control mode page [0x1c], current values.
 * resp_requests() checks byte 2 bit 2 and the low nibble of byte 3
 * ("TEST set and MRIE==6") to decide whether to fake a THRESHOLD
 * EXCEEDED response. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1519 
/*
 * REQUEST SENSE: return the device's current sense data, converting
 * between fixed (0x70) and descriptor (0x72) formats as needed so the
 * response matches the DESC bit in the CDB. When the informational
 * exceptions mode page has TEST set with MRIE==6, a synthetic
 * THRESHOLD EXCEEDED response is returned instead.
 */
static int resp_requests(struct scsi_cmnd * scp,
			 struct sdebug_dev_info * devip)
{
	unsigned char * sbuff;
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
	bool dsense;
	int len = 18;

	memset(arr, 0, sizeof(arr));
	dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format wanted */
	sbuff = scp->sense_buffer;
	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* TEST set and MRIE==6 */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;   	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* TEST set and MRIE==6 */
		}
	} else {
		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
			;	/* have sense and formats match */
		else if (arr[0] <= 0x70) {
			/* no valid sense: return an empty response in the
			 * requested format */
			if (dsense) {
				memset(arr, 0, 8);
				arr[0] = 0x72;
				len = 8;
			} else {
				memset(arr, 0, 18);
				arr[0] = 0x70;
				arr[7] = 0xa;
			}
		} else if (dsense) {
			/* convert fixed format sense to descriptor format */
			memset(arr, 0, 8);
			arr[0] = 0x72;
			arr[1] = sbuff[2];     /* sense key */
			arr[2] = sbuff[12];    /* asc */
			arr[3] = sbuff[13];    /* ascq */
			len = 8;
		} else {
			/* convert descriptor format sense to fixed format */
			memset(arr, 0, 18);
			arr[0] = 0x70;
			arr[2] = sbuff[1];
			arr[7] = 0xa;
			arr[12] = sbuff[1];
			arr[13] = sbuff[3];
		}

	}
	/* sense has been consumed; reset to "no additional sense" */
	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
	return fill_from_dev_buffer(scp, arr, len);
}
1580 
/* START STOP UNIT: record the started/stopped state; any non-zero power
 * condition is rejected as an invalid CDB field. */
static int resp_start_stop(struct scsi_cmnd * scp,
			   struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);	/* START bit clear => stopped */
	atomic_xchg(&devip->stopped, stop);
	return 0;
}
1596 
1597 static sector_t get_sdebug_capacity(void)
1598 {
1599 	static const unsigned int gibibyte = 1073741824;
1600 
1601 	if (sdebug_virtual_gb > 0)
1602 		return (sector_t)sdebug_virtual_gb *
1603 			(gibibyte / sdebug_sector_size);
1604 	else
1605 		return sdebug_store_sectors;
1606 }
1607 
#define SDEBUG_READCAP_ARR_SZ 8
/* READ CAPACITY(10): report last LBA (capped at 0xffffffff, which tells
 * the initiator to use READ CAPACITY(16)) and the logical block size. */
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	/* block length in low 16 bits of bytes 4..7 (bytes 4-5 stay 0) */
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
1626 
#define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY(16): report last LBA, block size, physical block
 * exponent, alignment, provisioning (LBPME/LBPRZ) and protection bits. */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1664 
1665 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1666 
1667 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1668 			      struct sdebug_dev_info * devip)
1669 {
1670 	unsigned char *cmd = scp->cmnd;
1671 	unsigned char * arr;
1672 	int host_no = devip->sdbg_host->shost->host_no;
1673 	int n, ret, alen, rlen;
1674 	int port_group_a, port_group_b, port_a, port_b;
1675 
1676 	alen = get_unaligned_be32(cmd + 6);
1677 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1678 	if (! arr)
1679 		return DID_REQUEUE << 16;
1680 	/*
1681 	 * EVPD page 0x88 states we have two ports, one
1682 	 * real and a fake port with no device connected.
1683 	 * So we create two port groups with one port each
1684 	 * and set the group with port B to unavailable.
1685 	 */
1686 	port_a = 0x1; /* relative port A */
1687 	port_b = 0x2; /* relative port B */
1688 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1689 			(devip->channel & 0x7f);
1690 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1691 			(devip->channel & 0x7f) + 0x80;
1692 
1693 	/*
1694 	 * The asymmetric access state is cycled according to the host_id.
1695 	 */
1696 	n = 4;
1697 	if (sdebug_vpd_use_hostno == 0) {
1698 		arr[n++] = host_no % 3; /* Asymm access state */
1699 		arr[n++] = 0x0F; /* claim: all states are supported */
1700 	} else {
1701 		arr[n++] = 0x0; /* Active/Optimized path */
1702 		arr[n++] = 0x01; /* only support active/optimized paths */
1703 	}
1704 	put_unaligned_be16(port_group_a, arr + n);
1705 	n += 2;
1706 	arr[n++] = 0;    /* Reserved */
1707 	arr[n++] = 0;    /* Status code */
1708 	arr[n++] = 0;    /* Vendor unique */
1709 	arr[n++] = 0x1;  /* One port per group */
1710 	arr[n++] = 0;    /* Reserved */
1711 	arr[n++] = 0;    /* Reserved */
1712 	put_unaligned_be16(port_a, arr + n);
1713 	n += 2;
1714 	arr[n++] = 3;    /* Port unavailable */
1715 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1716 	put_unaligned_be16(port_group_b, arr + n);
1717 	n += 2;
1718 	arr[n++] = 0;    /* Reserved */
1719 	arr[n++] = 0;    /* Status code */
1720 	arr[n++] = 0;    /* Vendor unique */
1721 	arr[n++] = 0x1;  /* One port per group */
1722 	arr[n++] = 0;    /* Reserved */
1723 	arr[n++] = 0;    /* Reserved */
1724 	put_unaligned_be16(port_b, arr + n);
1725 	n += 2;
1726 
1727 	rlen = n - 4;
1728 	put_unaligned_be32(rlen, arr + 0);
1729 
1730 	/*
1731 	 * Return the smallest value of either
1732 	 * - The allocated length
1733 	 * - The constructed command length
1734 	 * - The maximum array size
1735 	 */
1736 	rlen = min(alen,n);
1737 	ret = fill_from_dev_buffer(scp, arr,
1738 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1739 	kfree(arr);
1740 	return ret;
1741 }
1742 
/*
 * Respond to MAINTENANCE IN, REPORT SUPPORTED OPERATION CODES.
 * reporting_opts 0 lists every supported command; 1, 2 and 3 describe a
 * single command selected by opcode (and by service action for 2/3).
 * When RCTD is set a command timeouts descriptor is appended to each
 * entry. Descriptors come from opcode_info_arr and its attached arrays.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return commands timeouts descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* construction buffer is capped at 8 KiB regardless of alloc_len */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* over-allocate so descriptor writes just below a_len cannot overrun */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, or 20 with timeouts descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* also emit each command attached to this entry */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* this opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported per standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* scan the attached opcodes for a match */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* scan the attached entries for this sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out the cdb usage bit mask */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp response to the construction buffer and the allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1893 
1894 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1895 			  struct sdebug_dev_info *devip)
1896 {
1897 	bool repd;
1898 	u32 alloc_len, len;
1899 	u8 arr[16];
1900 	u8 *cmd = scp->cmnd;
1901 
1902 	memset(arr, 0, sizeof(arr));
1903 	repd = !!(cmd[2] & 0x80);
1904 	alloc_len = get_unaligned_be32(cmd + 6);
1905 	if (alloc_len < 4) {
1906 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1907 		return check_condition_result;
1908 	}
1909 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1910 	arr[1] = 0x1;		/* ITNRS */
1911 	if (repd) {
1912 		arr[3] = 0xc;
1913 		len = 16;
1914 	} else
1915 		len = 4;
1916 
1917 	len = (len < alloc_len) ? len : alloc_len;
1918 	return fill_from_dev_buffer(scp, arr, len);
1919 }
1920 
1921 /* <<Following mode page info copied from ST318451LW>> */
1922 
/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1933 
/* Disconnect-Reconnect mode page (0x2) for MODE SENSE, all devices */
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1944 
1945 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1946 {       /* Format device page for mode_sense */
1947 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1948 				     0, 0, 0, 0, 0, 0, 0, 0,
1949 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1950 
1951 	memcpy(p, format_pg, sizeof(format_pg));
1952 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1953 	put_unaligned_be16(sdebug_sector_size, p + 12);
1954 	if (sdebug_removable)
1955 		p[20] |= 0x20; /* should agree with INQUIRY */
1956 	if (1 == pcontrol)
1957 		memset(p + 2, 0, sizeof(format_pg) - 2);
1958 	return sizeof(format_pg);
1959 }
1960 
1961 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1962 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1963 				     0, 0, 0, 0};
1964 
1965 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1966 { 	/* Caching page for mode_sense */
1967 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1968 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1969 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1970 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1971 
1972 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1973 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1974 	memcpy(p, caching_pg, sizeof(caching_pg));
1975 	if (1 == pcontrol)
1976 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1977 	else if (2 == pcontrol)
1978 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1979 	return sizeof(caching_pg);
1980 }
1981 
1982 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1983 				    0, 0, 0x2, 0x4b};
1984 
1985 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1986 { 	/* Control mode page for mode_sense */
1987 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1988 					0, 0, 0, 0};
1989 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1990 				     0, 0, 0x2, 0x4b};
1991 
1992 	if (sdebug_dsense)
1993 		ctrl_m_pg[2] |= 0x4;
1994 	else
1995 		ctrl_m_pg[2] &= ~0x4;
1996 
1997 	if (sdebug_ato)
1998 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1999 
2000 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2001 	if (1 == pcontrol)
2002 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2003 	else if (2 == pcontrol)
2004 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2005 	return sizeof(ctrl_m_pg);
2006 }
2007 
2008 
2009 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
2010 {	/* Informational Exceptions control mode page for mode_sense */
2011 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2012 				       0, 0, 0x0, 0x0};
2013 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2014 				      0, 0, 0x0, 0x0};
2015 
2016 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2017 	if (1 == pcontrol)
2018 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2019 	else if (2 == pcontrol)
2020 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2021 	return sizeof(iec_m_pg);
2022 }
2023 
/* SAS SSP mode page (0x19), short format, for MODE SENSE */
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2034 
2035 
2036 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
2037 			      int target_dev_id)
2038 {	/* SAS phy control and discover mode page for mode_sense */
2039 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2040 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2041 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2042 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2043 		    0x2, 0, 0, 0, 0, 0, 0, 0,
2044 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2045 		    0, 0, 0, 0, 0, 0, 0, 0,
2046 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2047 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2048 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
2049 		    0x3, 0, 0, 0, 0, 0, 0, 0,
2050 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
2051 		    0, 0, 0, 0, 0, 0, 0, 0,
2052 		};
2053 	int port_a, port_b;
2054 
2055 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2056 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2057 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2058 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2059 	port_a = target_dev_id + 1;
2060 	port_b = port_a + 1;
2061 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2062 	put_unaligned_be32(port_a, p + 20);
2063 	put_unaligned_be32(port_b, p + 48 + 20);
2064 	if (1 == pcontrol)
2065 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2066 	return sizeof(sas_pcd_m_pg);
2067 }
2068 
/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable: nothing below the header */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2080 
#define SDEBUG_MAX_MSENSE_SZ 256

/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, an optional block descriptor (8 bytes, or 16 when LLBAA is set,
 * for disks with DBD clear), then the requested mode page(s). pcontrol 3
 * (saved values) is rejected.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* current/changeable/default/saved */
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA accepted */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	if (msense_6) {
		/* 4-byte mode parameter header */
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		/* 8-byte mode parameter header */
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short descriptor: 32-bit block count, 16-bit block length */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long descriptor: 64-bit block count, 32-bit block length */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* the mode data length field excludes itself */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2240 
#define SDEBUG_MAX_MSELECT_SZ 512

/*
 * Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) mode pages can be
 * changed; a successful change raises a MODE PARAMETERS CHANGED unit
 * attention on the device. PF must be set and SP clear.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* the mode data length field is reserved (zero) for MODE SELECT */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the header and any block descriptors to the first mode page */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* D_SENSE selects descriptor format sense data */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2320 
/* Temperature log page (0xd) body: current and reference temperatures */
static int resp_temp_l_pg(unsigned char * arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2330 
2331 static int resp_ie_l_pg(unsigned char * arr)
2332 {
2333 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2334 		};
2335 
2336 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2337 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2338 		arr[4] = THRESHOLD_EXCEEDED;
2339 		arr[5] = 0xff;
2340 	}
2341 	return sizeof(ie_l_pg);
2342 }
2343 
2344 #define SDEBUG_MAX_LSENSE_SZ 512
2345 
2346 static int resp_log_sense(struct scsi_cmnd *scp,
2347 			  struct sdebug_dev_info *devip)
2348 {
2349 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2350 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2351 	unsigned char *cmd = scp->cmnd;
2352 
2353 	memset(arr, 0, sizeof(arr));
2354 	ppc = cmd[1] & 0x2;
2355 	sp = cmd[1] & 0x1;
2356 	if (ppc || sp) {
2357 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2358 		return check_condition_result;
2359 	}
2360 	pcode = cmd[2] & 0x3f;
2361 	subpcode = cmd[3] & 0xff;
2362 	alloc_len = get_unaligned_be16(cmd + 7);
2363 	arr[0] = pcode;
2364 	if (0 == subpcode) {
2365 		switch (pcode) {
2366 		case 0x0:	/* Supported log pages log page */
2367 			n = 4;
2368 			arr[n++] = 0x0;		/* this page */
2369 			arr[n++] = 0xd;		/* Temperature */
2370 			arr[n++] = 0x2f;	/* Informational exceptions */
2371 			arr[3] = n - 4;
2372 			break;
2373 		case 0xd:	/* Temperature log page */
2374 			arr[3] = resp_temp_l_pg(arr + 4);
2375 			break;
2376 		case 0x2f:	/* Informational exceptions log page */
2377 			arr[3] = resp_ie_l_pg(arr + 4);
2378 			break;
2379 		default:
2380 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2381 			return check_condition_result;
2382 		}
2383 	} else if (0xff == subpcode) {
2384 		arr[0] |= 0x40;
2385 		arr[1] = subpcode;
2386 		switch (pcode) {
2387 		case 0x0:	/* Supported log pages and subpages log page */
2388 			n = 4;
2389 			arr[n++] = 0x0;
2390 			arr[n++] = 0x0;		/* 0,0 page */
2391 			arr[n++] = 0x0;
2392 			arr[n++] = 0xff;	/* this page */
2393 			arr[n++] = 0xd;
2394 			arr[n++] = 0x0;		/* Temperature */
2395 			arr[n++] = 0x2f;
2396 			arr[n++] = 0x0;	/* Informational exceptions */
2397 			arr[3] = n - 4;
2398 			break;
2399 		case 0xd:	/* Temperature subpages */
2400 			n = 4;
2401 			arr[n++] = 0xd;
2402 			arr[n++] = 0x0;		/* Temperature */
2403 			arr[3] = n - 4;
2404 			break;
2405 		case 0x2f:	/* Informational exceptions subpages */
2406 			n = 4;
2407 			arr[n++] = 0x2f;
2408 			arr[n++] = 0x0;		/* Informational exceptions */
2409 			arr[3] = n - 4;
2410 			break;
2411 		default:
2412 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2413 			return check_condition_result;
2414 		}
2415 	} else {
2416 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2417 		return check_condition_result;
2418 	}
2419 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2420 	return fill_from_dev_buffer(scp, arr,
2421 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2422 }
2423 
2424 static int check_device_access_params(struct scsi_cmnd *scp,
2425 				      unsigned long long lba, unsigned int num)
2426 {
2427 	if (lba + num > sdebug_capacity) {
2428 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2429 		return check_condition_result;
2430 	}
2431 	/* transfer length excessive (tie in to block limits VPD page) */
2432 	if (num > sdebug_store_sectors) {
2433 		/* needs work to find which cdb byte 'num' comes from */
2434 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2435 		return check_condition_result;
2436 	}
2437 	return 0;
2438 }
2439 
/*
 * Copy data between the command's scatter-gather list and the fake
 * backing store, wrapping at the end of the store when the region spans
 * it. sg_skip is the byte offset into the sgl at which to start.
 * Returns the number of bytes copied, or -1 if the data direction does
 * not match the requested transfer.
 */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* map lba into the (possibly smaller) store; note any wrap-around */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped: copy the remainder from the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2481 
2482 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2483  * arr into fake_store(lba,num) and return true. If comparison fails then
2484  * return false. */
2485 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2486 {
2487 	bool res;
2488 	u64 block, rest = 0;
2489 	u32 store_blks = sdebug_store_sectors;
2490 	u32 lb_size = sdebug_sector_size;
2491 
2492 	block = do_div(lba, store_blks);
2493 	if (block + num > store_blks)
2494 		rest = block + num - store_blks;
2495 
2496 	res = !memcmp(fake_storep + (block * lb_size), arr,
2497 		      (num - rest) * lb_size);
2498 	if (!res)
2499 		return res;
2500 	if (rest)
2501 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2502 			     rest * lb_size);
2503 	if (!res)
2504 		return res;
2505 	arr += num * lb_size;
2506 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2507 	if (rest)
2508 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2509 		       rest * lb_size);
2510 	return res;
2511 }
2512 
2513 static __be16 dif_compute_csum(const void *buf, int len)
2514 {
2515 	__be16 csum;
2516 
2517 	if (sdebug_guard)
2518 		csum = (__force __be16)ip_compute_csum(buf, len);
2519 	else
2520 		csum = cpu_to_be16(crc_t10dif(buf, len));
2521 
2522 	return csum;
2523 }
2524 
/*
 * Verify one protection information tuple against the sector's data.
 * Returns 0 if OK, 0x01 on a guard tag mismatch, 0x03 on a reference
 * tag mismatch (type 1 checks against the LBA, type 2 against ei_lba).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2551 
/*
 * Copy protection information for 'sectors' sectors starting at 'sector'
 * between dif_storep and the command's protection sgl; the direction is
 * chosen by 'read'. Wraps at the end of dif_storep.
 */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* does this chunk run past the end of dif_storep? */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped: finish the chunk at the store's start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2594 
2595 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2596 			    unsigned int sectors, u32 ei_lba)
2597 {
2598 	unsigned int i;
2599 	struct t10_pi_tuple *sdt;
2600 	sector_t sector;
2601 
2602 	for (i = 0; i < sectors; i++, ei_lba++) {
2603 		int ret;
2604 
2605 		sector = start_sec + i;
2606 		sdt = dif_store(sector);
2607 
2608 		if (sdt->app_tag == cpu_to_be16(0xffff))
2609 			continue;
2610 
2611 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2612 		if (ret) {
2613 			dif_errors++;
2614 			return ret;
2615 		}
2616 	}
2617 
2618 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2619 	dix_reads++;
2620 
2621 	return 0;
2622 }
2623 
/*
 * Respond to the READ(6/10/12/16/32) and XDWRITEREAD(10) data-in
 * commands: decode lba/num from the cdb per opcode, apply protection
 * and range checks, optionally inject errors, then copy data from the
 * fake store into the command's buffer under the atomic_rw read lock.
 * Returns 0, check/illegal_condition_result, or DID_ERROR << 16.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial lba, READ(32) only */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and transfer length per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT bits (cmd[1] 5:7) invalid for type 2 here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		/* inj_short: pretend only half the blocks transferred */
		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional injected medium error in a fixed LBA window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and transferred bytes */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injection: data already moved above */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2764 
/*
 * Dump 'len' bytes of a sector to the kernel log, 16 bytes per line;
 * printable ASCII is shown as ' c ', everything else as two hex digits.
 * Used by prot_verify_write() when a PI check fails on the data.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		/*
		 * Bound the inner loop by 'len' as well as the 16-byte
		 * row width so a length that is not a multiple of 16
		 * cannot read past the end of 'buf'.
		 */
		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2786 
/*
 * Verify the caller-supplied protection information against the data
 * being written, walking the data and protection scatter-gather lists
 * in lock-step (one t10_pi_tuple per sdebug_sector_size data block).
 * On success the PI is saved via dif_copy_prot(); on failure the
 * offending data block is dumped and the dif_verify() code returned.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset into current protection page */
	int dpage_offset;	/* offset into current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* release only what was consumed from the data page */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2858 
2859 static unsigned long lba_to_map_index(sector_t lba)
2860 {
2861 	if (sdebug_unmap_alignment)
2862 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2863 	sector_div(lba, sdebug_unmap_granularity);
2864 	return lba;
2865 }
2866 
2867 static sector_t map_index_to_lba(unsigned long index)
2868 {
2869 	sector_t lba = index * sdebug_unmap_granularity;
2870 
2871 	if (sdebug_unmap_alignment)
2872 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2873 	return lba;
2874 }
2875 
2876 static unsigned int map_state(sector_t lba, unsigned int *num)
2877 {
2878 	sector_t end;
2879 	unsigned int mapped;
2880 	unsigned long index;
2881 	unsigned long next;
2882 
2883 	index = lba_to_map_index(lba);
2884 	mapped = test_bit(index, map_storep);
2885 
2886 	if (mapped)
2887 		next = find_next_zero_bit(map_storep, map_size, index);
2888 	else
2889 		next = find_next_bit(map_storep, map_size, index);
2890 
2891 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2892 	*num = end - lba;
2893 	return mapped;
2894 }
2895 
2896 static void map_region(sector_t lba, unsigned int len)
2897 {
2898 	sector_t end = lba + len;
2899 
2900 	while (lba < end) {
2901 		unsigned long index = lba_to_map_index(lba);
2902 
2903 		if (index < map_size)
2904 			set_bit(index, map_storep);
2905 
2906 		lba = map_index_to_lba(index + 1);
2907 	}
2908 }
2909 
/*
 * Mark as unmapped every provisioning granule that lies entirely
 * within [lba, lba+len); partial granules at either edge are left
 * mapped. For cleared granules the backing data is overwritten
 * according to sdebug_lbprz (zeroes or 0xff bytes) and any stored
 * protection information is invalidated with 0xff.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only clear a granule that starts at 'lba' and fits */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			/* invalidate protection info for the granule */
			if (dif_storep) {
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2937 
/*
 * Respond to the WRITE(6/10/12/16/32) and XDWRITEREAD(10) data-out
 * commands: decode lba/num from the cdb per opcode, apply protection
 * and range checks, verify/store PI if supplied, then copy data from
 * the command's buffer into the fake store under the atomic_rw write
 * lock. Returns 0, check/illegal_condition_result, or DID_ERROR << 16.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial lba, WRITE(32) only */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and transfer length per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* WRPROTECT bits (cmd[1] 5:7) invalid for type 2 here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	/* written blocks become "mapped" when thin provisioning is on */
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* post-transfer error injection: data already written above */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3058 
3059 /*
3060  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3061  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3062  */
3063 static int resp_write_scat(struct scsi_cmnd *scp,
3064 			   struct sdebug_dev_info *devip)
3065 {
3066 	u8 *cmd = scp->cmnd;
3067 	u8 *lrdp = NULL;
3068 	u8 *up;
3069 	u8 wrprotect;
3070 	u16 lbdof, num_lrd, k;
3071 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3072 	u32 lb_size = sdebug_sector_size;
3073 	u32 ei_lba;
3074 	u64 lba;
3075 	unsigned long iflags;
3076 	int ret, res;
3077 	bool is_16;
3078 	static const u32 lrd_size = 32; /* + parameter list header size */
3079 
3080 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3081 		is_16 = false;
3082 		wrprotect = (cmd[10] >> 5) & 0x7;
3083 		lbdof = get_unaligned_be16(cmd + 12);
3084 		num_lrd = get_unaligned_be16(cmd + 16);
3085 		bt_len = get_unaligned_be32(cmd + 28);
3086 	} else {        /* that leaves WRITE SCATTERED(16) */
3087 		is_16 = true;
3088 		wrprotect = (cmd[2] >> 5) & 0x7;
3089 		lbdof = get_unaligned_be16(cmd + 4);
3090 		num_lrd = get_unaligned_be16(cmd + 8);
3091 		bt_len = get_unaligned_be32(cmd + 10);
3092 		if (unlikely(have_dif_prot)) {
3093 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3094 			    wrprotect) {
3095 				mk_sense_invalid_opcode(scp);
3096 				return illegal_condition_result;
3097 			}
3098 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3099 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3100 			     wrprotect == 0)
3101 				sdev_printk(KERN_ERR, scp->device,
3102 					    "Unprotected WR to DIF device\n");
3103 		}
3104 	}
3105 	if ((num_lrd == 0) || (bt_len == 0))
3106 		return 0;       /* T10 says these do-nothings are not errors */
3107 	if (lbdof == 0) {
3108 		if (sdebug_verbose)
3109 			sdev_printk(KERN_INFO, scp->device,
3110 				"%s: %s: LB Data Offset field bad\n",
3111 				my_name, __func__);
3112 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3113 		return illegal_condition_result;
3114 	}
3115 	lbdof_blen = lbdof * lb_size;
3116 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3117 		if (sdebug_verbose)
3118 			sdev_printk(KERN_INFO, scp->device,
3119 				"%s: %s: LBA range descriptors don't fit\n",
3120 				my_name, __func__);
3121 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3122 		return illegal_condition_result;
3123 	}
3124 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3125 	if (lrdp == NULL)
3126 		return SCSI_MLQUEUE_HOST_BUSY;
3127 	if (sdebug_verbose)
3128 		sdev_printk(KERN_INFO, scp->device,
3129 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3130 			my_name, __func__, lbdof_blen);
3131 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3132 	if (res == -1) {
3133 		ret = DID_ERROR << 16;
3134 		goto err_out;
3135 	}
3136 
3137 	write_lock_irqsave(&atomic_rw, iflags);
3138 	sg_off = lbdof_blen;
3139 	/* Spec says Buffer xfer Length field in number of LBs in dout */
3140 	cum_lb = 0;
3141 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3142 		lba = get_unaligned_be64(up + 0);
3143 		num = get_unaligned_be32(up + 8);
3144 		if (sdebug_verbose)
3145 			sdev_printk(KERN_INFO, scp->device,
3146 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3147 				my_name, __func__, k, lba, num, sg_off);
3148 		if (num == 0)
3149 			continue;
3150 		ret = check_device_access_params(scp, lba, num);
3151 		if (ret)
3152 			goto err_out_unlock;
3153 		num_by = num * lb_size;
3154 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3155 
3156 		if ((cum_lb + num) > bt_len) {
3157 			if (sdebug_verbose)
3158 				sdev_printk(KERN_INFO, scp->device,
3159 				    "%s: %s: sum of blocks > data provided\n",
3160 				    my_name, __func__);
3161 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3162 					0);
3163 			ret = illegal_condition_result;
3164 			goto err_out_unlock;
3165 		}
3166 
3167 		/* DIX + T10 DIF */
3168 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3169 			int prot_ret = prot_verify_write(scp, lba, num,
3170 							 ei_lba);
3171 
3172 			if (prot_ret) {
3173 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3174 						prot_ret);
3175 				ret = illegal_condition_result;
3176 				goto err_out_unlock;
3177 			}
3178 		}
3179 
3180 		ret = do_device_access(scp, sg_off, lba, num, true);
3181 		if (unlikely(scsi_debug_lbp()))
3182 			map_region(lba, num);
3183 		if (unlikely(-1 == ret)) {
3184 			ret = DID_ERROR << 16;
3185 			goto err_out_unlock;
3186 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3187 			sdev_printk(KERN_INFO, scp->device,
3188 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3189 			    my_name, num_by, ret);
3190 
3191 		if (unlikely(sdebug_any_injecting_opt)) {
3192 			struct sdebug_queued_cmd *sqcp =
3193 				(struct sdebug_queued_cmd *)scp->host_scribble;
3194 
3195 			if (sqcp) {
3196 				if (sqcp->inj_recovered) {
3197 					mk_sense_buffer(scp, RECOVERED_ERROR,
3198 							THRESHOLD_EXCEEDED, 0);
3199 					ret = illegal_condition_result;
3200 					goto err_out_unlock;
3201 				} else if (sqcp->inj_dif) {
3202 					/* Logical block guard check failed */
3203 					mk_sense_buffer(scp, ABORTED_COMMAND,
3204 							0x10, 1);
3205 					ret = illegal_condition_result;
3206 					goto err_out_unlock;
3207 				} else if (sqcp->inj_dix) {
3208 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3209 							0x10, 1);
3210 					ret = illegal_condition_result;
3211 					goto err_out_unlock;
3212 				}
3213 			}
3214 		}
3215 		sg_off += num_by;
3216 		cum_lb += num;
3217 	}
3218 	ret = 0;
3219 err_out_unlock:
3220 	write_unlock_irqrestore(&atomic_rw, iflags);
3221 err_out:
3222 	kfree(lrdp);
3223 	return ret;
3224 }
3225 
/*
 * Common worker for WRITE SAME(10/16): either unmap the range (when
 * 'unmap' is set and logical block provisioning is enabled), or write
 * one logical block of data (zeroes when 'ndob' is set, else fetched
 * from the data-out buffer) and replicate it across 'num' blocks.
 * Caller-visible errors come back as check conditions or DID_ERROR.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of 'lba' within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	/* written blocks become "mapped" when thin provisioning is on */
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3276 
3277 static int resp_write_same_10(struct scsi_cmnd *scp,
3278 			      struct sdebug_dev_info *devip)
3279 {
3280 	u8 *cmd = scp->cmnd;
3281 	u32 lba;
3282 	u16 num;
3283 	u32 ei_lba = 0;
3284 	bool unmap = false;
3285 
3286 	if (cmd[1] & 0x8) {
3287 		if (sdebug_lbpws10 == 0) {
3288 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3289 			return check_condition_result;
3290 		} else
3291 			unmap = true;
3292 	}
3293 	lba = get_unaligned_be32(cmd + 2);
3294 	num = get_unaligned_be16(cmd + 7);
3295 	if (num > sdebug_write_same_length) {
3296 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3297 		return check_condition_result;
3298 	}
3299 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3300 }
3301 
3302 static int resp_write_same_16(struct scsi_cmnd *scp,
3303 			      struct sdebug_dev_info *devip)
3304 {
3305 	u8 *cmd = scp->cmnd;
3306 	u64 lba;
3307 	u32 num;
3308 	u32 ei_lba = 0;
3309 	bool unmap = false;
3310 	bool ndob = false;
3311 
3312 	if (cmd[1] & 0x8) {	/* UNMAP */
3313 		if (sdebug_lbpws == 0) {
3314 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3315 			return check_condition_result;
3316 		} else
3317 			unmap = true;
3318 	}
3319 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3320 		ndob = true;
3321 	lba = get_unaligned_be64(cmd + 2);
3322 	num = get_unaligned_be32(cmd + 10);
3323 	if (num > sdebug_write_same_length) {
3324 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3325 		return check_condition_result;
3326 	}
3327 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3328 }
3329 
3330 /* Note the mode field is in the same position as the (lower) service action
3331  * field. For the Report supported operation codes command, SPC-4 suggests
3332  * each mode of this command should be reported separately; for future. */
3333 static int resp_write_buffer(struct scsi_cmnd *scp,
3334 			     struct sdebug_dev_info *devip)
3335 {
3336 	u8 *cmd = scp->cmnd;
3337 	struct scsi_device *sdp = scp->device;
3338 	struct sdebug_dev_info *dp;
3339 	u8 mode;
3340 
3341 	mode = cmd[1] & 0x1f;
3342 	switch (mode) {
3343 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3344 		/* set UAs on this device only */
3345 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3346 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3347 		break;
3348 	case 0x5:	/* download MC, save and ACT */
3349 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3350 		break;
3351 	case 0x6:	/* download MC with offsets and ACT */
3352 		/* set UAs on most devices (LUs) in this target */
3353 		list_for_each_entry(dp,
3354 				    &devip->sdbg_host->dev_info_list,
3355 				    dev_list)
3356 			if (dp->target == sdp->id) {
3357 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3358 				if (devip != dp)
3359 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3360 						dp->uas_bm);
3361 			}
3362 		break;
3363 	case 0x7:	/* download MC with offsets, save, and ACT */
3364 		/* set UA on all devices (LUs) in this target */
3365 		list_for_each_entry(dp,
3366 				    &devip->sdbg_host->dev_info_list,
3367 				    dev_list)
3368 			if (dp->target == sdp->id)
3369 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3370 					dp->uas_bm);
3371 		break;
3372 	default:
3373 		/* do nothing for this command for other mode values */
3374 		break;
3375 	}
3376 	return 0;
3377 }
3378 
/*
 * Respond to COMPARE AND WRITE: the data-out buffer holds 'num' blocks
 * to compare followed by 'num' blocks to write. Both halves are pulled
 * into a temporary array (by temporarily pointing fake_storep at it),
 * then comp_write_worker() performs the compare-and-conditional-write.
 * Miscompare yields a MISCOMPARE check condition.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;	/* blocks in data-out: compare half + write half */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	/* written blocks become "mapped" when thin provisioning is on */
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3454 
/* One block descriptor within an UNMAP parameter list (see SBC) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of the range to unmap */
	__be32	blocks;		/* number of LBAs in the range */
	__be32	__reserved;
};
3460 
3461 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3462 {
3463 	unsigned char *buf;
3464 	struct unmap_block_desc *desc;
3465 	unsigned int i, payload_len, descriptors;
3466 	int ret;
3467 	unsigned long iflags;
3468 
3469 
3470 	if (!scsi_debug_lbp())
3471 		return 0;	/* fib and say its done */
3472 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3473 	BUG_ON(scsi_bufflen(scp) != payload_len);
3474 
3475 	descriptors = (payload_len - 8) / 16;
3476 	if (descriptors > sdebug_unmap_max_desc) {
3477 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3478 		return check_condition_result;
3479 	}
3480 
3481 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3482 	if (!buf) {
3483 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3484 				INSUFF_RES_ASCQ);
3485 		return check_condition_result;
3486 	}
3487 
3488 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3489 
3490 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3491 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3492 
3493 	desc = (void *)&buf[8];
3494 
3495 	write_lock_irqsave(&atomic_rw, iflags);
3496 
3497 	for (i = 0 ; i < descriptors ; i++) {
3498 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3499 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3500 
3501 		ret = check_device_access_params(scp, lba, num);
3502 		if (ret)
3503 			goto out;
3504 
3505 		unmap_region(lba, num);
3506 	}
3507 
3508 	ret = 0;
3509 
3510 out:
3511 	write_unlock_irqrestore(&atomic_rw, iflags);
3512 	kfree(buf);
3513 
3514 	return ret;
3515 }
3516 
3517 #define SDEBUG_GET_LBA_STATUS_LEN 32
3518 
3519 static int resp_get_lba_status(struct scsi_cmnd *scp,
3520 			       struct sdebug_dev_info *devip)
3521 {
3522 	u8 *cmd = scp->cmnd;
3523 	u64 lba;
3524 	u32 alloc_len, mapped, num;
3525 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3526 	int ret;
3527 
3528 	lba = get_unaligned_be64(cmd + 2);
3529 	alloc_len = get_unaligned_be32(cmd + 10);
3530 
3531 	if (alloc_len < 24)
3532 		return 0;
3533 
3534 	ret = check_device_access_params(scp, lba, 1);
3535 	if (ret)
3536 		return ret;
3537 
3538 	if (scsi_debug_lbp())
3539 		mapped = map_state(lba, &num);
3540 	else {
3541 		mapped = 1;
3542 		/* following just in case virtual_gb changed */
3543 		sdebug_capacity = get_sdebug_capacity();
3544 		if (sdebug_capacity - lba <= 0xffffffff)
3545 			num = sdebug_capacity - lba;
3546 		else
3547 			num = 0xffffffff;
3548 	}
3549 
3550 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3551 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3552 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3553 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3554 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3555 
3556 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3557 }
3558 
3559 #define RL_BUCKET_ELEMS 8
3560 
3561 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3562  * (W-LUN), the normal Linux scanning logic does not associate it with a
3563  * device (e.g. /dev/sg7). The following magic will make that association:
3564  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3565  * where <n> is a host number. If there are multiple targets in a host then
3566  * the above will associate a W-LUN to each target. To only get a W-LUN
3567  * for target 2, then use "echo '- 2 49409' > scan" .
3568  */
/*
 * Respond to REPORT LUNS: build the LUN list selected by the cdb's
 * SELECT REPORT field and copy it to the data-in buffer in buckets of
 * RL_BUCKET_ELEMS entries (the 8 byte response header occupies the
 * first slot of the first bucket, which works because sizeof(struct
 * scsi_lun) is also 8).
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* decide how many normal LUNs and W-LUNs to report */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		/* bucket 0 starts with the response header */
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* a partially filled bucket means the LUN list is done */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* W-LUN, if requested, goes in the last bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3659 
3660 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3661 			    unsigned int num, struct sdebug_dev_info *devip)
3662 {
3663 	int j;
3664 	unsigned char *kaddr, *buf;
3665 	unsigned int offset;
3666 	struct scsi_data_buffer *sdb = scsi_in(scp);
3667 	struct sg_mapping_iter miter;
3668 
3669 	/* better not to use temporary buffer. */
3670 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3671 	if (!buf) {
3672 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3673 				INSUFF_RES_ASCQ);
3674 		return check_condition_result;
3675 	}
3676 
3677 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3678 
3679 	offset = 0;
3680 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3681 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3682 
3683 	while (sg_miter_next(&miter)) {
3684 		kaddr = miter.addr;
3685 		for (j = 0; j < miter.length; j++)
3686 			*(kaddr + j) ^= *(buf + offset + j);
3687 
3688 		offset += miter.length;
3689 	}
3690 	sg_miter_stop(&miter);
3691 	kfree(buf);
3692 
3693 	return 0;
3694 }
3695 
3696 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3697 			       struct sdebug_dev_info *devip)
3698 {
3699 	u8 *cmd = scp->cmnd;
3700 	u64 lba;
3701 	u32 num;
3702 	int errsts;
3703 
3704 	if (!scsi_bidi_cmnd(scp)) {
3705 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3706 				INSUFF_RES_ASCQ);
3707 		return check_condition_result;
3708 	}
3709 	errsts = resp_read_dt0(scp, devip);
3710 	if (errsts)
3711 		return errsts;
3712 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3713 		errsts = resp_write_dt0(scp, devip);
3714 		if (errsts)
3715 			return errsts;
3716 	}
3717 	lba = get_unaligned_be32(cmd + 2);
3718 	num = get_unaligned_be16(cmd + 7);
3719 	return resp_xdwriteread(scp, lba, num, devip);
3720 }
3721 
3722 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3723 {
3724 	struct sdebug_queue *sqp = sdebug_q_arr;
3725 
3726 	if (sdebug_mq_active) {
3727 		u32 tag = blk_mq_unique_tag(cmnd->request);
3728 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3729 
3730 		if (unlikely(hwq >= submit_queues)) {
3731 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3732 			hwq %= submit_queues;
3733 		}
3734 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3735 		return sqp + hwq;
3736 	} else
3737 		return sqp;
3738 }
3739 
3740 /* Queued (deferred) command completions converge here. */
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* track completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	/* NULL here means the command was aborted/stopped concurrently */
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue: user shrank max_queue while busy */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/*
		 * Once the highest in-use slot drops below the old limit
		 * the retirement is complete; otherwise lower the ceiling
		 * to just above the highest slot still in use.
		 */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3804 
3805 /* When high resolution timer goes off this function is called. */
3806 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3807 {
3808 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3809 						  hrt);
3810 	sdebug_q_cmd_complete(sd_dp);
3811 	return HRTIMER_NORESTART;
3812 }
3813 
3814 /* When work queue schedules work, it calls this function. */
3815 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3816 {
3817 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3818 						  ew.work);
3819 	sdebug_q_cmd_complete(sd_dp);
3820 }
3821 
3822 static bool got_shared_uuid;
3823 static uuid_t shared_uuid;
3824 
3825 static struct sdebug_dev_info *sdebug_device_create(
3826 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3827 {
3828 	struct sdebug_dev_info *devip;
3829 
3830 	devip = kzalloc(sizeof(*devip), flags);
3831 	if (devip) {
3832 		if (sdebug_uuid_ctl == 1)
3833 			uuid_gen(&devip->lu_name);
3834 		else if (sdebug_uuid_ctl == 2) {
3835 			if (got_shared_uuid)
3836 				devip->lu_name = shared_uuid;
3837 			else {
3838 				uuid_gen(&shared_uuid);
3839 				got_shared_uuid = true;
3840 				devip->lu_name = shared_uuid;
3841 			}
3842 		}
3843 		devip->sdbg_host = sdbg_host;
3844 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3845 	}
3846 	return devip;
3847 }
3848 
3849 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3850 {
3851 	struct sdebug_host_info *sdbg_host;
3852 	struct sdebug_dev_info *open_devip = NULL;
3853 	struct sdebug_dev_info *devip;
3854 
3855 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3856 	if (!sdbg_host) {
3857 		pr_err("Host info NULL\n");
3858 		return NULL;
3859 	}
3860 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3861 		if ((devip->used) && (devip->channel == sdev->channel) &&
3862 		    (devip->target == sdev->id) &&
3863 		    (devip->lun == sdev->lun))
3864 			return devip;
3865 		else {
3866 			if ((!devip->used) && (!open_devip))
3867 				open_devip = devip;
3868 		}
3869 	}
3870 	if (!open_devip) { /* try and make a new one */
3871 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3872 		if (!open_devip) {
3873 			pr_err("out of memory at line %d\n", __LINE__);
3874 			return NULL;
3875 		}
3876 	}
3877 
3878 	open_devip->channel = sdev->channel;
3879 	open_devip->target = sdev->id;
3880 	open_devip->lun = sdev->lun;
3881 	open_devip->sdbg_host = sdbg_host;
3882 	atomic_set(&open_devip->num_in_q, 0);
3883 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3884 	open_devip->used = true;
3885 	return open_devip;
3886 }
3887 
3888 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3889 {
3890 	if (sdebug_verbose)
3891 		pr_info("slave_alloc <%u %u %u %llu>\n",
3892 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3893 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3894 	return 0;
3895 }
3896 
3897 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3898 {
3899 	struct sdebug_dev_info *devip =
3900 			(struct sdebug_dev_info *)sdp->hostdata;
3901 
3902 	if (sdebug_verbose)
3903 		pr_info("slave_configure <%u %u %u %llu>\n",
3904 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3905 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3906 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3907 	if (devip == NULL) {
3908 		devip = find_build_dev_info(sdp);
3909 		if (devip == NULL)
3910 			return 1;  /* no resources, will be marked offline */
3911 	}
3912 	sdp->hostdata = devip;
3913 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3914 	if (sdebug_no_uld)
3915 		sdp->no_uld_attach = 1;
3916 	config_cdb_len(sdp);
3917 	return 0;
3918 }
3919 
3920 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3921 {
3922 	struct sdebug_dev_info *devip =
3923 		(struct sdebug_dev_info *)sdp->hostdata;
3924 
3925 	if (sdebug_verbose)
3926 		pr_info("slave_destroy <%u %u %u %llu>\n",
3927 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3928 	if (devip) {
3929 		/* make this slot available for re-use */
3930 		devip->used = false;
3931 		sdp->hostdata = NULL;
3932 	}
3933 }
3934 
3935 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3936 {
3937 	if (!sd_dp)
3938 		return;
3939 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3940 		hrtimer_cancel(&sd_dp->hrt);
3941 	else if (sdebug_jdelay < 0)
3942 		cancel_work_sync(&sd_dp->ew.work);
3943 }
3944 
3945 /* If @cmnd found deletes its timer or work queue and returns true; else
3946    returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		/* slots above max_queue may still be draining after a shrink */
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/*
				 * Drop the lock before stop_qc_helper():
				 * cancel_work_sync() may sleep.  a_cmnd was
				 * already NULLed so a racing completion bails
				 * out harmlessly.
				 */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3984 
3985 /* Deletes (stops) timers or work queues of all queued commands */
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				/* slot in use but already stopped elsewhere */
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/*
				 * Drop the lock across stop_qc_helper()
				 * (cancel_work_sync() may sleep), then
				 * re-take it to continue the scan.
				 */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4017 
4018 /* Free queued command memory on heap */
4019 static void free_all_queued(void)
4020 {
4021 	int j, k;
4022 	struct sdebug_queue *sqp;
4023 	struct sdebug_queued_cmd *sqcp;
4024 
4025 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4026 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4027 			sqcp = &sqp->qc_arr[k];
4028 			kfree(sqcp->sd_dp);
4029 			sqcp->sd_dp = NULL;
4030 		}
4031 	}
4032 }
4033 
4034 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4035 {
4036 	bool ok;
4037 
4038 	++num_aborts;
4039 	if (SCpnt) {
4040 		ok = stop_queued_cmnd(SCpnt);
4041 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4042 			sdev_printk(KERN_INFO, SCpnt->device,
4043 				    "%s: command%s found\n", __func__,
4044 				    ok ? "" : " not");
4045 	}
4046 	return SUCCESS;
4047 }
4048 
4049 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
4050 {
4051 	++num_dev_resets;
4052 	if (SCpnt && SCpnt->device) {
4053 		struct scsi_device *sdp = SCpnt->device;
4054 		struct sdebug_dev_info *devip =
4055 				(struct sdebug_dev_info *)sdp->hostdata;
4056 
4057 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4058 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4059 		if (devip)
4060 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4061 	}
4062 	return SUCCESS;
4063 }
4064 
4065 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4066 {
4067 	struct sdebug_host_info *sdbg_host;
4068 	struct sdebug_dev_info *devip;
4069 	struct scsi_device *sdp;
4070 	struct Scsi_Host *hp;
4071 	int k = 0;
4072 
4073 	++num_target_resets;
4074 	if (!SCpnt)
4075 		goto lie;
4076 	sdp = SCpnt->device;
4077 	if (!sdp)
4078 		goto lie;
4079 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4080 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4081 	hp = sdp->host;
4082 	if (!hp)
4083 		goto lie;
4084 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4085 	if (sdbg_host) {
4086 		list_for_each_entry(devip,
4087 				    &sdbg_host->dev_info_list,
4088 				    dev_list)
4089 			if (devip->target == sdp->id) {
4090 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4091 				++k;
4092 			}
4093 	}
4094 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4095 		sdev_printk(KERN_INFO, sdp,
4096 			    "%s: %d device(s) found in target\n", __func__, k);
4097 lie:
4098 	return SUCCESS;
4099 }
4100 
4101 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
4102 {
4103 	struct sdebug_host_info *sdbg_host;
4104 	struct sdebug_dev_info *devip;
4105 	struct scsi_device *sdp;
4106 	struct Scsi_Host *hp;
4107 	int k = 0;
4108 
4109 	++num_bus_resets;
4110 	if (!(SCpnt && SCpnt->device))
4111 		goto lie;
4112 	sdp = SCpnt->device;
4113 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4114 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4115 	hp = sdp->host;
4116 	if (hp) {
4117 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4118 		if (sdbg_host) {
4119 			list_for_each_entry(devip,
4120 					    &sdbg_host->dev_info_list,
4121 					    dev_list) {
4122 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4123 				++k;
4124 			}
4125 		}
4126 	}
4127 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4128 		sdev_printk(KERN_INFO, sdp,
4129 			    "%s: %d device(s) found in host\n", __func__, k);
4130 lie:
4131 	return SUCCESS;
4132 }
4133 
4134 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
4135 {
4136 	struct sdebug_host_info * sdbg_host;
4137 	struct sdebug_dev_info *devip;
4138 	int k = 0;
4139 
4140 	++num_host_resets;
4141 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4142 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4143 	spin_lock(&sdebug_host_list_lock);
4144 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4145 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4146 				    dev_list) {
4147 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4148 			++k;
4149 		}
4150 	}
4151 	spin_unlock(&sdebug_host_list_lock);
4152 	stop_all_queued();
4153 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4154 		sdev_printk(KERN_INFO, SCpnt->device,
4155 			    "%s: %d device(s) found\n", __func__, k);
4156 	return SUCCESS;
4157 }
4158 
/* Write a DOS-style partition table (MBR) into the first block of the
 * ram store, dividing the space into sdebug_num_parts Linux partitions
 * aligned to cylinder boundaries.
 */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors per cylinder */
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert absolute sector numbers to CHS for the MBR entry */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;	/* sectors are 1-based */

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4208 
4209 static void block_unblock_all_queues(bool block)
4210 {
4211 	int j;
4212 	struct sdebug_queue *sqp;
4213 
4214 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4215 		atomic_set(&sqp->blocked, (int)block);
4216 }
4217 
4218 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4219  * commands will be processed normally before triggers occur.
4220  */
4221 static void tweak_cmnd_count(void)
4222 {
4223 	int count, modulo;
4224 
4225 	modulo = abs(sdebug_every_nth);
4226 	if (modulo < 2)
4227 		return;
4228 	block_unblock_all_queues(true);
4229 	count = atomic_read(&sdebug_cmnd_count);
4230 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4231 	block_unblock_all_queues(false);
4232 }
4233 
/* Reset all per-driver command/queue statistics counters to zero. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4241 
4242 static void setup_inject(struct sdebug_queue *sqp,
4243 			 struct sdebug_queued_cmd *sqcp)
4244 {
4245 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
4246 		return;
4247 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4248 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4249 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4250 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4251 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4252 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4253 }
4254 
4255 /* Complete the processing of the thread that queued a SCSI command to this
4256  * driver. It either completes the command by calling cmnd_done() or
4257  * schedules a hr timer or work queue then returns 0. Returns
4258  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4259  */
4260 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4261 			 int scsi_result, int delta_jiff)
4262 {
4263 	unsigned long iflags;
4264 	int k, num_in_q, qdepth, inject;
4265 	struct sdebug_queue *sqp;
4266 	struct sdebug_queued_cmd *sqcp;
4267 	struct scsi_device *sdp;
4268 	struct sdebug_defer *sd_dp;
4269 
4270 	if (unlikely(devip == NULL)) {
4271 		if (scsi_result == 0)
4272 			scsi_result = DID_NO_CONNECT << 16;
4273 		goto respond_in_thread;
4274 	}
4275 	sdp = cmnd->device;
4276 
4277 	if (unlikely(sdebug_verbose && scsi_result))
4278 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4279 			    __func__, scsi_result);
4280 	if (delta_jiff == 0)
4281 		goto respond_in_thread;
4282 
4283 	/* schedule the response at a later time if resources permit */
4284 	sqp = get_queue(cmnd);
4285 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4286 	if (unlikely(atomic_read(&sqp->blocked))) {
4287 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4288 		return SCSI_MLQUEUE_HOST_BUSY;
4289 	}
4290 	num_in_q = atomic_read(&devip->num_in_q);
4291 	qdepth = cmnd->device->queue_depth;
4292 	inject = 0;
4293 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4294 		if (scsi_result) {
4295 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4296 			goto respond_in_thread;
4297 		} else
4298 			scsi_result = device_qfull_result;
4299 	} else if (unlikely(sdebug_every_nth &&
4300 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4301 			    (scsi_result == 0))) {
4302 		if ((num_in_q == (qdepth - 1)) &&
4303 		    (atomic_inc_return(&sdebug_a_tsf) >=
4304 		     abs(sdebug_every_nth))) {
4305 			atomic_set(&sdebug_a_tsf, 0);
4306 			inject = 1;
4307 			scsi_result = device_qfull_result;
4308 		}
4309 	}
4310 
4311 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4312 	if (unlikely(k >= sdebug_max_queue)) {
4313 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4314 		if (scsi_result)
4315 			goto respond_in_thread;
4316 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4317 			scsi_result = device_qfull_result;
4318 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4319 			sdev_printk(KERN_INFO, sdp,
4320 				    "%s: max_queue=%d exceeded, %s\n",
4321 				    __func__, sdebug_max_queue,
4322 				    (scsi_result ?  "status: TASK SET FULL" :
4323 						    "report: host busy"));
4324 		if (scsi_result)
4325 			goto respond_in_thread;
4326 		else
4327 			return SCSI_MLQUEUE_HOST_BUSY;
4328 	}
4329 	__set_bit(k, sqp->in_use_bm);
4330 	atomic_inc(&devip->num_in_q);
4331 	sqcp = &sqp->qc_arr[k];
4332 	sqcp->a_cmnd = cmnd;
4333 	cmnd->host_scribble = (unsigned char *)sqcp;
4334 	cmnd->result = scsi_result;
4335 	sd_dp = sqcp->sd_dp;
4336 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4337 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4338 		setup_inject(sqp, sqcp);
4339 	if (delta_jiff > 0 || sdebug_ndelay > 0) {
4340 		ktime_t kt;
4341 
4342 		if (delta_jiff > 0) {
4343 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4344 		} else
4345 			kt = sdebug_ndelay;
4346 		if (NULL == sd_dp) {
4347 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4348 			if (NULL == sd_dp)
4349 				return SCSI_MLQUEUE_HOST_BUSY;
4350 			sqcp->sd_dp = sd_dp;
4351 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4352 				     HRTIMER_MODE_REL_PINNED);
4353 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4354 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4355 			sd_dp->qc_idx = k;
4356 		}
4357 		if (sdebug_statistics)
4358 			sd_dp->issuing_cpu = raw_smp_processor_id();
4359 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4360 	} else {	/* jdelay < 0, use work queue */
4361 		if (NULL == sd_dp) {
4362 			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4363 			if (NULL == sd_dp)
4364 				return SCSI_MLQUEUE_HOST_BUSY;
4365 			sqcp->sd_dp = sd_dp;
4366 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4367 			sd_dp->qc_idx = k;
4368 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4369 		}
4370 		if (sdebug_statistics)
4371 			sd_dp->issuing_cpu = raw_smp_processor_id();
4372 		schedule_work(&sd_dp->ew.work);
4373 	}
4374 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4375 		     (scsi_result == device_qfull_result)))
4376 		sdev_printk(KERN_INFO, sdp,
4377 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4378 			    num_in_q, (inject ? "<inject> " : ""),
4379 			    "status: TASK SET FULL");
4380 	return 0;
4381 
4382 respond_in_thread:	/* call back to mid-layer using invocation thread */
4383 	cmnd->result = scsi_result;
4384 	cmnd->scsi_done(cmnd);
4385 	return 0;
4386 }
4387 
4388 /* Note: The following macros create attribute files in the
4389    /sys/module/scsi_debug/parameters directory. Unfortunately this
4390    driver is unaware of a change and cannot trigger auxiliary actions
4391    as it can when the corresponding attribute in the
4392    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4393  */
4394 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4395 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4396 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4397 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4398 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4399 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4400 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4401 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4402 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4403 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4404 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4405 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4406 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4407 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4408 		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4409 module_param_string(inq_product, sdebug_inq_product_id,
4410 		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4411 module_param_string(inq_rev, sdebug_inq_product_rev,
4412 		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4413 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4414 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4415 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4416 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4417 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4418 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4419 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4420 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4421 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4422 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4423 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4424 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4425 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4426 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4427 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4428 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4429 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4430 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4431 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4432 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4433 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4434 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4435 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4436 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4437 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4438 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4439 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4440 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4441 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4442 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4443 		   S_IRUGO | S_IWUSR);
4444 module_param_named(write_same_length, sdebug_write_same_length, int,
4445 		   S_IRUGO | S_IWUSR);
4446 
4447 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4448 MODULE_DESCRIPTION("SCSI debug adapter driver");
4449 MODULE_LICENSE("GPL");
4450 MODULE_VERSION(SDEBUG_VERSION);
4451 
4452 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4453 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4454 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4455 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4456 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4457 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4458 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4459 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4460 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4461 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4462 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4463 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4464 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
/* Parameter descriptions shown by modinfo; keep these strings in sync with
 * the corresponding module_param_named() definitions earlier in this file. */
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");

/* fixed-size scratch buffer filled and returned by scsi_debug_info() */
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
4505 
4506 static const char * scsi_debug_info(struct Scsi_Host * shp)
4507 {
4508 	int k;
4509 
4510 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4511 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4512 	if (k >= (SDEBUG_INFO_LEN - 1))
4513 		return sdebug_info;
4514 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4515 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4516 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4517 		  "statistics", (int)sdebug_statistics);
4518 	return sdebug_info;
4519 }
4520 
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;	/* leave room for the NUL */

	/* opts can enable error injection; restrict to privileged callers */
	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	/* buffer is not guaranteed NUL-terminated; copy before parsing */
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	/* derived flags cached from the opts bit mask */
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}
4542 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per submit-queue snapshot of which command slots are busy */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		/* find_first_bit() returning the size means bitmap is empty */
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4588 
4589 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4590 {
4591 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4592 }
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* freeze all submit queues while switching variants */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				/* any set bit means a command is in flight */
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;	/* jiffies mode wins */
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4631 
4632 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4633 {
4634 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4635 }
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* freeze all submit queues while switching variants */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				/* any set bit means a command is in flight */
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4675 
4676 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4677 {
4678 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4679 }
4680 
4681 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4682 			  size_t count)
4683 {
4684 	int opts;
4685 	char work[20];
4686 
4687 	if (sscanf(buf, "%10s", work) == 1) {
4688 		if (strncasecmp(work, "0x", 2) == 0) {
4689 			if (kstrtoint(work + 2, 16, &opts) == 0)
4690 				goto opts_done;
4691 		} else {
4692 			if (kstrtoint(work, 10, &opts) == 0)
4693 				goto opts_done;
4694 		}
4695 	}
4696 	return -EINVAL;
4697 opts_done:
4698 	sdebug_opts = opts;
4699 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4700 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4701 	tweak_cmnd_count();
4702 	return count;
4703 }
4704 static DRIVER_ATTR_RW(opts);
4705 
4706 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4707 {
4708 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4709 }
4710 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4711 			   size_t count)
4712 {
4713 	int n;
4714 
4715 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4716 		sdebug_ptype = n;
4717 		return count;
4718 	}
4719 	return -EINVAL;
4720 }
4721 static DRIVER_ATTR_RW(ptype);
4722 
4723 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4724 {
4725 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4726 }
4727 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4728 			    size_t count)
4729 {
4730 	int n;
4731 
4732 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4733 		sdebug_dsense = n;
4734 		return count;
4735 	}
4736 	return -EINVAL;
4737 }
4738 static DRIVER_ATTR_RW(dsense);
4739 
4740 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4741 {
4742 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4743 }
4744 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4745 			     size_t count)
4746 {
4747 	int n;
4748 
4749 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4750 		n = (n > 0);
4751 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4752 		if (sdebug_fake_rw != n) {
4753 			if ((0 == n) && (NULL == fake_storep)) {
4754 				unsigned long sz =
4755 					(unsigned long)sdebug_dev_size_mb *
4756 					1048576;
4757 
4758 				fake_storep = vmalloc(sz);
4759 				if (NULL == fake_storep) {
4760 					pr_err("out of memory, 9\n");
4761 					return -ENOMEM;
4762 				}
4763 				memset(fake_storep, 0, sz);
4764 			}
4765 			sdebug_fake_rw = n;
4766 		}
4767 		return count;
4768 	}
4769 	return -EINVAL;
4770 }
4771 static DRIVER_ATTR_RW(fake_rw);
4772 
4773 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4774 {
4775 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4776 }
4777 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4778 			      size_t count)
4779 {
4780 	int n;
4781 
4782 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4783 		sdebug_no_lun_0 = n;
4784 		return count;
4785 	}
4786 	return -EINVAL;
4787 }
4788 static DRIVER_ATTR_RW(no_lun_0);
4789 
4790 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4791 {
4792 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4793 }
4794 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4795 			      size_t count)
4796 {
4797 	int n;
4798 
4799 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4800 		sdebug_num_tgts = n;
4801 		sdebug_max_tgts_luns();
4802 		return count;
4803 	}
4804 	return -EINVAL;
4805 }
4806 static DRIVER_ATTR_RW(num_tgts);
4807 
4808 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4809 {
4810 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4811 }
4812 static DRIVER_ATTR_RO(dev_size_mb);
4813 
4814 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4815 {
4816 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4817 }
4818 static DRIVER_ATTR_RO(num_parts);
4819 
4820 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4821 {
4822 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4823 }
4824 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4825 			       size_t count)
4826 {
4827 	int nth;
4828 
4829 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4830 		sdebug_every_nth = nth;
4831 		if (nth && !sdebug_statistics) {
4832 			pr_info("every_nth needs statistics=1, set it\n");
4833 			sdebug_statistics = true;
4834 		}
4835 		tweak_cmnd_count();
4836 		return count;
4837 	}
4838 	return -EINVAL;
4839 }
4840 static DRIVER_ATTR_RW(every_nth);
4841 
4842 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4843 {
4844 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4845 }
/* Change LUNs per target (capped at 256) and, for SPC-3 or later devices,
 * raise a REPORTED LUNS DATA HAS CHANGED unit attention on every device. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the UA on every device of every pseudo host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4880 
4881 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4882 {
4883 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4884 }
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k := highest in-use slot index over all submit queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* find_last_bit() == size: no slots in use */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* commands in flight above the new limit: remember
			 * the old ceiling until they complete */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4916 
4917 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4918 {
4919 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4920 }
4921 static DRIVER_ATTR_RO(no_uld);
4922 
4923 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4924 {
4925 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4926 }
4927 static DRIVER_ATTR_RO(scsi_level);
4928 
4929 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4930 {
4931 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4932 }
/* Change virtual capacity; on an actual change raise a CAPACITY DATA HAS
 * CHANGED unit attention on every simulated device. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the UA on every device of every pseudo host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4963 
4964 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4965 {
4966 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4967 }
4968 
4969 static int sdebug_add_adapter(void);
4970 static void sdebug_remove_adapter(void);
4971 
4972 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4973 			      size_t count)
4974 {
4975 	int delta_hosts;
4976 
4977 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4978 		return -EINVAL;
4979 	if (delta_hosts > 0) {
4980 		do {
4981 			sdebug_add_adapter();
4982 		} while (--delta_hosts);
4983 	} else if (delta_hosts < 0) {
4984 		do {
4985 			sdebug_remove_adapter();
4986 		} while (++delta_hosts);
4987 	}
4988 	return count;
4989 }
4990 static DRIVER_ATTR_RW(add_host);
4991 
4992 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4993 {
4994 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4995 }
4996 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4997 				    size_t count)
4998 {
4999 	int n;
5000 
5001 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5002 		sdebug_vpd_use_hostno = n;
5003 		return count;
5004 	}
5005 	return -EINVAL;
5006 }
5007 static DRIVER_ATTR_RW(vpd_use_hostno);
5008 
5009 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5010 {
5011 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5012 }
5013 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5014 				size_t count)
5015 {
5016 	int n;
5017 
5018 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5019 		if (n > 0)
5020 			sdebug_statistics = true;
5021 		else {
5022 			clear_queue_stats();
5023 			sdebug_statistics = false;
5024 		}
5025 		return count;
5026 	}
5027 	return -EINVAL;
5028 }
5029 static DRIVER_ATTR_RW(statistics);
5030 
5031 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5032 {
5033 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5034 }
5035 static DRIVER_ATTR_RO(sector_size);
5036 
5037 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5038 {
5039 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5040 }
5041 static DRIVER_ATTR_RO(submit_queues);
5042 
5043 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5044 {
5045 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5046 }
5047 static DRIVER_ATTR_RO(dix);
5048 
5049 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5050 {
5051 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5052 }
5053 static DRIVER_ATTR_RO(dif);
5054 
5055 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5056 {
5057 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5058 }
5059 static DRIVER_ATTR_RO(guard);
5060 
5061 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5062 {
5063 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5064 }
5065 static DRIVER_ATTR_RO(ato);
5066 
/* Show the logical block provisioning map as a range list. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* without LBP enabled, the whole store counts as mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* %*pbl prints the bitmap as a range list; PAGE_SIZE - 1 leaves
	 * room for the '\n' appended below */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5083 
5084 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5085 {
5086 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5087 }
5088 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5089 			       size_t count)
5090 {
5091 	int n;
5092 
5093 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5094 		sdebug_removable = (n > 0);
5095 		return count;
5096 	}
5097 	return -EINVAL;
5098 }
5099 static DRIVER_ATTR_RW(removable);
5100 
5101 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5102 {
5103 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5104 }
5105 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5106 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5107 			       size_t count)
5108 {
5109 	int n;
5110 
5111 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5112 		sdebug_host_lock = (n > 0);
5113 		return count;
5114 	}
5115 	return -EINVAL;
5116 }
5117 static DRIVER_ATTR_RW(host_lock);
5118 
5119 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5120 {
5121 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5122 }
5123 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5124 			    size_t count)
5125 {
5126 	int n;
5127 
5128 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5129 		sdebug_strict = (n > 0);
5130 		return count;
5131 	}
5132 	return -EINVAL;
5133 }
5134 static DRIVER_ATTR_RW(strict);
5135 
5136 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5137 {
5138 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5139 }
5140 static DRIVER_ATTR_RO(uuid_ctl);
5141 
5142 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5143 {
5144 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5145 }
5146 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5147 			     size_t count)
5148 {
5149 	int ret, n;
5150 
5151 	ret = kstrtoint(buf, 0, &n);
5152 	if (ret)
5153 		return ret;
5154 	sdebug_cdb_len = n;
5155 	all_config_cdb_len();
5156 	return count;
5157 }
5158 static DRIVER_ATTR_RW(cdb_len);
5159 
5160 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,	/* sentinel required by the attribute group machinery */
};
ATTRIBUTE_GROUPS(sdebug_drv);
5203 
5204 static struct device *pseudo_primary;
5205 
/* Module init: validate parameters, size the ram disk and geometry,
 * allocate backing stores (data, DIF, provisioning map), register the
 * pseudo bus/driver and create the initial pseudo host(s). */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* the ram disk is only needed when reads/writes are not faked */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		/* one 8-byte protection tuple per logical block */
		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff pattern marks protection info as not yet written */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* one bit per provisioning block */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host per success, so
	 * clear it first and use the original value as the target count */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5414 
5415 static void __exit scsi_debug_exit(void)
5416 {
5417 	int k = sdebug_add_host;
5418 
5419 	stop_all_queued();
5420 	free_all_queued();
5421 	for (; k; k--)
5422 		sdebug_remove_adapter();
5423 	driver_unregister(&sdebug_driverfs_driver);
5424 	bus_unregister(&pseudo_lld_bus);
5425 	root_device_unregister(pseudo_primary);
5426 
5427 	vfree(map_storep);
5428 	vfree(dif_storep);
5429 	vfree(fake_storep);
5430 	kfree(sdebug_q_arr);
5431 }
5432 
5433 device_initcall(scsi_debug_init);
5434 module_exit(scsi_debug_exit);
5435 
/* Device-core release callback: frees the host container once the
 * embedded device's reference count drops to zero. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);

	kfree(sdbg_host);
}
5443 
5444 static int sdebug_add_adapter(void)
5445 {
5446 	int k, devs_per_host;
5447 	int error = 0;
5448 	struct sdebug_host_info *sdbg_host;
5449 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5450 
5451 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5452 	if (sdbg_host == NULL) {
5453 		pr_err("out of memory at line %d\n", __LINE__);
5454 		return -ENOMEM;
5455 	}
5456 
5457 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5458 
5459 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5460 	for (k = 0; k < devs_per_host; k++) {
5461 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5462 		if (!sdbg_devinfo) {
5463 			pr_err("out of memory at line %d\n", __LINE__);
5464 			error = -ENOMEM;
5465 			goto clean;
5466 		}
5467 	}
5468 
5469 	spin_lock(&sdebug_host_list_lock);
5470 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5471 	spin_unlock(&sdebug_host_list_lock);
5472 
5473 	sdbg_host->dev.bus = &pseudo_lld_bus;
5474 	sdbg_host->dev.parent = pseudo_primary;
5475 	sdbg_host->dev.release = &sdebug_release_adapter;
5476 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5477 
5478 	error = device_register(&sdbg_host->dev);
5479 
5480 	if (error)
5481 		goto clean;
5482 
5483 	++sdebug_add_host;
5484 	return error;
5485 
5486 clean:
5487 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5488 				 dev_list) {
5489 		list_del(&sdbg_devinfo->dev_list);
5490 		kfree(sdbg_devinfo);
5491 	}
5492 
5493 	kfree(sdbg_host);
5494 	return error;
5495 }
5496 
/* Remove the most recently added pseudo host, if any. */
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info *sdbg_host = NULL;

	/* pop the tail of the host list under the lock */
	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	/* final kfree() happens in sdebug_release_adapter() */
	device_unregister(&sdbg_host->dev);
	--sdebug_add_host;
}
5515 
/* Scsi_Host change_queue_depth callback: clamp and apply the requested
 * queue depth for one device; returns the resulting depth or -ENODEV. */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	/* quiesce all submit queues while changing the depth */
	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5543 
5544 static bool fake_timeout(struct scsi_cmnd *scp)
5545 {
5546 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5547 		if (sdebug_every_nth < -1)
5548 			sdebug_every_nth = -1;
5549 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5550 			return true; /* ignore command causing timeout */
5551 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5552 			 scsi_medium_access_command(scp))
5553 			return true; /* time out reads and writes */
5554 	}
5555 	return false;
5556 }
5557 
5558 static bool fake_host_busy(struct scsi_cmnd *scp)
5559 {
5560 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5561 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5562 }
5563 
/*
 * .queuecommand handler: decode the CDB against opcode_info_arr, apply
 * the configured fault injections (fake host-busy, timeouts, unit
 * attentions, not-ready), then dispatch to the matching resp_* function
 * and queue the response via schedule_resp().
 *
 * Returns 0 (command accepted/queued, possibly with sense data) or
 * SCSI_MLQUEUE_HOST_BUSY.  Unknown LUNs complete with DID_NO_CONNECT.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	/* root entry's handler, fallback when a leaf has no pfp */
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the raw CDB bytes (hex) to the log */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	/* injected host-busy: push back to the mid-layer for a retry */
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	/* the REPORT LUNS well-known LUN is valid beyond sdebug_max_luns */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		/* variants are distinguished by service action (SA) */
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			/* scan the attached array for opcode+SA match */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		/* no variant matched: ILLEGAL REQUEST with a field pointer
		 * at the SA byte when an SA was expected */
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* the well-known LUN only accepts commands flagged F_RL_WLUN_OK */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* len_mask[0] holds the CDB length; bytes with bits set
		 * outside their mask yield an invalid-field sense with the
		 * exact bit position */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* stopped unit: medium-access commands get NOT READY */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	/* fake_rw mode: complete reads/writes without touching the store */
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5712 
/*
 * Host template for the emulated adapter.  can_queue and use_clustering
 * are overwritten per-host in sdebug_driver_probe() from the
 * sdebug_max_queue and sdebug_clustering module parameters.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,	/* emulated initiator ID */
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no transfer-length limit */
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5739 
/*
 * Bus probe callback: allocate and register one emulated SCSI host for
 * the given pseudo-bus device.  Configures blk-mq queue count, target
 * and LUN limits, and DIF/DIX protection capabilities from the module
 * parameters, then scans the host.  Returns 0 or -ENODEV.
 */
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	/* hostdata only stores a back-pointer, hence sizeof the pointer */
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
	if (sdebug_mq_active)
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room so target IDs can skip the initiator's this_id */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	/* build the host protection mask from sdebug_dif/sdebug_dix */
	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
5837 
5838 static int sdebug_driver_remove(struct device * dev)
5839 {
5840 	struct sdebug_host_info *sdbg_host;
5841 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5842 
5843 	sdbg_host = to_sdebug_host(dev);
5844 
5845 	if (!sdbg_host) {
5846 		pr_err("Unable to locate host info\n");
5847 		return -ENODEV;
5848 	}
5849 
5850 	scsi_remove_host(sdbg_host->shost);
5851 
5852 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5853 				 dev_list) {
5854 		list_del(&sdbg_devinfo->dev_list);
5855 		kfree(sdbg_devinfo);
5856 	}
5857 
5858 	scsi_host_put(sdbg_host->shost);
5859 	return 0;
5860 }
5861 
/*
 * Bus match callback: every driver matches every device on the pseudo
 * bus, so probe is always attempted.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5867 
/* Fake bus that the emulated adapters hang off; its probe/remove hooks
 * create and destroy the SCSI hosts. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5875