xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 4f205687)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "1.86"
64 static const char *sdebug_version_date = "20160430";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
95 
96 /* Additional Sense Code Qualifier (ASCQ) */
97 #define ACK_NAK_TO 0x3
98 
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW	0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   TYPE_DISK
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DEF_STATISTICS false
140 #define DEF_SUBMIT_QUEUES 1
141 #define DEF_UUID_CTL 0
142 #define JDELAY_OVERRIDDEN -9999
143 
144 #define SDEBUG_LUN_0_VAL 0
145 
146 /* bit mask values for sdebug_opts */
147 #define SDEBUG_OPT_NOISE		1
148 #define SDEBUG_OPT_MEDIUM_ERR		2
149 #define SDEBUG_OPT_TIMEOUT		4
150 #define SDEBUG_OPT_RECOVERED_ERR	8
151 #define SDEBUG_OPT_TRANSPORT_ERR	16
152 #define SDEBUG_OPT_DIF_ERR		32
153 #define SDEBUG_OPT_DIX_ERR		64
154 #define SDEBUG_OPT_MAC_TIMEOUT		128
155 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
156 #define SDEBUG_OPT_Q_NOISE		0x200
157 #define SDEBUG_OPT_ALL_TSF		0x400
158 #define SDEBUG_OPT_RARE_TSF		0x800
159 #define SDEBUG_OPT_N_WCE		0x1000
160 #define SDEBUG_OPT_RESET_NOISE		0x2000
161 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
162 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
163 			      SDEBUG_OPT_RESET_NOISE)
164 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
165 				  SDEBUG_OPT_TRANSPORT_ERR | \
166 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
167 				  SDEBUG_OPT_SHORT_TRANSFER)
168 /* When "every_nth" > 0 then modulo "every_nth" commands:
169  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
170  *   - a RECOVERED_ERROR is simulated on successful read and write
171  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
172  *   - a TRANSPORT_ERROR is simulated on successful read and write
173  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
174  *
175  * When "every_nth" < 0 then after "- every_nth" commands:
176  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
177  *   - a RECOVERED_ERROR is simulated on successful read and write
178  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
179  *   - a TRANSPORT_ERROR is simulated on successful read and write
180  *     commands if _DEBUG_OPT_TRANSPORT_ERR is set.
181  * This will continue on every subsequent command until some other action
182  * occurs (e.g. the user * writing a new value (other than -1 or 1) to
183  * every_nth via sysfs).
184  */
185 
186 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
187  * priority order. In the subset implemented here lower numbers have higher
188  * priority. The UA numbers should be a sequence starting from 0 with
189  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
190 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
191 #define SDEBUG_UA_BUS_RESET 1
192 #define SDEBUG_UA_MODE_CHANGED 2
193 #define SDEBUG_UA_CAPACITY_CHANGED 3
194 #define SDEBUG_UA_LUNS_CHANGED 4
195 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
196 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
197 #define SDEBUG_NUM_UAS 7
198 
199 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200  * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
203 
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205  * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
207 
208 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
209  * (for response) per submit queue at one time. Can be reduced by max_queue
210  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
211  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
212  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
213  * but cannot exceed SDEBUG_CANQUEUE .
214  */
215 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
216 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
217 #define DEF_CMD_PER_LUN  255
218 
219 #define F_D_IN			1
220 #define F_D_OUT			2
221 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
222 #define F_D_UNKN		8
223 #define F_RL_WLUN_OK		0x10
224 #define F_SKIP_UA		0x20
225 #define F_DELAY_OVERR		0x40
226 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
227 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
228 #define F_INV_OP		0x200
229 #define F_FAKE_RW		0x400
230 #define F_M_ACCESS		0x800	/* media access */
231 
232 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
233 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
234 #define FF_SA (F_SA_HIGH | F_SA_LOW)
235 
236 #define SDEBUG_MAX_PARTS 4
237 
238 #define SDEBUG_MAX_CMD_LEN 32
239 
240 
241 struct sdebug_dev_info {
242 	struct list_head dev_list;
243 	unsigned int channel;
244 	unsigned int target;
245 	u64 lun;
246 	uuid_be lu_name;
247 	struct sdebug_host_info *sdbg_host;
248 	unsigned long uas_bm[1];
249 	atomic_t num_in_q;
250 	atomic_t stopped;
251 	bool used;
252 };
253 
254 struct sdebug_host_info {
255 	struct list_head host_list;
256 	struct Scsi_Host *shost;
257 	struct device dev;
258 	struct list_head dev_info_list;
259 };
260 
261 #define to_sdebug_host(d)	\
262 	container_of(d, struct sdebug_host_info, dev)
263 
264 struct sdebug_defer {
265 	struct hrtimer hrt;
266 	struct execute_work ew;
267 	int sqa_idx;	/* index of sdebug_queue array */
268 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
269 	int issuing_cpu;
270 };
271 
272 struct sdebug_queued_cmd {
273 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
274 	 * instance indicates this slot is in use.
275 	 */
276 	struct sdebug_defer *sd_dp;
277 	struct scsi_cmnd *a_cmnd;
278 	unsigned int inj_recovered:1;
279 	unsigned int inj_transport:1;
280 	unsigned int inj_dif:1;
281 	unsigned int inj_dix:1;
282 	unsigned int inj_short:1;
283 };
284 
285 struct sdebug_queue {
286 	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
287 	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
288 	spinlock_t qc_lock;
289 	atomic_t blocked;	/* to temporarily stop more being queued */
290 };
291 
292 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
293 static atomic_t sdebug_completions;  /* count of deferred completions */
294 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
295 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
296 
297 struct opcode_info_t {
298 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
299 				/* for terminating element */
300 	u8 opcode;		/* if num_attached > 0, preferred */
301 	u16 sa;			/* service action */
302 	u32 flags;		/* OR-ed set of SDEB_F_* */
303 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
304 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
305 	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
306 				/* ignore cdb bytes after position 15 */
307 };
308 
309 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
310 enum sdeb_opcode_index {
311 	SDEB_I_INVALID_OPCODE =	0,
312 	SDEB_I_INQUIRY = 1,
313 	SDEB_I_REPORT_LUNS = 2,
314 	SDEB_I_REQUEST_SENSE = 3,
315 	SDEB_I_TEST_UNIT_READY = 4,
316 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
317 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
318 	SDEB_I_LOG_SENSE = 7,
319 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
320 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
321 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
322 	SDEB_I_START_STOP = 11,
323 	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
324 	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
325 	SDEB_I_MAINT_IN = 14,
326 	SDEB_I_MAINT_OUT = 15,
327 	SDEB_I_VERIFY = 16,		/* 10 only */
328 	SDEB_I_VARIABLE_LEN = 17,
329 	SDEB_I_RESERVE = 18,		/* 6, 10 */
330 	SDEB_I_RELEASE = 19,		/* 6, 10 */
331 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
332 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
333 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
334 	SDEB_I_SEND_DIAG = 23,
335 	SDEB_I_UNMAP = 24,
336 	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
337 	SDEB_I_WRITE_BUFFER = 26,
338 	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
339 	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
340 	SDEB_I_COMP_WRITE = 29,
341 	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
342 };
343 
344 
345 static const unsigned char opcode_ind_arr[256] = {
346 /* 0x0; 0x0->0x1f: 6 byte cdbs */
347 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
348 	    0, 0, 0, 0,
349 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
350 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
351 	    SDEB_I_RELEASE,
352 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
353 	    SDEB_I_ALLOW_REMOVAL, 0,
354 /* 0x20; 0x20->0x3f: 10 byte cdbs */
355 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
356 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
357 	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
358 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
359 /* 0x40; 0x40->0x5f: 10 byte cdbs */
360 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
361 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
362 	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
363 	    SDEB_I_RELEASE,
364 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
365 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
366 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
367 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 	0, SDEB_I_VARIABLE_LEN,
369 /* 0x80; 0x80->0x9f: 16 byte cdbs */
370 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
371 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
372 	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
373 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
374 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
375 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
376 	     SDEB_I_MAINT_OUT, 0, 0, 0,
377 	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
378 	     0, 0, 0, 0,
379 	0, 0, 0, 0, 0, 0, 0, 0,
380 	0, 0, 0, 0, 0, 0, 0, 0,
381 /* 0xc0; 0xc0->0xff: vendor specific */
382 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
383 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
384 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
385 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
386 };
387 
388 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
389 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
390 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
391 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
392 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
393 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
394 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
409 
410 static const struct opcode_info_t msense_iarr[1] = {
411 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
412 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
413 };
414 
415 static const struct opcode_info_t mselect_iarr[1] = {
416 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
417 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
418 };
419 
420 static const struct opcode_info_t read_iarr[3] = {
421 	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
422 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
423 	     0, 0, 0, 0} },
424 	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
425 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426 	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
427 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
428 	     0xc7, 0, 0, 0, 0} },
429 };
430 
431 static const struct opcode_info_t write_iarr[3] = {
432 	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
433 	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
434 	     0, 0, 0, 0} },
435 	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
436 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437 	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
438 	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
439 	     0xc7, 0, 0, 0, 0} },
440 };
441 
442 static const struct opcode_info_t sa_in_iarr[1] = {
443 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
444 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445 	     0xff, 0xff, 0xff, 0, 0xc7} },
446 };
447 
448 static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
449 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
450 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
451 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
452 };
453 
454 static const struct opcode_info_t maint_in_iarr[2] = {
455 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
456 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
457 	     0xc7, 0, 0, 0, 0} },
458 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
459 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
460 	     0, 0} },
461 };
462 
463 static const struct opcode_info_t write_same_iarr[1] = {
464 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
465 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
466 	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
467 };
468 
469 static const struct opcode_info_t reserve_iarr[1] = {
470 	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
471 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
472 };
473 
474 static const struct opcode_info_t release_iarr[1] = {
475 	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
476 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
477 };
478 
479 
480 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
481  * plus the terminating elements for logic that scans this table such as
482  * REPORT SUPPORTED OPERATION CODES. */
483 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
484 /* 0 */
485 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
486 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
487 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
488 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
490 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
491 	     0, 0} },
492 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
493 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
494 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
495 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
497 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
498 	     0} },
499 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
500 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
501 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
502 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
503 	     0, 0, 0} },
504 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
505 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
506 	     0, 0} },
507 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
508 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
509 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
510 /* 10 */
511 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
512 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
513 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
514 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
515 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
517 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
518 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
519 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
520 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
521 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
522 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
523 	     0} },
524 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
525 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
528 	     0, 0, 0, 0, 0, 0} },
529 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
530 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
531 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
532 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
533 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
534 	     0} },
535 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
536 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
537 	     0} },
538 /* 20 */
539 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
540 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
541 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
542 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
544 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
545 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
546 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
548 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
549 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
550 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
553 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
554 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
555 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
556 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
557 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
558 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
559 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
560 	     0, 0, 0, 0} },
561 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
562 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
563 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
564 
565 /* 30 */
566 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
567 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
568 };
569 
570 static int sdebug_add_host = DEF_NUM_HOST;
571 static int sdebug_ato = DEF_ATO;
572 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
573 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
574 static int sdebug_dif = DEF_DIF;
575 static int sdebug_dix = DEF_DIX;
576 static int sdebug_dsense = DEF_D_SENSE;
577 static int sdebug_every_nth = DEF_EVERY_NTH;
578 static int sdebug_fake_rw = DEF_FAKE_RW;
579 static unsigned int sdebug_guard = DEF_GUARD;
580 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
581 static int sdebug_max_luns = DEF_MAX_LUNS;
582 static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
583 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
584 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
585 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
586 static int sdebug_no_uld;
587 static int sdebug_num_parts = DEF_NUM_PARTS;
588 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
589 static int sdebug_opt_blks = DEF_OPT_BLKS;
590 static int sdebug_opts = DEF_OPTS;
591 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
592 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
593 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
594 static int sdebug_sector_size = DEF_SECTOR_SIZE;
595 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
596 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
597 static unsigned int sdebug_lbpu = DEF_LBPU;
598 static unsigned int sdebug_lbpws = DEF_LBPWS;
599 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
600 static unsigned int sdebug_lbprz = DEF_LBPRZ;
601 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
602 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
603 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
604 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
605 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
606 static int sdebug_uuid_ctl = DEF_UUID_CTL;
607 static bool sdebug_removable = DEF_REMOVABLE;
608 static bool sdebug_clustering;
609 static bool sdebug_host_lock = DEF_HOST_LOCK;
610 static bool sdebug_strict = DEF_STRICT;
611 static bool sdebug_any_injecting_opt;
612 static bool sdebug_verbose;
613 static bool have_dif_prot;
614 static bool sdebug_statistics = DEF_STATISTICS;
615 static bool sdebug_mq_active;
616 
617 static unsigned int sdebug_store_sectors;
618 static sector_t sdebug_capacity;	/* in sectors */
619 
620 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
621    may still need them */
622 static int sdebug_heads;		/* heads per disk */
623 static int sdebug_cylinders_per;	/* cylinders per surface */
624 static int sdebug_sectors_per;		/* sectors per cylinder */
625 
626 static LIST_HEAD(sdebug_host_list);
627 static DEFINE_SPINLOCK(sdebug_host_list_lock);
628 
629 static unsigned char *fake_storep;	/* ramdisk storage */
630 static struct sd_dif_tuple *dif_storep;	/* protection info */
631 static void *map_storep;		/* provisioning map */
632 
633 static unsigned long map_size;
634 static int num_aborts;
635 static int num_dev_resets;
636 static int num_target_resets;
637 static int num_bus_resets;
638 static int num_host_resets;
639 static int dix_writes;
640 static int dix_reads;
641 static int dif_errors;
642 
643 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
644 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
645 
646 static DEFINE_RWLOCK(atomic_rw);
647 
648 static char sdebug_proc_name[] = MY_NAME;
649 static const char *my_name = MY_NAME;
650 
651 static struct bus_type pseudo_lld_bus;
652 
653 static struct device_driver sdebug_driverfs_driver = {
654 	.name 		= sdebug_proc_name,
655 	.bus		= &pseudo_lld_bus,
656 };
657 
658 static const int check_condition_result =
659 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
660 
661 static const int illegal_condition_result =
662 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
663 
664 static const int device_qfull_result =
665 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
666 
667 
668 /* Only do the extra work involved in logical block provisioning if one or
669  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
670  * real reads and writes (i.e. not skipping them for speed).
671  */
672 static inline bool scsi_debug_lbp(void)
673 {
674 	return 0 == sdebug_fake_rw &&
675 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
676 }
677 
678 static void *fake_store(unsigned long long lba)
679 {
680 	lba = do_div(lba, sdebug_store_sectors);
681 
682 	return fake_storep + lba * sdebug_sector_size;
683 }
684 
685 static struct sd_dif_tuple *dif_store(sector_t sector)
686 {
687 	sector = sector_div(sector, sdebug_store_sectors);
688 
689 	return dif_storep + sector;
690 }
691 
692 static void sdebug_max_tgts_luns(void)
693 {
694 	struct sdebug_host_info *sdbg_host;
695 	struct Scsi_Host *hpnt;
696 
697 	spin_lock(&sdebug_host_list_lock);
698 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
699 		hpnt = sdbg_host->shost;
700 		if ((hpnt->this_id >= 0) &&
701 		    (sdebug_num_tgts > hpnt->this_id))
702 			hpnt->max_id = sdebug_num_tgts + 1;
703 		else
704 			hpnt->max_id = sdebug_num_tgts;
705 		/* sdebug_max_luns; */
706 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
707 	}
708 	spin_unlock(&sdebug_host_list_lock);
709 }
710 
711 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
712 
713 /* Set in_bit to -1 to indicate no bit position of invalid field */
714 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
715 				 enum sdeb_cmd_data c_d,
716 				 int in_byte, int in_bit)
717 {
718 	unsigned char *sbuff;
719 	u8 sks[4];
720 	int sl, asc;
721 
722 	sbuff = scp->sense_buffer;
723 	if (!sbuff) {
724 		sdev_printk(KERN_ERR, scp->device,
725 			    "%s: sense_buffer is NULL\n", __func__);
726 		return;
727 	}
728 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
729 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
730 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
731 	memset(sks, 0, sizeof(sks));
732 	sks[0] = 0x80;
733 	if (c_d)
734 		sks[0] |= 0x40;
735 	if (in_bit >= 0) {
736 		sks[0] |= 0x8;
737 		sks[0] |= 0x7 & in_bit;
738 	}
739 	put_unaligned_be16(in_byte, sks + 1);
740 	if (sdebug_dsense) {
741 		sl = sbuff[7] + 8;
742 		sbuff[7] = sl;
743 		sbuff[sl] = 0x2;
744 		sbuff[sl + 1] = 0x6;
745 		memcpy(sbuff + sl + 4, sks, 3);
746 	} else
747 		memcpy(sbuff + 15, sks, 3);
748 	if (sdebug_verbose)
749 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
750 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
751 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
752 }
753 
754 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
755 {
756 	unsigned char *sbuff;
757 
758 	sbuff = scp->sense_buffer;
759 	if (!sbuff) {
760 		sdev_printk(KERN_ERR, scp->device,
761 			    "%s: sense_buffer is NULL\n", __func__);
762 		return;
763 	}
764 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
765 
766 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
767 
768 	if (sdebug_verbose)
769 		sdev_printk(KERN_INFO, scp->device,
770 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
771 			    my_name, key, asc, asq);
772 }
773 
774 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
775 {
776 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
777 }
778 
779 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
780 {
781 	if (sdebug_verbose) {
782 		if (0x1261 == cmd)
783 			sdev_printk(KERN_INFO, dev,
784 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
785 		else if (0x5331 == cmd)
786 			sdev_printk(KERN_INFO, dev,
787 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
788 				    __func__);
789 		else
790 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
791 				    __func__, cmd);
792 	}
793 	return -EINVAL;
794 	/* return -ENOTTY; // correct return but upsets fdisk */
795 }
796 
797 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
798 {
799 	struct sdebug_host_info *sdhp;
800 	struct sdebug_dev_info *dp;
801 
802 	spin_lock(&sdebug_host_list_lock);
803 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
804 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
805 			if ((devip->sdbg_host == dp->sdbg_host) &&
806 			    (devip->target == dp->target))
807 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
808 		}
809 	}
810 	spin_unlock(&sdebug_host_list_lock);
811 }
812 
813 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
814 {
815 	int k;
816 
817 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
818 	if (k != SDEBUG_NUM_UAS) {
819 		const char *cp = NULL;
820 
821 		switch (k) {
822 		case SDEBUG_UA_POR:
823 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
824 					POWER_ON_RESET_ASCQ);
825 			if (sdebug_verbose)
826 				cp = "power on reset";
827 			break;
828 		case SDEBUG_UA_BUS_RESET:
829 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
830 					BUS_RESET_ASCQ);
831 			if (sdebug_verbose)
832 				cp = "bus reset";
833 			break;
834 		case SDEBUG_UA_MODE_CHANGED:
835 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
836 					MODE_CHANGED_ASCQ);
837 			if (sdebug_verbose)
838 				cp = "mode parameters changed";
839 			break;
840 		case SDEBUG_UA_CAPACITY_CHANGED:
841 			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
842 					CAPACITY_CHANGED_ASCQ);
843 			if (sdebug_verbose)
844 				cp = "capacity data changed";
845 			break;
846 		case SDEBUG_UA_MICROCODE_CHANGED:
847 			mk_sense_buffer(scp, UNIT_ATTENTION,
848 					TARGET_CHANGED_ASC,
849 					MICROCODE_CHANGED_ASCQ);
850 			if (sdebug_verbose)
851 				cp = "microcode has been changed";
852 			break;
853 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
854 			mk_sense_buffer(scp, UNIT_ATTENTION,
855 					TARGET_CHANGED_ASC,
856 					MICROCODE_CHANGED_WO_RESET_ASCQ);
857 			if (sdebug_verbose)
858 				cp = "microcode has been changed without reset";
859 			break;
860 		case SDEBUG_UA_LUNS_CHANGED:
861 			/*
862 			 * SPC-3 behavior is to report a UNIT ATTENTION with
863 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
864 			 * on the target, until a REPORT LUNS command is
865 			 * received.  SPC-4 behavior is to report it only once.
866 			 * NOTE:  sdebug_scsi_level does not use the same
867 			 * values as struct scsi_device->scsi_level.
868 			 */
869 			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
870 				clear_luns_changed_on_target(devip);
871 			mk_sense_buffer(scp, UNIT_ATTENTION,
872 					TARGET_CHANGED_ASC,
873 					LUNS_CHANGED_ASCQ);
874 			if (sdebug_verbose)
875 				cp = "reported luns data has changed";
876 			break;
877 		default:
878 			pr_warn("unexpected unit attention code=%d\n", k);
879 			if (sdebug_verbose)
880 				cp = "unknown";
881 			break;
882 		}
883 		clear_bit(k, devip->uas_bm);
884 		if (sdebug_verbose)
885 			sdev_printk(KERN_INFO, scp->device,
886 				   "%s reports: Unit attention: %s\n",
887 				   my_name, cp);
888 		return check_condition_result;
889 	}
890 	return 0;
891 }
892 
893 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
894 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
895 				int arr_len)
896 {
897 	int act_len;
898 	struct scsi_data_buffer *sdb = scsi_in(scp);
899 
900 	if (!sdb->length)
901 		return 0;
902 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
903 		return DID_ERROR << 16;
904 
905 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
906 				      arr, arr_len);
907 	sdb->resid = scsi_bufflen(scp) - act_len;
908 
909 	return 0;
910 }
911 
912 /* Returns number of bytes fetched into 'arr' or -1 if error. */
913 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
914 			       int arr_len)
915 {
916 	if (!scsi_bufflen(scp))
917 		return 0;
918 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
919 		return -1;
920 
921 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
922 }
923 
924 
925 static const char * inq_vendor_id = "Linux   ";
926 static const char * inq_product_id = "scsi_debug      ";
927 static const char *inq_product_rev = "0186";	/* version less '.' */
928 /* Use some locally assigned NAAs for SAS addresses. */
929 static const u64 naa3_comp_a = 0x3222222000000000ULL;
930 static const u64 naa3_comp_b = 0x3333333000000000ULL;
931 static const u64 naa3_comp_c = 0x3111111000000000ULL;
932 
933 /* Device identification VPD page. Returns number of bytes placed in arr */
934 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
935 			  int target_dev_id, int dev_id_num,
936 			  const char *dev_id_str, int dev_id_str_len,
937 			  const uuid_be *lu_name)
938 {
939 	int num, port_a;
940 	char b[32];
941 
942 	port_a = target_dev_id + 1;
943 	/* T10 vendor identifier field format (faked) */
944 	arr[0] = 0x2;	/* ASCII */
945 	arr[1] = 0x1;
946 	arr[2] = 0x0;
947 	memcpy(&arr[4], inq_vendor_id, 8);
948 	memcpy(&arr[12], inq_product_id, 16);
949 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
950 	num = 8 + 16 + dev_id_str_len;
951 	arr[3] = num;
952 	num += 4;
953 	if (dev_id_num >= 0) {
954 		if (sdebug_uuid_ctl) {
955 			/* Locally assigned UUID */
956 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
957 			arr[num++] = 0xa;  /* PIV=0, lu, naa */
958 			arr[num++] = 0x0;
959 			arr[num++] = 0x12;
960 			arr[num++] = 0x10; /* uuid type=1, locally assigned */
961 			arr[num++] = 0x0;
962 			memcpy(arr + num, lu_name, 16);
963 			num += 16;
964 		} else {
965 			/* NAA-3, Logical unit identifier (binary) */
966 			arr[num++] = 0x1;  /* binary (not necessarily sas) */
967 			arr[num++] = 0x3;  /* PIV=0, lu, naa */
968 			arr[num++] = 0x0;
969 			arr[num++] = 0x8;
970 			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
971 			num += 8;
972 		}
973 		/* Target relative port number */
974 		arr[num++] = 0x61;	/* proto=sas, binary */
975 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
976 		arr[num++] = 0x0;	/* reserved */
977 		arr[num++] = 0x4;	/* length */
978 		arr[num++] = 0x0;	/* reserved */
979 		arr[num++] = 0x0;	/* reserved */
980 		arr[num++] = 0x0;
981 		arr[num++] = 0x1;	/* relative port A */
982 	}
983 	/* NAA-3, Target port identifier */
984 	arr[num++] = 0x61;	/* proto=sas, binary */
985 	arr[num++] = 0x93;	/* piv=1, target port, naa */
986 	arr[num++] = 0x0;
987 	arr[num++] = 0x8;
988 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
989 	num += 8;
990 	/* NAA-3, Target port group identifier */
991 	arr[num++] = 0x61;	/* proto=sas, binary */
992 	arr[num++] = 0x95;	/* piv=1, target port group id */
993 	arr[num++] = 0x0;
994 	arr[num++] = 0x4;
995 	arr[num++] = 0;
996 	arr[num++] = 0;
997 	put_unaligned_be16(port_group_id, arr + num);
998 	num += 2;
999 	/* NAA-3, Target device identifier */
1000 	arr[num++] = 0x61;	/* proto=sas, binary */
1001 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
1002 	arr[num++] = 0x0;
1003 	arr[num++] = 0x8;
1004 	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1005 	num += 8;
1006 	/* SCSI name string: Target device identifier */
1007 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
1008 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
1009 	arr[num++] = 0x0;
1010 	arr[num++] = 24;
1011 	memcpy(arr + num, "naa.32222220", 12);
1012 	num += 12;
1013 	snprintf(b, sizeof(b), "%08X", target_dev_id);
1014 	memcpy(arr + num, b, 8);
1015 	num += 8;
1016 	memset(arr + num, 0, 4);
1017 	num += 4;
1018 	return num;
1019 }
1020 
1021 static unsigned char vpd84_data[] = {
1022 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1023     0x22,0x22,0x22,0x0,0xbb,0x1,
1024     0x22,0x22,0x22,0x0,0xbb,0x2,
1025 };
1026 
1027 /*  Software interface identification VPD page */
1028 static int inquiry_vpd_84(unsigned char *arr)
1029 {
1030 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1031 	return sizeof(vpd84_data);
1032 }
1033 
1034 /* Management network addresses VPD page */
1035 static int inquiry_vpd_85(unsigned char *arr)
1036 {
1037 	int num = 0;
1038 	const char * na1 = "https://www.kernel.org/config";
1039 	const char * na2 = "http://www.kernel.org/log";
1040 	int plen, olen;
1041 
1042 	arr[num++] = 0x1;	/* lu, storage config */
1043 	arr[num++] = 0x0;	/* reserved */
1044 	arr[num++] = 0x0;
1045 	olen = strlen(na1);
1046 	plen = olen + 1;
1047 	if (plen % 4)
1048 		plen = ((plen / 4) + 1) * 4;
1049 	arr[num++] = plen;	/* length, null termianted, padded */
1050 	memcpy(arr + num, na1, olen);
1051 	memset(arr + num + olen, 0, plen - olen);
1052 	num += plen;
1053 
1054 	arr[num++] = 0x4;	/* lu, logging */
1055 	arr[num++] = 0x0;	/* reserved */
1056 	arr[num++] = 0x0;
1057 	olen = strlen(na2);
1058 	plen = olen + 1;
1059 	if (plen % 4)
1060 		plen = ((plen / 4) + 1) * 4;
1061 	arr[num++] = plen;	/* length, null terminated, padded */
1062 	memcpy(arr + num, na2, olen);
1063 	memset(arr + num + olen, 0, plen - olen);
1064 	num += plen;
1065 
1066 	return num;
1067 }
1068 
1069 /* SCSI ports VPD page */
1070 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1071 {
1072 	int num = 0;
1073 	int port_a, port_b;
1074 
1075 	port_a = target_dev_id + 1;
1076 	port_b = port_a + 1;
1077 	arr[num++] = 0x0;	/* reserved */
1078 	arr[num++] = 0x0;	/* reserved */
1079 	arr[num++] = 0x0;
1080 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1081 	memset(arr + num, 0, 6);
1082 	num += 6;
1083 	arr[num++] = 0x0;
1084 	arr[num++] = 12;	/* length tp descriptor */
1085 	/* naa-5 target port identifier (A) */
1086 	arr[num++] = 0x61;	/* proto=sas, binary */
1087 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1088 	arr[num++] = 0x0;	/* reserved */
1089 	arr[num++] = 0x8;	/* length */
1090 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1091 	num += 8;
1092 	arr[num++] = 0x0;	/* reserved */
1093 	arr[num++] = 0x0;	/* reserved */
1094 	arr[num++] = 0x0;
1095 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1096 	memset(arr + num, 0, 6);
1097 	num += 6;
1098 	arr[num++] = 0x0;
1099 	arr[num++] = 12;	/* length tp descriptor */
1100 	/* naa-5 target port identifier (B) */
1101 	arr[num++] = 0x61;	/* proto=sas, binary */
1102 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1103 	arr[num++] = 0x0;	/* reserved */
1104 	arr[num++] = 0x8;	/* length */
1105 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1106 	num += 8;
1107 
1108 	return num;
1109 }
1110 
1111 
1112 static unsigned char vpd89_data[] = {
1113 /* from 4th byte */ 0,0,0,0,
1114 'l','i','n','u','x',' ',' ',' ',
1115 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1116 '1','2','3','4',
1117 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1118 0xec,0,0,0,
1119 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1120 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1121 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1122 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1123 0x53,0x41,
1124 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1125 0x20,0x20,
1126 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1127 0x10,0x80,
1128 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1129 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1130 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1131 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1132 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1133 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1134 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1139 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1140 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1141 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1142 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1143 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1144 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1149 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1150 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1151 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1152 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1153 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1154 };
1155 
1156 /* ATA Information VPD page */
1157 static int inquiry_vpd_89(unsigned char *arr)
1158 {
1159 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1160 	return sizeof(vpd89_data);
1161 }
1162 
1163 
1164 static unsigned char vpdb0_data[] = {
1165 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1166 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1167 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1168 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1169 };
1170 
1171 /* Block limits VPD page (SBC-3) */
1172 static int inquiry_vpd_b0(unsigned char *arr)
1173 {
1174 	unsigned int gran;
1175 
1176 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1177 
1178 	/* Optimal transfer length granularity */
1179 	gran = 1 << sdebug_physblk_exp;
1180 	put_unaligned_be16(gran, arr + 2);
1181 
1182 	/* Maximum Transfer Length */
1183 	if (sdebug_store_sectors > 0x400)
1184 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1185 
1186 	/* Optimal Transfer Length */
1187 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1188 
1189 	if (sdebug_lbpu) {
1190 		/* Maximum Unmap LBA Count */
1191 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1192 
1193 		/* Maximum Unmap Block Descriptor Count */
1194 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1195 	}
1196 
1197 	/* Unmap Granularity Alignment */
1198 	if (sdebug_unmap_alignment) {
1199 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1200 		arr[28] |= 0x80; /* UGAVALID */
1201 	}
1202 
1203 	/* Optimal Unmap Granularity */
1204 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1205 
1206 	/* Maximum WRITE SAME Length */
1207 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1208 
1209 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1210 
1211 	return sizeof(vpdb0_data);
1212 }
1213 
1214 /* Block device characteristics VPD page (SBC-3) */
1215 static int inquiry_vpd_b1(unsigned char *arr)
1216 {
1217 	memset(arr, 0, 0x3c);
1218 	arr[0] = 0;
1219 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
1220 	arr[2] = 0;
1221 	arr[3] = 5;	/* less than 1.8" */
1222 
1223 	return 0x3c;
1224 }
1225 
1226 /* Logical block provisioning VPD page (SBC-4) */
1227 static int inquiry_vpd_b2(unsigned char *arr)
1228 {
1229 	memset(arr, 0, 0x4);
1230 	arr[0] = 0;			/* threshold exponent */
1231 	if (sdebug_lbpu)
1232 		arr[1] = 1 << 7;
1233 	if (sdebug_lbpws)
1234 		arr[1] |= 1 << 6;
1235 	if (sdebug_lbpws10)
1236 		arr[1] |= 1 << 5;
1237 	if (sdebug_lbprz && scsi_debug_lbp())
1238 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1239 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1240 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1241 	/* threshold_percentage=0 */
1242 	return 0x4;
1243 }
1244 
1245 #define SDEBUG_LONG_INQ_SZ 96
1246 #define SDEBUG_MAX_INQ_ARR_SZ 584
1247 
1248 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1249 {
1250 	unsigned char pq_pdt;
1251 	unsigned char * arr;
1252 	unsigned char *cmd = scp->cmnd;
1253 	int alloc_len, n, ret;
1254 	bool have_wlun, is_disk;
1255 
1256 	alloc_len = get_unaligned_be16(cmd + 3);
1257 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1258 	if (! arr)
1259 		return DID_REQUEUE << 16;
1260 	is_disk = (sdebug_ptype == TYPE_DISK);
1261 	have_wlun = scsi_is_wlun(scp->device->lun);
1262 	if (have_wlun)
1263 		pq_pdt = TYPE_WLUN;	/* present, wlun */
1264 	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1265 		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
1266 	else
1267 		pq_pdt = (sdebug_ptype & 0x1f);
1268 	arr[0] = pq_pdt;
1269 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
1270 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1271 		kfree(arr);
1272 		return check_condition_result;
1273 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
1274 		int lu_id_num, port_group_id, target_dev_id, len;
1275 		char lu_id_str[6];
1276 		int host_no = devip->sdbg_host->shost->host_no;
1277 
1278 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
1279 		    (devip->channel & 0x7f);
1280 		if (sdebug_vpd_use_hostno == 0)
1281 			host_no = 0;
1282 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1283 			    (devip->target * 1000) + devip->lun);
1284 		target_dev_id = ((host_no + 1) * 2000) +
1285 				 (devip->target * 1000) - 3;
1286 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1287 		if (0 == cmd[2]) { /* supported vital product data pages */
1288 			arr[1] = cmd[2];	/*sanity */
1289 			n = 4;
1290 			arr[n++] = 0x0;   /* this page */
1291 			arr[n++] = 0x80;  /* unit serial number */
1292 			arr[n++] = 0x83;  /* device identification */
1293 			arr[n++] = 0x84;  /* software interface ident. */
1294 			arr[n++] = 0x85;  /* management network addresses */
1295 			arr[n++] = 0x86;  /* extended inquiry */
1296 			arr[n++] = 0x87;  /* mode page policy */
1297 			arr[n++] = 0x88;  /* SCSI ports */
1298 			if (is_disk) {	  /* SBC only */
1299 				arr[n++] = 0x89;  /* ATA information */
1300 				arr[n++] = 0xb0;  /* Block limits */
1301 				arr[n++] = 0xb1;  /* Block characteristics */
1302 				arr[n++] = 0xb2;  /* Logical Block Prov */
1303 			}
1304 			arr[3] = n - 4;	  /* number of supported VPD pages */
1305 		} else if (0x80 == cmd[2]) { /* unit serial number */
1306 			arr[1] = cmd[2];	/*sanity */
1307 			arr[3] = len;
1308 			memcpy(&arr[4], lu_id_str, len);
1309 		} else if (0x83 == cmd[2]) { /* device identification */
1310 			arr[1] = cmd[2];	/*sanity */
1311 			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1312 						target_dev_id, lu_id_num,
1313 						lu_id_str, len,
1314 						&devip->lu_name);
1315 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
1316 			arr[1] = cmd[2];	/*sanity */
1317 			arr[3] = inquiry_vpd_84(&arr[4]);
1318 		} else if (0x85 == cmd[2]) { /* Management network addresses */
1319 			arr[1] = cmd[2];	/*sanity */
1320 			arr[3] = inquiry_vpd_85(&arr[4]);
1321 		} else if (0x86 == cmd[2]) { /* extended inquiry */
1322 			arr[1] = cmd[2];	/*sanity */
1323 			arr[3] = 0x3c;	/* number of following entries */
1324 			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
1325 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
1326 			else if (have_dif_prot)
1327 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1328 			else
1329 				arr[4] = 0x0;   /* no protection stuff */
1330 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
1331 		} else if (0x87 == cmd[2]) { /* mode page policy */
1332 			arr[1] = cmd[2];	/*sanity */
1333 			arr[3] = 0x8;	/* number of following entries */
1334 			arr[4] = 0x2;	/* disconnect-reconnect mp */
1335 			arr[6] = 0x80;	/* mlus, shared */
1336 			arr[8] = 0x18;	 /* protocol specific lu */
1337 			arr[10] = 0x82;	 /* mlus, per initiator port */
1338 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
1339 			arr[1] = cmd[2];	/*sanity */
1340 			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1341 		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
1342 			arr[1] = cmd[2];        /*sanity */
1343 			n = inquiry_vpd_89(&arr[4]);
1344 			put_unaligned_be16(n, arr + 2);
1345 		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
1346 			arr[1] = cmd[2];        /*sanity */
1347 			arr[3] = inquiry_vpd_b0(&arr[4]);
1348 		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
1349 			arr[1] = cmd[2];        /*sanity */
1350 			arr[3] = inquiry_vpd_b1(&arr[4]);
1351 		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1352 			arr[1] = cmd[2];        /*sanity */
1353 			arr[3] = inquiry_vpd_b2(&arr[4]);
1354 		} else {
1355 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1356 			kfree(arr);
1357 			return check_condition_result;
1358 		}
1359 		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1360 		ret = fill_from_dev_buffer(scp, arr,
1361 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1362 		kfree(arr);
1363 		return ret;
1364 	}
1365 	/* drops through here for a standard inquiry */
1366 	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
1367 	arr[2] = sdebug_scsi_level;
1368 	arr[3] = 2;    /* response_data_format==2 */
1369 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1370 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
1371 	if (sdebug_vpd_use_hostno == 0)
1372 		arr[5] = 0x10; /* claim: implicit TGPS */
1373 	arr[6] = 0x10; /* claim: MultiP */
1374 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1375 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1376 	memcpy(&arr[8], inq_vendor_id, 8);
1377 	memcpy(&arr[16], inq_product_id, 16);
1378 	memcpy(&arr[32], inq_product_rev, 4);
1379 	/* version descriptors (2 bytes each) follow */
1380 	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
1381 	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
1382 	n = 62;
1383 	if (is_disk) {		/* SBC-4 no version claimed */
1384 		put_unaligned_be16(0x600, arr + n);
1385 		n += 2;
1386 	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
1387 		put_unaligned_be16(0x525, arr + n);
1388 		n += 2;
1389 	}
1390 	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
1391 	ret = fill_from_dev_buffer(scp, arr,
1392 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
1393 	kfree(arr);
1394 	return ret;
1395 }
1396 
1397 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1398 				   0, 0, 0x0, 0x0};
1399 
1400 static int resp_requests(struct scsi_cmnd * scp,
1401 			 struct sdebug_dev_info * devip)
1402 {
1403 	unsigned char * sbuff;
1404 	unsigned char *cmd = scp->cmnd;
1405 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1406 	bool dsense;
1407 	int len = 18;
1408 
1409 	memset(arr, 0, sizeof(arr));
1410 	dsense = !!(cmd[1] & 1);
1411 	sbuff = scp->sense_buffer;
1412 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1413 		if (dsense) {
1414 			arr[0] = 0x72;
1415 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1416 			arr[2] = THRESHOLD_EXCEEDED;
1417 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1418 			len = 8;
1419 		} else {
1420 			arr[0] = 0x70;
1421 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1422 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1423 			arr[12] = THRESHOLD_EXCEEDED;
1424 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1425 		}
1426 	} else {
1427 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1428 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1429 			;	/* have sense and formats match */
1430 		else if (arr[0] <= 0x70) {
1431 			if (dsense) {
1432 				memset(arr, 0, 8);
1433 				arr[0] = 0x72;
1434 				len = 8;
1435 			} else {
1436 				memset(arr, 0, 18);
1437 				arr[0] = 0x70;
1438 				arr[7] = 0xa;
1439 			}
1440 		} else if (dsense) {
1441 			memset(arr, 0, 8);
1442 			arr[0] = 0x72;
1443 			arr[1] = sbuff[2];     /* sense key */
1444 			arr[2] = sbuff[12];    /* asc */
1445 			arr[3] = sbuff[13];    /* ascq */
1446 			len = 8;
1447 		} else {
1448 			memset(arr, 0, 18);
1449 			arr[0] = 0x70;
1450 			arr[2] = sbuff[1];
1451 			arr[7] = 0xa;
1452 			arr[12] = sbuff[1];
1453 			arr[13] = sbuff[3];
1454 		}
1455 
1456 	}
1457 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1458 	return fill_from_dev_buffer(scp, arr, len);
1459 }
1460 
1461 static int resp_start_stop(struct scsi_cmnd * scp,
1462 			   struct sdebug_dev_info * devip)
1463 {
1464 	unsigned char *cmd = scp->cmnd;
1465 	int power_cond, stop;
1466 
1467 	power_cond = (cmd[4] & 0xf0) >> 4;
1468 	if (power_cond) {
1469 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1470 		return check_condition_result;
1471 	}
1472 	stop = !(cmd[4] & 1);
1473 	atomic_xchg(&devip->stopped, stop);
1474 	return 0;
1475 }
1476 
1477 static sector_t get_sdebug_capacity(void)
1478 {
1479 	static const unsigned int gibibyte = 1073741824;
1480 
1481 	if (sdebug_virtual_gb > 0)
1482 		return (sector_t)sdebug_virtual_gb *
1483 			(gibibyte / sdebug_sector_size);
1484 	else
1485 		return sdebug_store_sectors;
1486 }
1487 
1488 #define SDEBUG_READCAP_ARR_SZ 8
1489 static int resp_readcap(struct scsi_cmnd * scp,
1490 			struct sdebug_dev_info * devip)
1491 {
1492 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1493 	unsigned int capac;
1494 
1495 	/* following just in case virtual_gb changed */
1496 	sdebug_capacity = get_sdebug_capacity();
1497 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1498 	if (sdebug_capacity < 0xffffffff) {
1499 		capac = (unsigned int)sdebug_capacity - 1;
1500 		put_unaligned_be32(capac, arr + 0);
1501 	} else
1502 		put_unaligned_be32(0xffffffff, arr + 0);
1503 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1504 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1505 }
1506 
1507 #define SDEBUG_READCAP16_ARR_SZ 32
1508 static int resp_readcap16(struct scsi_cmnd * scp,
1509 			  struct sdebug_dev_info * devip)
1510 {
1511 	unsigned char *cmd = scp->cmnd;
1512 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1513 	int alloc_len;
1514 
1515 	alloc_len = get_unaligned_be32(cmd + 10);
1516 	/* following just in case virtual_gb changed */
1517 	sdebug_capacity = get_sdebug_capacity();
1518 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1519 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1520 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1521 	arr[13] = sdebug_physblk_exp & 0xf;
1522 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1523 
1524 	if (scsi_debug_lbp()) {
1525 		arr[14] |= 0x80; /* LBPME */
1526 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1527 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1528 		 * in the wider field maps to 0 in this field.
1529 		 */
1530 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1531 			arr[14] |= 0x40;
1532 	}
1533 
1534 	arr[15] = sdebug_lowest_aligned & 0xff;
1535 
1536 	if (have_dif_prot) {
1537 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1538 		arr[12] |= 1; /* PROT_EN */
1539 	}
1540 
1541 	return fill_from_dev_buffer(scp, arr,
1542 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1543 }
1544 
1545 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1546 
1547 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1548 			      struct sdebug_dev_info * devip)
1549 {
1550 	unsigned char *cmd = scp->cmnd;
1551 	unsigned char * arr;
1552 	int host_no = devip->sdbg_host->shost->host_no;
1553 	int n, ret, alen, rlen;
1554 	int port_group_a, port_group_b, port_a, port_b;
1555 
1556 	alen = get_unaligned_be32(cmd + 6);
1557 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1558 	if (! arr)
1559 		return DID_REQUEUE << 16;
1560 	/*
1561 	 * EVPD page 0x88 states we have two ports, one
1562 	 * real and a fake port with no device connected.
1563 	 * So we create two port groups with one port each
1564 	 * and set the group with port B to unavailable.
1565 	 */
1566 	port_a = 0x1; /* relative port A */
1567 	port_b = 0x2; /* relative port B */
1568 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1569 			(devip->channel & 0x7f);
1570 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1571 			(devip->channel & 0x7f) + 0x80;
1572 
1573 	/*
1574 	 * The asymmetric access state is cycled according to the host_id.
1575 	 */
1576 	n = 4;
1577 	if (sdebug_vpd_use_hostno == 0) {
1578 		arr[n++] = host_no % 3; /* Asymm access state */
1579 		arr[n++] = 0x0F; /* claim: all states are supported */
1580 	} else {
1581 		arr[n++] = 0x0; /* Active/Optimized path */
1582 		arr[n++] = 0x01; /* only support active/optimized paths */
1583 	}
1584 	put_unaligned_be16(port_group_a, arr + n);
1585 	n += 2;
1586 	arr[n++] = 0;    /* Reserved */
1587 	arr[n++] = 0;    /* Status code */
1588 	arr[n++] = 0;    /* Vendor unique */
1589 	arr[n++] = 0x1;  /* One port per group */
1590 	arr[n++] = 0;    /* Reserved */
1591 	arr[n++] = 0;    /* Reserved */
1592 	put_unaligned_be16(port_a, arr + n);
1593 	n += 2;
1594 	arr[n++] = 3;    /* Port unavailable */
1595 	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1596 	put_unaligned_be16(port_group_b, arr + n);
1597 	n += 2;
1598 	arr[n++] = 0;    /* Reserved */
1599 	arr[n++] = 0;    /* Status code */
1600 	arr[n++] = 0;    /* Vendor unique */
1601 	arr[n++] = 0x1;  /* One port per group */
1602 	arr[n++] = 0;    /* Reserved */
1603 	arr[n++] = 0;    /* Reserved */
1604 	put_unaligned_be16(port_b, arr + n);
1605 	n += 2;
1606 
1607 	rlen = n - 4;
1608 	put_unaligned_be32(rlen, arr + 0);
1609 
1610 	/*
1611 	 * Return the smallest value of either
1612 	 * - The allocated length
1613 	 * - The constructed command length
1614 	 * - The maximum array size
1615 	 */
1616 	rlen = min(alen,n);
1617 	ret = fill_from_dev_buffer(scp, arr,
1618 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1619 	kfree(arr);
1620 	return ret;
1621 }
1622 
1623 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1624 			     struct sdebug_dev_info *devip)
1625 {
1626 	bool rctd;
1627 	u8 reporting_opts, req_opcode, sdeb_i, supp;
1628 	u16 req_sa, u;
1629 	u32 alloc_len, a_len;
1630 	int k, offset, len, errsts, count, bump, na;
1631 	const struct opcode_info_t *oip;
1632 	const struct opcode_info_t *r_oip;
1633 	u8 *arr;
1634 	u8 *cmd = scp->cmnd;
1635 
1636 	rctd = !!(cmd[2] & 0x80);
1637 	reporting_opts = cmd[2] & 0x7;
1638 	req_opcode = cmd[3];
1639 	req_sa = get_unaligned_be16(cmd + 4);
1640 	alloc_len = get_unaligned_be32(cmd + 6);
1641 	if (alloc_len < 4 || alloc_len > 0xffff) {
1642 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1643 		return check_condition_result;
1644 	}
1645 	if (alloc_len > 8192)
1646 		a_len = 8192;
1647 	else
1648 		a_len = alloc_len;
1649 	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1650 	if (NULL == arr) {
1651 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1652 				INSUFF_RES_ASCQ);
1653 		return check_condition_result;
1654 	}
1655 	switch (reporting_opts) {
1656 	case 0:	/* all commands */
1657 		/* count number of commands */
1658 		for (count = 0, oip = opcode_info_arr;
1659 		     oip->num_attached != 0xff; ++oip) {
1660 			if (F_INV_OP & oip->flags)
1661 				continue;
1662 			count += (oip->num_attached + 1);
1663 		}
1664 		bump = rctd ? 20 : 8;
1665 		put_unaligned_be32(count * bump, arr);
1666 		for (offset = 4, oip = opcode_info_arr;
1667 		     oip->num_attached != 0xff && offset < a_len; ++oip) {
1668 			if (F_INV_OP & oip->flags)
1669 				continue;
1670 			na = oip->num_attached;
1671 			arr[offset] = oip->opcode;
1672 			put_unaligned_be16(oip->sa, arr + offset + 2);
1673 			if (rctd)
1674 				arr[offset + 5] |= 0x2;
1675 			if (FF_SA & oip->flags)
1676 				arr[offset + 5] |= 0x1;
1677 			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1678 			if (rctd)
1679 				put_unaligned_be16(0xa, arr + offset + 8);
1680 			r_oip = oip;
1681 			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1682 				if (F_INV_OP & oip->flags)
1683 					continue;
1684 				offset += bump;
1685 				arr[offset] = oip->opcode;
1686 				put_unaligned_be16(oip->sa, arr + offset + 2);
1687 				if (rctd)
1688 					arr[offset + 5] |= 0x2;
1689 				if (FF_SA & oip->flags)
1690 					arr[offset + 5] |= 0x1;
1691 				put_unaligned_be16(oip->len_mask[0],
1692 						   arr + offset + 6);
1693 				if (rctd)
1694 					put_unaligned_be16(0xa,
1695 							   arr + offset + 8);
1696 			}
1697 			oip = r_oip;
1698 			offset += bump;
1699 		}
1700 		break;
1701 	case 1:	/* one command: opcode only */
1702 	case 2:	/* one command: opcode plus service action */
1703 	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
1704 		sdeb_i = opcode_ind_arr[req_opcode];
1705 		oip = &opcode_info_arr[sdeb_i];
1706 		if (F_INV_OP & oip->flags) {
1707 			supp = 1;
1708 			offset = 4;
1709 		} else {
1710 			if (1 == reporting_opts) {
1711 				if (FF_SA & oip->flags) {
1712 					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1713 							     2, 2);
1714 					kfree(arr);
1715 					return check_condition_result;
1716 				}
1717 				req_sa = 0;
1718 			} else if (2 == reporting_opts &&
1719 				   0 == (FF_SA & oip->flags)) {
1720 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1721 				kfree(arr);	/* point at requested sa */
1722 				return check_condition_result;
1723 			}
1724 			if (0 == (FF_SA & oip->flags) &&
1725 			    req_opcode == oip->opcode)
1726 				supp = 3;
1727 			else if (0 == (FF_SA & oip->flags)) {
1728 				na = oip->num_attached;
1729 				for (k = 0, oip = oip->arrp; k < na;
1730 				     ++k, ++oip) {
1731 					if (req_opcode == oip->opcode)
1732 						break;
1733 				}
1734 				supp = (k >= na) ? 1 : 3;
1735 			} else if (req_sa != oip->sa) {
1736 				na = oip->num_attached;
1737 				for (k = 0, oip = oip->arrp; k < na;
1738 				     ++k, ++oip) {
1739 					if (req_sa == oip->sa)
1740 						break;
1741 				}
1742 				supp = (k >= na) ? 1 : 3;
1743 			} else
1744 				supp = 3;
1745 			if (3 == supp) {
1746 				u = oip->len_mask[0];
1747 				put_unaligned_be16(u, arr + 2);
1748 				arr[4] = oip->opcode;
1749 				for (k = 1; k < u; ++k)
1750 					arr[4 + k] = (k < 16) ?
1751 						 oip->len_mask[k] : 0xff;
1752 				offset = 4 + u;
1753 			} else
1754 				offset = 4;
1755 		}
1756 		arr[1] = (rctd ? 0x80 : 0) | supp;
1757 		if (rctd) {
1758 			put_unaligned_be16(0xa, arr + offset);
1759 			offset += 12;
1760 		}
1761 		break;
1762 	default:
1763 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1764 		kfree(arr);
1765 		return check_condition_result;
1766 	}
1767 	offset = (offset < a_len) ? offset : a_len;
1768 	len = (offset < alloc_len) ? offset : alloc_len;
1769 	errsts = fill_from_dev_buffer(scp, arr, len);
1770 	kfree(arr);
1771 	return errsts;
1772 }
1773 
1774 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1775 			  struct sdebug_dev_info *devip)
1776 {
1777 	bool repd;
1778 	u32 alloc_len, len;
1779 	u8 arr[16];
1780 	u8 *cmd = scp->cmnd;
1781 
1782 	memset(arr, 0, sizeof(arr));
1783 	repd = !!(cmd[2] & 0x80);
1784 	alloc_len = get_unaligned_be32(cmd + 6);
1785 	if (alloc_len < 4) {
1786 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1787 		return check_condition_result;
1788 	}
1789 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1790 	arr[1] = 0x1;		/* ITNRS */
1791 	if (repd) {
1792 		arr[3] = 0xc;
1793 		len = 16;
1794 	} else
1795 		len = 4;
1796 
1797 	len = (len < alloc_len) ? len : alloc_len;
1798 	return fill_from_dev_buffer(scp, arr, len);
1799 }
1800 
1801 /* <<Following mode page info copied from ST318451LW>> */
1802 
1803 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1804 {	/* Read-Write Error Recovery page for mode_sense */
1805 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1806 					5, 0, 0xff, 0xff};
1807 
1808 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1809 	if (1 == pcontrol)
1810 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1811 	return sizeof(err_recov_pg);
1812 }
1813 
1814 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1815 { 	/* Disconnect-Reconnect page for mode_sense */
1816 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1817 					 0, 0, 0, 0, 0, 0, 0, 0};
1818 
1819 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1820 	if (1 == pcontrol)
1821 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1822 	return sizeof(disconnect_pg);
1823 }
1824 
1825 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1826 {       /* Format device page for mode_sense */
1827 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1828 				     0, 0, 0, 0, 0, 0, 0, 0,
1829 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1830 
1831 	memcpy(p, format_pg, sizeof(format_pg));
1832 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1833 	put_unaligned_be16(sdebug_sector_size, p + 12);
1834 	if (sdebug_removable)
1835 		p[20] |= 0x20; /* should agree with INQUIRY */
1836 	if (1 == pcontrol)
1837 		memset(p + 2, 0, sizeof(format_pg) - 2);
1838 	return sizeof(format_pg);
1839 }
1840 
1841 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1842 				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1843 				     0, 0, 0, 0};
1844 
1845 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1846 { 	/* Caching page for mode_sense */
1847 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1848 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1849 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1850 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1851 
1852 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1853 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1854 	memcpy(p, caching_pg, sizeof(caching_pg));
1855 	if (1 == pcontrol)
1856 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1857 	else if (2 == pcontrol)
1858 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1859 	return sizeof(caching_pg);
1860 }
1861 
1862 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1863 				    0, 0, 0x2, 0x4b};
1864 
1865 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1866 { 	/* Control mode page for mode_sense */
1867 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1868 				        0, 0, 0, 0};
1869 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1870 				     0, 0, 0x2, 0x4b};
1871 
1872 	if (sdebug_dsense)
1873 		ctrl_m_pg[2] |= 0x4;
1874 	else
1875 		ctrl_m_pg[2] &= ~0x4;
1876 
1877 	if (sdebug_ato)
1878 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1879 
1880 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1881 	if (1 == pcontrol)
1882 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1883 	else if (2 == pcontrol)
1884 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1885 	return sizeof(ctrl_m_pg);
1886 }
1887 
1888 
1889 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1890 {	/* Informational Exceptions control mode page for mode_sense */
1891 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1892 				       0, 0, 0x0, 0x0};
1893 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1894 				      0, 0, 0x0, 0x0};
1895 
1896 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1897 	if (1 == pcontrol)
1898 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1899 	else if (2 == pcontrol)
1900 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1901 	return sizeof(iec_m_pg);
1902 }
1903 
1904 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1905 {	/* SAS SSP mode page - short format for mode_sense */
1906 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1907 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1908 
1909 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1910 	if (1 == pcontrol)
1911 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1912 	return sizeof(sas_sf_m_pg);
1913 }
1914 
1915 
1916 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1917 			      int target_dev_id)
1918 {	/* SAS phy control and discover mode page for mode_sense */
1919 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1920 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1921 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1922 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1923 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1924 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1925 		    0, 0, 0, 0, 0, 0, 0, 0,
1926 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1927 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1928 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1929 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1930 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1931 		    0, 0, 0, 0, 0, 0, 0, 0,
1932 		};
1933 	int port_a, port_b;
1934 
1935 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
1936 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
1937 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
1938 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
1939 	port_a = target_dev_id + 1;
1940 	port_b = port_a + 1;
1941 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1942 	put_unaligned_be32(port_a, p + 20);
1943 	put_unaligned_be32(port_b, p + 48 + 20);
1944 	if (1 == pcontrol)
1945 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1946 	return sizeof(sas_pcd_m_pg);
1947 }
1948 
1949 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1950 {	/* SAS SSP shared protocol specific port mode subpage */
1951 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1952 		    0, 0, 0, 0, 0, 0, 0, 0,
1953 		};
1954 
1955 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1956 	if (1 == pcontrol)
1957 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1958 	return sizeof(sas_sha_m_pg);
1959 }
1960 
1961 #define SDEBUG_MAX_MSENSE_SZ 256
1962 
1963 static int resp_mode_sense(struct scsi_cmnd *scp,
1964 			   struct sdebug_dev_info *devip)
1965 {
1966 	int pcontrol, pcode, subpcode, bd_len;
1967 	unsigned char dev_spec;
1968 	int alloc_len, offset, len, target_dev_id;
1969 	int target = scp->device->id;
1970 	unsigned char * ap;
1971 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1972 	unsigned char *cmd = scp->cmnd;
1973 	bool dbd, llbaa, msense_6, is_disk, bad_pcode;
1974 
1975 	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
1976 	pcontrol = (cmd[2] & 0xc0) >> 6;
1977 	pcode = cmd[2] & 0x3f;
1978 	subpcode = cmd[3];
1979 	msense_6 = (MODE_SENSE == cmd[0]);
1980 	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
1981 	is_disk = (sdebug_ptype == TYPE_DISK);
1982 	if (is_disk && !dbd)
1983 		bd_len = llbaa ? 16 : 8;
1984 	else
1985 		bd_len = 0;
1986 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1987 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1988 	if (0x3 == pcontrol) {  /* Saving values not supported */
1989 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1990 		return check_condition_result;
1991 	}
1992 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1993 			(devip->target * 1000) - 3;
1994 	/* for disks set DPOFUA bit and clear write protect (WP) bit */
1995 	if (is_disk)
1996 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
1997 	else
1998 		dev_spec = 0x0;
1999 	if (msense_6) {
2000 		arr[2] = dev_spec;
2001 		arr[3] = bd_len;
2002 		offset = 4;
2003 	} else {
2004 		arr[3] = dev_spec;
2005 		if (16 == bd_len)
2006 			arr[4] = 0x1;	/* set LONGLBA bit */
2007 		arr[7] = bd_len;	/* assume 255 or less */
2008 		offset = 8;
2009 	}
2010 	ap = arr + offset;
2011 	if ((bd_len > 0) && (!sdebug_capacity))
2012 		sdebug_capacity = get_sdebug_capacity();
2013 
2014 	if (8 == bd_len) {
2015 		if (sdebug_capacity > 0xfffffffe)
2016 			put_unaligned_be32(0xffffffff, ap + 0);
2017 		else
2018 			put_unaligned_be32(sdebug_capacity, ap + 0);
2019 		put_unaligned_be16(sdebug_sector_size, ap + 6);
2020 		offset += bd_len;
2021 		ap = arr + offset;
2022 	} else if (16 == bd_len) {
2023 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2024 		put_unaligned_be32(sdebug_sector_size, ap + 12);
2025 		offset += bd_len;
2026 		ap = arr + offset;
2027 	}
2028 
2029 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2030 		/* TODO: Control Extension page */
2031 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2032 		return check_condition_result;
2033 	}
2034 	bad_pcode = false;
2035 
2036 	switch (pcode) {
2037 	case 0x1:	/* Read-Write error recovery page, direct access */
2038 		len = resp_err_recov_pg(ap, pcontrol, target);
2039 		offset += len;
2040 		break;
2041 	case 0x2:	/* Disconnect-Reconnect page, all devices */
2042 		len = resp_disconnect_pg(ap, pcontrol, target);
2043 		offset += len;
2044 		break;
2045         case 0x3:       /* Format device page, direct access */
2046 		if (is_disk) {
2047 			len = resp_format_pg(ap, pcontrol, target);
2048 			offset += len;
2049 		} else
2050 			bad_pcode = true;
2051                 break;
2052 	case 0x8:	/* Caching page, direct access */
2053 		if (is_disk) {
2054 			len = resp_caching_pg(ap, pcontrol, target);
2055 			offset += len;
2056 		} else
2057 			bad_pcode = true;
2058 		break;
2059 	case 0xa:	/* Control Mode page, all devices */
2060 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2061 		offset += len;
2062 		break;
2063 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2064 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2065 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2066 			return check_condition_result;
2067 	        }
2068 		len = 0;
2069 		if ((0x0 == subpcode) || (0xff == subpcode))
2070 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2071 		if ((0x1 == subpcode) || (0xff == subpcode))
2072 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2073 						  target_dev_id);
2074 		if ((0x2 == subpcode) || (0xff == subpcode))
2075 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2076 		offset += len;
2077 		break;
2078 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2079 		len = resp_iec_m_pg(ap, pcontrol, target);
2080 		offset += len;
2081 		break;
2082 	case 0x3f:	/* Read all Mode pages */
2083 		if ((0 == subpcode) || (0xff == subpcode)) {
2084 			len = resp_err_recov_pg(ap, pcontrol, target);
2085 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2086 			if (is_disk) {
2087 				len += resp_format_pg(ap + len, pcontrol,
2088 						      target);
2089 				len += resp_caching_pg(ap + len, pcontrol,
2090 						       target);
2091 			}
2092 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2093 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2094 			if (0xff == subpcode) {
2095 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2096 						  target, target_dev_id);
2097 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2098 			}
2099 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2100 			offset += len;
2101 		} else {
2102 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2103 			return check_condition_result;
2104                 }
2105 		break;
2106 	default:
2107 		bad_pcode = true;
2108 		break;
2109 	}
2110 	if (bad_pcode) {
2111 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2112 		return check_condition_result;
2113 	}
2114 	if (msense_6)
2115 		arr[0] = offset - 1;
2116 	else
2117 		put_unaligned_be16((offset - 2), arr + 0);
2118 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2119 }
2120 
2121 #define SDEBUG_MAX_MSELECT_SZ 512
2122 
2123 static int resp_mode_select(struct scsi_cmnd *scp,
2124 			    struct sdebug_dev_info *devip)
2125 {
2126 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2127 	int param_len, res, mpage;
2128 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2129 	unsigned char *cmd = scp->cmnd;
2130 	int mselect6 = (MODE_SELECT == cmd[0]);
2131 
2132 	memset(arr, 0, sizeof(arr));
2133 	pf = cmd[1] & 0x10;
2134 	sp = cmd[1] & 0x1;
2135 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2136 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2137 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2138 		return check_condition_result;
2139 	}
2140         res = fetch_to_dev_buffer(scp, arr, param_len);
2141         if (-1 == res)
2142 		return DID_ERROR << 16;
2143 	else if (sdebug_verbose && (res < param_len))
2144 		sdev_printk(KERN_INFO, scp->device,
2145 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2146 			    __func__, param_len, res);
2147 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2148 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2149 	if (md_len > 2) {
2150 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2151 		return check_condition_result;
2152 	}
2153 	off = bd_len + (mselect6 ? 4 : 8);
2154 	mpage = arr[off] & 0x3f;
2155 	ps = !!(arr[off] & 0x80);
2156 	if (ps) {
2157 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2158 		return check_condition_result;
2159 	}
2160 	spf = !!(arr[off] & 0x40);
2161 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2162 		       (arr[off + 1] + 2);
2163 	if ((pg_len + off) > param_len) {
2164 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2165 				PARAMETER_LIST_LENGTH_ERR, 0);
2166 		return check_condition_result;
2167 	}
2168 	switch (mpage) {
2169 	case 0x8:      /* Caching Mode page */
2170 		if (caching_pg[1] == arr[off + 1]) {
2171 			memcpy(caching_pg + 2, arr + off + 2,
2172 			       sizeof(caching_pg) - 2);
2173 			goto set_mode_changed_ua;
2174 		}
2175 		break;
2176 	case 0xa:      /* Control Mode page */
2177 		if (ctrl_m_pg[1] == arr[off + 1]) {
2178 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2179 			       sizeof(ctrl_m_pg) - 2);
2180 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2181 			goto set_mode_changed_ua;
2182 		}
2183 		break;
2184 	case 0x1c:      /* Informational Exceptions Mode page */
2185 		if (iec_m_pg[1] == arr[off + 1]) {
2186 			memcpy(iec_m_pg + 2, arr + off + 2,
2187 			       sizeof(iec_m_pg) - 2);
2188 			goto set_mode_changed_ua;
2189 		}
2190 		break;
2191 	default:
2192 		break;
2193 	}
2194 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2195 	return check_condition_result;
2196 set_mode_changed_ua:
2197 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2198 	return 0;
2199 }
2200 
2201 static int resp_temp_l_pg(unsigned char * arr)
2202 {
2203 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2204 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
2205 		};
2206 
2207         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2208         return sizeof(temp_l_pg);
2209 }
2210 
2211 static int resp_ie_l_pg(unsigned char * arr)
2212 {
2213 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2214 		};
2215 
2216         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2217 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2218 		arr[4] = THRESHOLD_EXCEEDED;
2219 		arr[5] = 0xff;
2220 	}
2221         return sizeof(ie_l_pg);
2222 }
2223 
2224 #define SDEBUG_MAX_LSENSE_SZ 512
2225 
2226 static int resp_log_sense(struct scsi_cmnd * scp,
2227                           struct sdebug_dev_info * devip)
2228 {
2229 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2230 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2231 	unsigned char *cmd = scp->cmnd;
2232 
2233 	memset(arr, 0, sizeof(arr));
2234 	ppc = cmd[1] & 0x2;
2235 	sp = cmd[1] & 0x1;
2236 	if (ppc || sp) {
2237 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2238 		return check_condition_result;
2239 	}
2240 	pcontrol = (cmd[2] & 0xc0) >> 6;
2241 	pcode = cmd[2] & 0x3f;
2242 	subpcode = cmd[3] & 0xff;
2243 	alloc_len = get_unaligned_be16(cmd + 7);
2244 	arr[0] = pcode;
2245 	if (0 == subpcode) {
2246 		switch (pcode) {
2247 		case 0x0:	/* Supported log pages log page */
2248 			n = 4;
2249 			arr[n++] = 0x0;		/* this page */
2250 			arr[n++] = 0xd;		/* Temperature */
2251 			arr[n++] = 0x2f;	/* Informational exceptions */
2252 			arr[3] = n - 4;
2253 			break;
2254 		case 0xd:	/* Temperature log page */
2255 			arr[3] = resp_temp_l_pg(arr + 4);
2256 			break;
2257 		case 0x2f:	/* Informational exceptions log page */
2258 			arr[3] = resp_ie_l_pg(arr + 4);
2259 			break;
2260 		default:
2261 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2262 			return check_condition_result;
2263 		}
2264 	} else if (0xff == subpcode) {
2265 		arr[0] |= 0x40;
2266 		arr[1] = subpcode;
2267 		switch (pcode) {
2268 		case 0x0:	/* Supported log pages and subpages log page */
2269 			n = 4;
2270 			arr[n++] = 0x0;
2271 			arr[n++] = 0x0;		/* 0,0 page */
2272 			arr[n++] = 0x0;
2273 			arr[n++] = 0xff;	/* this page */
2274 			arr[n++] = 0xd;
2275 			arr[n++] = 0x0;		/* Temperature */
2276 			arr[n++] = 0x2f;
2277 			arr[n++] = 0x0;	/* Informational exceptions */
2278 			arr[3] = n - 4;
2279 			break;
2280 		case 0xd:	/* Temperature subpages */
2281 			n = 4;
2282 			arr[n++] = 0xd;
2283 			arr[n++] = 0x0;		/* Temperature */
2284 			arr[3] = n - 4;
2285 			break;
2286 		case 0x2f:	/* Informational exceptions subpages */
2287 			n = 4;
2288 			arr[n++] = 0x2f;
2289 			arr[n++] = 0x0;		/* Informational exceptions */
2290 			arr[3] = n - 4;
2291 			break;
2292 		default:
2293 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2294 			return check_condition_result;
2295 		}
2296 	} else {
2297 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2298 		return check_condition_result;
2299 	}
2300 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2301 	return fill_from_dev_buffer(scp, arr,
2302 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2303 }
2304 
2305 static int check_device_access_params(struct scsi_cmnd *scp,
2306 				      unsigned long long lba, unsigned int num)
2307 {
2308 	if (lba + num > sdebug_capacity) {
2309 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2310 		return check_condition_result;
2311 	}
2312 	/* transfer length excessive (tie in to block limits VPD page) */
2313 	if (num > sdebug_store_sectors) {
2314 		/* needs work to find which cdb byte 'num' comes from */
2315 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2316 		return check_condition_result;
2317 	}
2318 	return 0;
2319 }
2320 
2321 /* Returns number of bytes copied or -1 if error. */
2322 static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
2323 			    bool do_write)
2324 {
2325 	int ret;
2326 	u64 block, rest = 0;
2327 	struct scsi_data_buffer *sdb;
2328 	enum dma_data_direction dir;
2329 
2330 	if (do_write) {
2331 		sdb = scsi_out(scmd);
2332 		dir = DMA_TO_DEVICE;
2333 	} else {
2334 		sdb = scsi_in(scmd);
2335 		dir = DMA_FROM_DEVICE;
2336 	}
2337 
2338 	if (!sdb->length)
2339 		return 0;
2340 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2341 		return -1;
2342 
2343 	block = do_div(lba, sdebug_store_sectors);
2344 	if (block + num > sdebug_store_sectors)
2345 		rest = block + num - sdebug_store_sectors;
2346 
2347 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2348 		   fake_storep + (block * sdebug_sector_size),
2349 		   (num - rest) * sdebug_sector_size, 0, do_write);
2350 	if (ret != (num - rest) * sdebug_sector_size)
2351 		return ret;
2352 
2353 	if (rest) {
2354 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2355 			    fake_storep, rest * sdebug_sector_size,
2356 			    (num - rest) * sdebug_sector_size, do_write);
2357 	}
2358 
2359 	return ret;
2360 }
2361 
2362 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2363  * arr into fake_store(lba,num) and return true. If comparison fails then
2364  * return false. */
2365 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2366 {
2367 	bool res;
2368 	u64 block, rest = 0;
2369 	u32 store_blks = sdebug_store_sectors;
2370 	u32 lb_size = sdebug_sector_size;
2371 
2372 	block = do_div(lba, store_blks);
2373 	if (block + num > store_blks)
2374 		rest = block + num - store_blks;
2375 
2376 	res = !memcmp(fake_storep + (block * lb_size), arr,
2377 		      (num - rest) * lb_size);
2378 	if (!res)
2379 		return res;
2380 	if (rest)
2381 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2382 			     rest * lb_size);
2383 	if (!res)
2384 		return res;
2385 	arr += num * lb_size;
2386 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2387 	if (rest)
2388 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2389 		       rest * lb_size);
2390 	return res;
2391 }
2392 
2393 static __be16 dif_compute_csum(const void *buf, int len)
2394 {
2395 	__be16 csum;
2396 
2397 	if (sdebug_guard)
2398 		csum = (__force __be16)ip_compute_csum(buf, len);
2399 	else
2400 		csum = cpu_to_be16(crc_t10dif(buf, len));
2401 
2402 	return csum;
2403 }
2404 
2405 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2406 		      sector_t sector, u32 ei_lba)
2407 {
2408 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2409 
2410 	if (sdt->guard_tag != csum) {
2411 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2412 			(unsigned long)sector,
2413 			be16_to_cpu(sdt->guard_tag),
2414 			be16_to_cpu(csum));
2415 		return 0x01;
2416 	}
2417 	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
2418 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2419 		pr_err("REF check failed on sector %lu\n",
2420 			(unsigned long)sector);
2421 		return 0x03;
2422 	}
2423 	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2424 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2425 		pr_err("REF check failed on sector %lu\n",
2426 			(unsigned long)sector);
2427 		return 0x03;
2428 	}
2429 	return 0;
2430 }
2431 
2432 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2433 			  unsigned int sectors, bool read)
2434 {
2435 	size_t resid;
2436 	void *paddr;
2437 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
2438 	struct sg_mapping_iter miter;
2439 
2440 	/* Bytes of protection data to copy into sgl */
2441 	resid = sectors * sizeof(*dif_storep);
2442 
2443 	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2444 			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2445 			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2446 
2447 	while (sg_miter_next(&miter) && resid > 0) {
2448 		size_t len = min(miter.length, resid);
2449 		void *start = dif_store(sector);
2450 		size_t rest = 0;
2451 
2452 		if (dif_store_end < start + len)
2453 			rest = start + len - dif_store_end;
2454 
2455 		paddr = miter.addr;
2456 
2457 		if (read)
2458 			memcpy(paddr, start, len - rest);
2459 		else
2460 			memcpy(start, paddr, len - rest);
2461 
2462 		if (rest) {
2463 			if (read)
2464 				memcpy(paddr + len - rest, dif_storep, rest);
2465 			else
2466 				memcpy(dif_storep, paddr + len - rest, rest);
2467 		}
2468 
2469 		sector += len / sizeof(*dif_storep);
2470 		resid -= len;
2471 	}
2472 	sg_miter_stop(&miter);
2473 }
2474 
2475 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2476 			    unsigned int sectors, u32 ei_lba)
2477 {
2478 	unsigned int i;
2479 	struct sd_dif_tuple *sdt;
2480 	sector_t sector;
2481 
2482 	for (i = 0; i < sectors; i++, ei_lba++) {
2483 		int ret;
2484 
2485 		sector = start_sec + i;
2486 		sdt = dif_store(sector);
2487 
2488 		if (sdt->app_tag == cpu_to_be16(0xffff))
2489 			continue;
2490 
2491 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2492 		if (ret) {
2493 			dif_errors++;
2494 			return ret;
2495 		}
2496 	}
2497 
2498 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2499 	dix_reads++;
2500 
2501 	return 0;
2502 }
2503 
2504 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2505 {
2506 	u8 *cmd = scp->cmnd;
2507 	struct sdebug_queued_cmd *sqcp;
2508 	u64 lba;
2509 	u32 num;
2510 	u32 ei_lba;
2511 	unsigned long iflags;
2512 	int ret;
2513 	bool check_prot;
2514 
2515 	switch (cmd[0]) {
2516 	case READ_16:
2517 		ei_lba = 0;
2518 		lba = get_unaligned_be64(cmd + 2);
2519 		num = get_unaligned_be32(cmd + 10);
2520 		check_prot = true;
2521 		break;
2522 	case READ_10:
2523 		ei_lba = 0;
2524 		lba = get_unaligned_be32(cmd + 2);
2525 		num = get_unaligned_be16(cmd + 7);
2526 		check_prot = true;
2527 		break;
2528 	case READ_6:
2529 		ei_lba = 0;
2530 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2531 		      (u32)(cmd[1] & 0x1f) << 16;
2532 		num = (0 == cmd[4]) ? 256 : cmd[4];
2533 		check_prot = true;
2534 		break;
2535 	case READ_12:
2536 		ei_lba = 0;
2537 		lba = get_unaligned_be32(cmd + 2);
2538 		num = get_unaligned_be32(cmd + 6);
2539 		check_prot = true;
2540 		break;
2541 	case XDWRITEREAD_10:
2542 		ei_lba = 0;
2543 		lba = get_unaligned_be32(cmd + 2);
2544 		num = get_unaligned_be16(cmd + 7);
2545 		check_prot = false;
2546 		break;
2547 	default:	/* assume READ(32) */
2548 		lba = get_unaligned_be64(cmd + 12);
2549 		ei_lba = get_unaligned_be32(cmd + 20);
2550 		num = get_unaligned_be32(cmd + 28);
2551 		check_prot = false;
2552 		break;
2553 	}
2554 	if (unlikely(have_dif_prot && check_prot)) {
2555 		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2556 		    (cmd[1] & 0xe0)) {
2557 			mk_sense_invalid_opcode(scp);
2558 			return check_condition_result;
2559 		}
2560 		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2561 		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2562 		    (cmd[1] & 0xe0) == 0)
2563 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2564 				    "to DIF device\n");
2565 	}
2566 	if (unlikely(sdebug_any_injecting_opt)) {
2567 		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2568 
2569 		if (sqcp) {
2570 			if (sqcp->inj_short)
2571 				num /= 2;
2572 		}
2573 	} else
2574 		sqcp = NULL;
2575 
2576 	/* inline check_device_access_params() */
2577 	if (unlikely(lba + num > sdebug_capacity)) {
2578 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2579 		return check_condition_result;
2580 	}
2581 	/* transfer length excessive (tie in to block limits VPD page) */
2582 	if (unlikely(num > sdebug_store_sectors)) {
2583 		/* needs work to find which cdb byte 'num' comes from */
2584 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2585 		return check_condition_result;
2586 	}
2587 
2588 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2589 		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2590 		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
2591 		/* claim unrecoverable read error */
2592 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2593 		/* set info field and valid bit for fixed descriptor */
2594 		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2595 			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
2596 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
2597 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2598 			put_unaligned_be32(ret, scp->sense_buffer + 3);
2599 		}
2600 		scsi_set_resid(scp, scsi_bufflen(scp));
2601 		return check_condition_result;
2602 	}
2603 
2604 	read_lock_irqsave(&atomic_rw, iflags);
2605 
2606 	/* DIX + T10 DIF */
2607 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2608 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2609 
2610 		if (prot_ret) {
2611 			read_unlock_irqrestore(&atomic_rw, iflags);
2612 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2613 			return illegal_condition_result;
2614 		}
2615 	}
2616 
2617 	ret = do_device_access(scp, lba, num, false);
2618 	read_unlock_irqrestore(&atomic_rw, iflags);
2619 	if (unlikely(ret == -1))
2620 		return DID_ERROR << 16;
2621 
2622 	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2623 
2624 	if (unlikely(sqcp)) {
2625 		if (sqcp->inj_recovered) {
2626 			mk_sense_buffer(scp, RECOVERED_ERROR,
2627 					THRESHOLD_EXCEEDED, 0);
2628 			return check_condition_result;
2629 		} else if (sqcp->inj_transport) {
2630 			mk_sense_buffer(scp, ABORTED_COMMAND,
2631 					TRANSPORT_PROBLEM, ACK_NAK_TO);
2632 			return check_condition_result;
2633 		} else if (sqcp->inj_dif) {
2634 			/* Logical block guard check failed */
2635 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2636 			return illegal_condition_result;
2637 		} else if (sqcp->inj_dix) {
2638 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2639 			return illegal_condition_result;
2640 		}
2641 	}
2642 	return 0;
2643 }
2644 
2645 static void dump_sector(unsigned char *buf, int len)
2646 {
2647 	int i, j, n;
2648 
2649 	pr_err(">>> Sector Dump <<<\n");
2650 	for (i = 0 ; i < len ; i += 16) {
2651 		char b[128];
2652 
2653 		for (j = 0, n = 0; j < 16; j++) {
2654 			unsigned char c = buf[i+j];
2655 
2656 			if (c >= 0x20 && c < 0x7e)
2657 				n += scnprintf(b + n, sizeof(b) - n,
2658 					       " %c ", buf[i+j]);
2659 			else
2660 				n += scnprintf(b + n, sizeof(b) - n,
2661 					       "%02x ", buf[i+j]);
2662 		}
2663 		pr_err("%04d: %s\n", i, b);
2664 	}
2665 }
2666 
2667 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2668 			     unsigned int sectors, u32 ei_lba)
2669 {
2670 	int ret;
2671 	struct sd_dif_tuple *sdt;
2672 	void *daddr;
2673 	sector_t sector = start_sec;
2674 	int ppage_offset;
2675 	int dpage_offset;
2676 	struct sg_mapping_iter diter;
2677 	struct sg_mapping_iter piter;
2678 
2679 	BUG_ON(scsi_sg_count(SCpnt) == 0);
2680 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2681 
2682 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2683 			scsi_prot_sg_count(SCpnt),
2684 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2685 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2686 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2687 
2688 	/* For each protection page */
2689 	while (sg_miter_next(&piter)) {
2690 		dpage_offset = 0;
2691 		if (WARN_ON(!sg_miter_next(&diter))) {
2692 			ret = 0x01;
2693 			goto out;
2694 		}
2695 
2696 		for (ppage_offset = 0; ppage_offset < piter.length;
2697 		     ppage_offset += sizeof(struct sd_dif_tuple)) {
2698 			/* If we're at the end of the current
2699 			 * data page advance to the next one
2700 			 */
2701 			if (dpage_offset >= diter.length) {
2702 				if (WARN_ON(!sg_miter_next(&diter))) {
2703 					ret = 0x01;
2704 					goto out;
2705 				}
2706 				dpage_offset = 0;
2707 			}
2708 
2709 			sdt = piter.addr + ppage_offset;
2710 			daddr = diter.addr + dpage_offset;
2711 
2712 			ret = dif_verify(sdt, daddr, sector, ei_lba);
2713 			if (ret) {
2714 				dump_sector(daddr, sdebug_sector_size);
2715 				goto out;
2716 			}
2717 
2718 			sector++;
2719 			ei_lba++;
2720 			dpage_offset += sdebug_sector_size;
2721 		}
2722 		diter.consumed = dpage_offset;
2723 		sg_miter_stop(&diter);
2724 	}
2725 	sg_miter_stop(&piter);
2726 
2727 	dif_copy_prot(SCpnt, start_sec, sectors, false);
2728 	dix_writes++;
2729 
2730 	return 0;
2731 
2732 out:
2733 	dif_errors++;
2734 	sg_miter_stop(&diter);
2735 	sg_miter_stop(&piter);
2736 	return ret;
2737 }
2738 
2739 static unsigned long lba_to_map_index(sector_t lba)
2740 {
2741 	if (sdebug_unmap_alignment)
2742 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2743 	sector_div(lba, sdebug_unmap_granularity);
2744 	return lba;
2745 }
2746 
2747 static sector_t map_index_to_lba(unsigned long index)
2748 {
2749 	sector_t lba = index * sdebug_unmap_granularity;
2750 
2751 	if (sdebug_unmap_alignment)
2752 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2753 	return lba;
2754 }
2755 
2756 static unsigned int map_state(sector_t lba, unsigned int *num)
2757 {
2758 	sector_t end;
2759 	unsigned int mapped;
2760 	unsigned long index;
2761 	unsigned long next;
2762 
2763 	index = lba_to_map_index(lba);
2764 	mapped = test_bit(index, map_storep);
2765 
2766 	if (mapped)
2767 		next = find_next_zero_bit(map_storep, map_size, index);
2768 	else
2769 		next = find_next_bit(map_storep, map_size, index);
2770 
2771 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2772 	*num = end - lba;
2773 	return mapped;
2774 }
2775 
2776 static void map_region(sector_t lba, unsigned int len)
2777 {
2778 	sector_t end = lba + len;
2779 
2780 	while (lba < end) {
2781 		unsigned long index = lba_to_map_index(lba);
2782 
2783 		if (index < map_size)
2784 			set_bit(index, map_storep);
2785 
2786 		lba = map_index_to_lba(index + 1);
2787 	}
2788 }
2789 
2790 static void unmap_region(sector_t lba, unsigned int len)
2791 {
2792 	sector_t end = lba + len;
2793 
2794 	while (lba < end) {
2795 		unsigned long index = lba_to_map_index(lba);
2796 
2797 		if (lba == map_index_to_lba(index) &&
2798 		    lba + sdebug_unmap_granularity <= end &&
2799 		    index < map_size) {
2800 			clear_bit(index, map_storep);
2801 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
2802 				memset(fake_storep +
2803 				       lba * sdebug_sector_size,
2804 				       (sdebug_lbprz & 1) ? 0 : 0xff,
2805 				       sdebug_sector_size *
2806 				       sdebug_unmap_granularity);
2807 			}
2808 			if (dif_storep) {
2809 				memset(dif_storep + lba, 0xff,
2810 				       sizeof(*dif_storep) *
2811 				       sdebug_unmap_granularity);
2812 			}
2813 		}
2814 		lba = map_index_to_lba(index + 1);
2815 	}
2816 }
2817 
2818 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2819 {
2820 	u8 *cmd = scp->cmnd;
2821 	u64 lba;
2822 	u32 num;
2823 	u32 ei_lba;
2824 	unsigned long iflags;
2825 	int ret;
2826 	bool check_prot;
2827 
2828 	switch (cmd[0]) {
2829 	case WRITE_16:
2830 		ei_lba = 0;
2831 		lba = get_unaligned_be64(cmd + 2);
2832 		num = get_unaligned_be32(cmd + 10);
2833 		check_prot = true;
2834 		break;
2835 	case WRITE_10:
2836 		ei_lba = 0;
2837 		lba = get_unaligned_be32(cmd + 2);
2838 		num = get_unaligned_be16(cmd + 7);
2839 		check_prot = true;
2840 		break;
2841 	case WRITE_6:
2842 		ei_lba = 0;
2843 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2844 		      (u32)(cmd[1] & 0x1f) << 16;
2845 		num = (0 == cmd[4]) ? 256 : cmd[4];
2846 		check_prot = true;
2847 		break;
2848 	case WRITE_12:
2849 		ei_lba = 0;
2850 		lba = get_unaligned_be32(cmd + 2);
2851 		num = get_unaligned_be32(cmd + 6);
2852 		check_prot = true;
2853 		break;
2854 	case 0x53:	/* XDWRITEREAD(10) */
2855 		ei_lba = 0;
2856 		lba = get_unaligned_be32(cmd + 2);
2857 		num = get_unaligned_be16(cmd + 7);
2858 		check_prot = false;
2859 		break;
2860 	default:	/* assume WRITE(32) */
2861 		lba = get_unaligned_be64(cmd + 12);
2862 		ei_lba = get_unaligned_be32(cmd + 20);
2863 		num = get_unaligned_be32(cmd + 28);
2864 		check_prot = false;
2865 		break;
2866 	}
2867 	if (unlikely(have_dif_prot && check_prot)) {
2868 		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
2869 		    (cmd[1] & 0xe0)) {
2870 			mk_sense_invalid_opcode(scp);
2871 			return check_condition_result;
2872 		}
2873 		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
2874 		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
2875 		    (cmd[1] & 0xe0) == 0)
2876 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2877 				    "to DIF device\n");
2878 	}
2879 
2880 	/* inline check_device_access_params() */
2881 	if (unlikely(lba + num > sdebug_capacity)) {
2882 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2883 		return check_condition_result;
2884 	}
2885 	/* transfer length excessive (tie in to block limits VPD page) */
2886 	if (unlikely(num > sdebug_store_sectors)) {
2887 		/* needs work to find which cdb byte 'num' comes from */
2888 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2889 		return check_condition_result;
2890 	}
2891 
2892 	write_lock_irqsave(&atomic_rw, iflags);
2893 
2894 	/* DIX + T10 DIF */
2895 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2896 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2897 
2898 		if (prot_ret) {
2899 			write_unlock_irqrestore(&atomic_rw, iflags);
2900 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2901 			return illegal_condition_result;
2902 		}
2903 	}
2904 
2905 	ret = do_device_access(scp, lba, num, true);
2906 	if (unlikely(scsi_debug_lbp()))
2907 		map_region(lba, num);
2908 	write_unlock_irqrestore(&atomic_rw, iflags);
2909 	if (unlikely(-1 == ret))
2910 		return DID_ERROR << 16;
2911 	else if (unlikely(sdebug_verbose &&
2912 			  (ret < (num * sdebug_sector_size))))
2913 		sdev_printk(KERN_INFO, scp->device,
2914 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2915 			    my_name, num * sdebug_sector_size, ret);
2916 
2917 	if (unlikely(sdebug_any_injecting_opt)) {
2918 		struct sdebug_queued_cmd *sqcp =
2919 				(struct sdebug_queued_cmd *)scp->host_scribble;
2920 
2921 		if (sqcp) {
2922 			if (sqcp->inj_recovered) {
2923 				mk_sense_buffer(scp, RECOVERED_ERROR,
2924 						THRESHOLD_EXCEEDED, 0);
2925 				return check_condition_result;
2926 			} else if (sqcp->inj_dif) {
2927 				/* Logical block guard check failed */
2928 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2929 				return illegal_condition_result;
2930 			} else if (sqcp->inj_dix) {
2931 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2932 				return illegal_condition_result;
2933 			}
2934 		}
2935 	}
2936 	return 0;
2937 }
2938 
2939 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
2940 			   u32 ei_lba, bool unmap, bool ndob)
2941 {
2942 	unsigned long iflags;
2943 	unsigned long long i;
2944 	int ret;
2945 	u64 lba_off;
2946 
2947 	ret = check_device_access_params(scp, lba, num);
2948 	if (ret)
2949 		return ret;
2950 
2951 	write_lock_irqsave(&atomic_rw, iflags);
2952 
2953 	if (unmap && scsi_debug_lbp()) {
2954 		unmap_region(lba, num);
2955 		goto out;
2956 	}
2957 
2958 	lba_off = lba * sdebug_sector_size;
2959 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
2960 	if (ndob) {
2961 		memset(fake_storep + lba_off, 0, sdebug_sector_size);
2962 		ret = 0;
2963 	} else
2964 		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
2965 					  sdebug_sector_size);
2966 
2967 	if (-1 == ret) {
2968 		write_unlock_irqrestore(&atomic_rw, iflags);
2969 		return DID_ERROR << 16;
2970 	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
2971 		sdev_printk(KERN_INFO, scp->device,
2972 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2973 			    my_name, "write same",
2974 			    num * sdebug_sector_size, ret);
2975 
2976 	/* Copy first sector to remaining blocks */
2977 	for (i = 1 ; i < num ; i++)
2978 		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
2979 		       fake_storep + lba_off,
2980 		       sdebug_sector_size);
2981 
2982 	if (scsi_debug_lbp())
2983 		map_region(lba, num);
2984 out:
2985 	write_unlock_irqrestore(&atomic_rw, iflags);
2986 
2987 	return 0;
2988 }
2989 
2990 static int resp_write_same_10(struct scsi_cmnd *scp,
2991 			      struct sdebug_dev_info *devip)
2992 {
2993 	u8 *cmd = scp->cmnd;
2994 	u32 lba;
2995 	u16 num;
2996 	u32 ei_lba = 0;
2997 	bool unmap = false;
2998 
2999 	if (cmd[1] & 0x8) {
3000 		if (sdebug_lbpws10 == 0) {
3001 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3002 			return check_condition_result;
3003 		} else
3004 			unmap = true;
3005 	}
3006 	lba = get_unaligned_be32(cmd + 2);
3007 	num = get_unaligned_be16(cmd + 7);
3008 	if (num > sdebug_write_same_length) {
3009 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3010 		return check_condition_result;
3011 	}
3012 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3013 }
3014 
3015 static int resp_write_same_16(struct scsi_cmnd *scp,
3016 			      struct sdebug_dev_info *devip)
3017 {
3018 	u8 *cmd = scp->cmnd;
3019 	u64 lba;
3020 	u32 num;
3021 	u32 ei_lba = 0;
3022 	bool unmap = false;
3023 	bool ndob = false;
3024 
3025 	if (cmd[1] & 0x8) {	/* UNMAP */
3026 		if (sdebug_lbpws == 0) {
3027 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3028 			return check_condition_result;
3029 		} else
3030 			unmap = true;
3031 	}
3032 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3033 		ndob = true;
3034 	lba = get_unaligned_be64(cmd + 2);
3035 	num = get_unaligned_be32(cmd + 10);
3036 	if (num > sdebug_write_same_length) {
3037 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3038 		return check_condition_result;
3039 	}
3040 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3041 }
3042 
3043 /* Note the mode field is in the same position as the (lower) service action
3044  * field. For the Report supported operation codes command, SPC-4 suggests
3045  * each mode of this command should be reported separately; for future. */
3046 static int resp_write_buffer(struct scsi_cmnd *scp,
3047 			     struct sdebug_dev_info *devip)
3048 {
3049 	u8 *cmd = scp->cmnd;
3050 	struct scsi_device *sdp = scp->device;
3051 	struct sdebug_dev_info *dp;
3052 	u8 mode;
3053 
3054 	mode = cmd[1] & 0x1f;
3055 	switch (mode) {
3056 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3057 		/* set UAs on this device only */
3058 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3059 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3060 		break;
3061 	case 0x5:	/* download MC, save and ACT */
3062 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3063 		break;
3064 	case 0x6:	/* download MC with offsets and ACT */
3065 		/* set UAs on most devices (LUs) in this target */
3066 		list_for_each_entry(dp,
3067 				    &devip->sdbg_host->dev_info_list,
3068 				    dev_list)
3069 			if (dp->target == sdp->id) {
3070 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3071 				if (devip != dp)
3072 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3073 						dp->uas_bm);
3074 			}
3075 		break;
3076 	case 0x7:	/* download MC with offsets, save, and ACT */
3077 		/* set UA on all devices (LUs) in this target */
3078 		list_for_each_entry(dp,
3079 				    &devip->sdbg_host->dev_info_list,
3080 				    dev_list)
3081 			if (dp->target == sdp->id)
3082 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3083 					dp->uas_bm);
3084 		break;
3085 	default:
3086 		/* do nothing for this command for other mode values */
3087 		break;
3088 	}
3089 	return 0;
3090 }
3091 
3092 static int resp_comp_write(struct scsi_cmnd *scp,
3093 			   struct sdebug_dev_info *devip)
3094 {
3095 	u8 *cmd = scp->cmnd;
3096 	u8 *arr;
3097 	u8 *fake_storep_hold;
3098 	u64 lba;
3099 	u32 dnum;
3100 	u32 lb_size = sdebug_sector_size;
3101 	u8 num;
3102 	unsigned long iflags;
3103 	int ret;
3104 	int retval = 0;
3105 
3106 	lba = get_unaligned_be64(cmd + 2);
3107 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
3108 	if (0 == num)
3109 		return 0;	/* degenerate case, not an error */
3110 	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
3111 	    (cmd[1] & 0xe0)) {
3112 		mk_sense_invalid_opcode(scp);
3113 		return check_condition_result;
3114 	}
3115 	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
3116 	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
3117 	    (cmd[1] & 0xe0) == 0)
3118 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3119 			    "to DIF device\n");
3120 
3121 	/* inline check_device_access_params() */
3122 	if (lba + num > sdebug_capacity) {
3123 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3124 		return check_condition_result;
3125 	}
3126 	/* transfer length excessive (tie in to block limits VPD page) */
3127 	if (num > sdebug_store_sectors) {
3128 		/* needs work to find which cdb byte 'num' comes from */
3129 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3130 		return check_condition_result;
3131 	}
3132 	dnum = 2 * num;
3133 	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3134 	if (NULL == arr) {
3135 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3136 				INSUFF_RES_ASCQ);
3137 		return check_condition_result;
3138 	}
3139 
3140 	write_lock_irqsave(&atomic_rw, iflags);
3141 
3142 	/* trick do_device_access() to fetch both compare and write buffers
3143 	 * from data-in into arr. Safe (atomic) since write_lock held. */
3144 	fake_storep_hold = fake_storep;
3145 	fake_storep = arr;
3146 	ret = do_device_access(scp, 0, dnum, true);
3147 	fake_storep = fake_storep_hold;
3148 	if (ret == -1) {
3149 		retval = DID_ERROR << 16;
3150 		goto cleanup;
3151 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
3152 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3153 			    "indicated=%u, IO sent=%d bytes\n", my_name,
3154 			    dnum * lb_size, ret);
3155 	if (!comp_write_worker(lba, num, arr)) {
3156 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3157 		retval = check_condition_result;
3158 		goto cleanup;
3159 	}
3160 	if (scsi_debug_lbp())
3161 		map_region(lba, num);
3162 cleanup:
3163 	write_unlock_irqrestore(&atomic_rw, iflags);
3164 	kfree(arr);
3165 	return retval;
3166 }
3167 
3168 struct unmap_block_desc {
3169 	__be64	lba;
3170 	__be32	blocks;
3171 	__be32	__reserved;
3172 };
3173 
3174 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3175 {
3176 	unsigned char *buf;
3177 	struct unmap_block_desc *desc;
3178 	unsigned int i, payload_len, descriptors;
3179 	int ret;
3180 	unsigned long iflags;
3181 
3182 
3183 	if (!scsi_debug_lbp())
3184 		return 0;	/* fib and say its done */
3185 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3186 	BUG_ON(scsi_bufflen(scp) != payload_len);
3187 
3188 	descriptors = (payload_len - 8) / 16;
3189 	if (descriptors > sdebug_unmap_max_desc) {
3190 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3191 		return check_condition_result;
3192 	}
3193 
3194 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3195 	if (!buf) {
3196 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3197 				INSUFF_RES_ASCQ);
3198 		return check_condition_result;
3199 	}
3200 
3201 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3202 
3203 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3204 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3205 
3206 	desc = (void *)&buf[8];
3207 
3208 	write_lock_irqsave(&atomic_rw, iflags);
3209 
3210 	for (i = 0 ; i < descriptors ; i++) {
3211 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3212 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3213 
3214 		ret = check_device_access_params(scp, lba, num);
3215 		if (ret)
3216 			goto out;
3217 
3218 		unmap_region(lba, num);
3219 	}
3220 
3221 	ret = 0;
3222 
3223 out:
3224 	write_unlock_irqrestore(&atomic_rw, iflags);
3225 	kfree(buf);
3226 
3227 	return ret;
3228 }
3229 
3230 #define SDEBUG_GET_LBA_STATUS_LEN 32
3231 
3232 static int resp_get_lba_status(struct scsi_cmnd *scp,
3233 			       struct sdebug_dev_info *devip)
3234 {
3235 	u8 *cmd = scp->cmnd;
3236 	u64 lba;
3237 	u32 alloc_len, mapped, num;
3238 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3239 	int ret;
3240 
3241 	lba = get_unaligned_be64(cmd + 2);
3242 	alloc_len = get_unaligned_be32(cmd + 10);
3243 
3244 	if (alloc_len < 24)
3245 		return 0;
3246 
3247 	ret = check_device_access_params(scp, lba, 1);
3248 	if (ret)
3249 		return ret;
3250 
3251 	if (scsi_debug_lbp())
3252 		mapped = map_state(lba, &num);
3253 	else {
3254 		mapped = 1;
3255 		/* following just in case virtual_gb changed */
3256 		sdebug_capacity = get_sdebug_capacity();
3257 		if (sdebug_capacity - lba <= 0xffffffff)
3258 			num = sdebug_capacity - lba;
3259 		else
3260 			num = 0xffffffff;
3261 	}
3262 
3263 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3264 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3265 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3266 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3267 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3268 
3269 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3270 }
3271 
3272 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3273  * (W-LUN), the normal Linux scanning logic does not associate it with a
3274  * device (e.g. /dev/sg7). The following magic will make that association:
3275  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3276  * where <n> is a host number. If there are multiple targets in a host then
3277  * the above will associate a W-LUN to each target. To only get a W-LUN
3278  * for target 2, then use "echo '- 2 49409' > scan" .
3279  */
3280 static int resp_report_luns(struct scsi_cmnd *scp,
3281 			    struct sdebug_dev_info *devip)
3282 {
3283 	unsigned char *cmd = scp->cmnd;
3284 	unsigned int alloc_len;
3285 	unsigned char select_report;
3286 	u64 lun;
3287 	struct scsi_lun *lun_p;
3288 	u8 *arr;
3289 	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
3290 	unsigned int wlun_cnt;	/* report luns W-LUN count */
3291 	unsigned int tlun_cnt;	/* total LUN count */
3292 	unsigned int rlen;	/* response length (in bytes) */
3293 	int i, res;
3294 
3295 	clear_luns_changed_on_target(devip);
3296 
3297 	select_report = cmd[2];
3298 	alloc_len = get_unaligned_be32(cmd + 6);
3299 
3300 	if (alloc_len < 4) {
3301 		pr_err("alloc len too small %d\n", alloc_len);
3302 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3303 		return check_condition_result;
3304 	}
3305 
3306 	switch (select_report) {
3307 	case 0:		/* all LUNs apart from W-LUNs */
3308 		lun_cnt = sdebug_max_luns;
3309 		wlun_cnt = 0;
3310 		break;
3311 	case 1:		/* only W-LUNs */
3312 		lun_cnt = 0;
3313 		wlun_cnt = 1;
3314 		break;
3315 	case 2:		/* all LUNs */
3316 		lun_cnt = sdebug_max_luns;
3317 		wlun_cnt = 1;
3318 		break;
3319 	case 0x10:	/* only administrative LUs */
3320 	case 0x11:	/* see SPC-5 */
3321 	case 0x12:	/* only subsiduary LUs owned by referenced LU */
3322 	default:
3323 		pr_debug("select report invalid %d\n", select_report);
3324 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3325 		return check_condition_result;
3326 	}
3327 
3328 	if (sdebug_no_lun_0 && (lun_cnt > 0))
3329 		--lun_cnt;
3330 
3331 	tlun_cnt = lun_cnt + wlun_cnt;
3332 
3333 	rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
3334 	arr = vmalloc(rlen);
3335 	if (!arr) {
3336 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3337 				INSUFF_RES_ASCQ);
3338 		return check_condition_result;
3339 	}
3340 	memset(arr, 0, rlen);
3341 	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3342 		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3343 
3344 	/* luns start at byte 8 in response following the header */
3345 	lun_p = (struct scsi_lun *)&arr[8];
3346 
3347 	/* LUNs use single level peripheral device addressing method */
3348 	lun = sdebug_no_lun_0 ? 1 : 0;
3349 	for (i = 0; i < lun_cnt; i++)
3350 		int_to_scsilun(lun++, lun_p++);
3351 
3352 	if (wlun_cnt)
3353 		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);
3354 
3355 	put_unaligned_be32(rlen - 8, &arr[0]);
3356 
3357 	res = fill_from_dev_buffer(scp, arr, rlen);
3358 	vfree(arr);
3359 	return res;
3360 }
3361 
3362 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3363 			    unsigned int num, struct sdebug_dev_info *devip)
3364 {
3365 	int j;
3366 	unsigned char *kaddr, *buf;
3367 	unsigned int offset;
3368 	struct scsi_data_buffer *sdb = scsi_in(scp);
3369 	struct sg_mapping_iter miter;
3370 
3371 	/* better not to use temporary buffer. */
3372 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3373 	if (!buf) {
3374 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3375 				INSUFF_RES_ASCQ);
3376 		return check_condition_result;
3377 	}
3378 
3379 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3380 
3381 	offset = 0;
3382 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3383 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3384 
3385 	while (sg_miter_next(&miter)) {
3386 		kaddr = miter.addr;
3387 		for (j = 0; j < miter.length; j++)
3388 			*(kaddr + j) ^= *(buf + offset + j);
3389 
3390 		offset += miter.length;
3391 	}
3392 	sg_miter_stop(&miter);
3393 	kfree(buf);
3394 
3395 	return 0;
3396 }
3397 
3398 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3399 			       struct sdebug_dev_info *devip)
3400 {
3401 	u8 *cmd = scp->cmnd;
3402 	u64 lba;
3403 	u32 num;
3404 	int errsts;
3405 
3406 	if (!scsi_bidi_cmnd(scp)) {
3407 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3408 				INSUFF_RES_ASCQ);
3409 		return check_condition_result;
3410 	}
3411 	errsts = resp_read_dt0(scp, devip);
3412 	if (errsts)
3413 		return errsts;
3414 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3415 		errsts = resp_write_dt0(scp, devip);
3416 		if (errsts)
3417 			return errsts;
3418 	}
3419 	lba = get_unaligned_be32(cmd + 2);
3420 	num = get_unaligned_be16(cmd + 7);
3421 	return resp_xdwriteread(scp, lba, num, devip);
3422 }
3423 
3424 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3425 {
3426 	struct sdebug_queue *sqp = sdebug_q_arr;
3427 
3428 	if (sdebug_mq_active) {
3429 		u32 tag = blk_mq_unique_tag(cmnd->request);
3430 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3431 
3432 		if (unlikely(hwq >= submit_queues)) {
3433 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3434 			hwq %= submit_queues;
3435 		}
3436 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3437 		return sqp + hwq;
3438 	} else
3439 		return sqp;
3440 }
3441 
3442 /* Queued (deferred) command completions converge here. */
3443 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3444 {
3445 	int qc_idx;
3446 	int retiring = 0;
3447 	unsigned long iflags;
3448 	struct sdebug_queue *sqp;
3449 	struct sdebug_queued_cmd *sqcp;
3450 	struct scsi_cmnd *scp;
3451 	struct sdebug_dev_info *devip;
3452 
3453 	qc_idx = sd_dp->qc_idx;
3454 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
3455 	if (sdebug_statistics) {
3456 		atomic_inc(&sdebug_completions);
3457 		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3458 			atomic_inc(&sdebug_miss_cpus);
3459 	}
3460 	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3461 		pr_err("wild qc_idx=%d\n", qc_idx);
3462 		return;
3463 	}
3464 	spin_lock_irqsave(&sqp->qc_lock, iflags);
3465 	sqcp = &sqp->qc_arr[qc_idx];
3466 	scp = sqcp->a_cmnd;
3467 	if (unlikely(scp == NULL)) {
3468 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3469 		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3470 		       sd_dp->sqa_idx, qc_idx);
3471 		return;
3472 	}
3473 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
3474 	if (likely(devip))
3475 		atomic_dec(&devip->num_in_q);
3476 	else
3477 		pr_err("devip=NULL\n");
3478 	if (unlikely(atomic_read(&retired_max_queue) > 0))
3479 		retiring = 1;
3480 
3481 	sqcp->a_cmnd = NULL;
3482 	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3483 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3484 		pr_err("Unexpected completion\n");
3485 		return;
3486 	}
3487 
3488 	if (unlikely(retiring)) {	/* user has reduced max_queue */
3489 		int k, retval;
3490 
3491 		retval = atomic_read(&retired_max_queue);
3492 		if (qc_idx >= retval) {
3493 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3494 			pr_err("index %d too large\n", retval);
3495 			return;
3496 		}
3497 		k = find_last_bit(sqp->in_use_bm, retval);
3498 		if ((k < sdebug_max_queue) || (k == retval))
3499 			atomic_set(&retired_max_queue, 0);
3500 		else
3501 			atomic_set(&retired_max_queue, k + 1);
3502 	}
3503 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3504 	scp->scsi_done(scp); /* callback to mid level */
3505 }
3506 
3507 /* When high resolution timer goes off this function is called. */
3508 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3509 {
3510 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3511 						  hrt);
3512 	sdebug_q_cmd_complete(sd_dp);
3513 	return HRTIMER_NORESTART;
3514 }
3515 
3516 /* When work queue schedules work, it calls this function. */
3517 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3518 {
3519 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3520 						  ew.work);
3521 	sdebug_q_cmd_complete(sd_dp);
3522 }
3523 
3524 static bool got_shared_uuid;
3525 static uuid_be shared_uuid;
3526 
3527 static struct sdebug_dev_info *sdebug_device_create(
3528 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3529 {
3530 	struct sdebug_dev_info *devip;
3531 
3532 	devip = kzalloc(sizeof(*devip), flags);
3533 	if (devip) {
3534 		if (sdebug_uuid_ctl == 1)
3535 			uuid_be_gen(&devip->lu_name);
3536 		else if (sdebug_uuid_ctl == 2) {
3537 			if (got_shared_uuid)
3538 				devip->lu_name = shared_uuid;
3539 			else {
3540 				uuid_be_gen(&shared_uuid);
3541 				got_shared_uuid = true;
3542 				devip->lu_name = shared_uuid;
3543 			}
3544 		}
3545 		devip->sdbg_host = sdbg_host;
3546 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3547 	}
3548 	return devip;
3549 }
3550 
3551 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3552 {
3553 	struct sdebug_host_info *sdbg_host;
3554 	struct sdebug_dev_info *open_devip = NULL;
3555 	struct sdebug_dev_info *devip;
3556 
3557 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3558 	if (!sdbg_host) {
3559 		pr_err("Host info NULL\n");
3560 		return NULL;
3561         }
3562 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3563 		if ((devip->used) && (devip->channel == sdev->channel) &&
3564                     (devip->target == sdev->id) &&
3565                     (devip->lun == sdev->lun))
3566                         return devip;
3567 		else {
3568 			if ((!devip->used) && (!open_devip))
3569 				open_devip = devip;
3570 		}
3571 	}
3572 	if (!open_devip) { /* try and make a new one */
3573 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3574 		if (!open_devip) {
3575 			pr_err("out of memory at line %d\n", __LINE__);
3576 			return NULL;
3577 		}
3578 	}
3579 
3580 	open_devip->channel = sdev->channel;
3581 	open_devip->target = sdev->id;
3582 	open_devip->lun = sdev->lun;
3583 	open_devip->sdbg_host = sdbg_host;
3584 	atomic_set(&open_devip->num_in_q, 0);
3585 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3586 	open_devip->used = true;
3587 	return open_devip;
3588 }
3589 
3590 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3591 {
3592 	if (sdebug_verbose)
3593 		pr_info("slave_alloc <%u %u %u %llu>\n",
3594 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3595 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3596 	return 0;
3597 }
3598 
3599 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3600 {
3601 	struct sdebug_dev_info *devip =
3602 			(struct sdebug_dev_info *)sdp->hostdata;
3603 
3604 	if (sdebug_verbose)
3605 		pr_info("slave_configure <%u %u %u %llu>\n",
3606 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3607 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3608 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3609 	if (devip == NULL) {
3610 		devip = find_build_dev_info(sdp);
3611 		if (devip == NULL)
3612 			return 1;  /* no resources, will be marked offline */
3613 	}
3614 	sdp->hostdata = devip;
3615 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3616 	if (sdebug_no_uld)
3617 		sdp->no_uld_attach = 1;
3618 	return 0;
3619 }
3620 
3621 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3622 {
3623 	struct sdebug_dev_info *devip =
3624 		(struct sdebug_dev_info *)sdp->hostdata;
3625 
3626 	if (sdebug_verbose)
3627 		pr_info("slave_destroy <%u %u %u %llu>\n",
3628 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3629 	if (devip) {
3630 		/* make this slot available for re-use */
3631 		devip->used = false;
3632 		sdp->hostdata = NULL;
3633 	}
3634 }
3635 
3636 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3637 {
3638 	if (!sd_dp)
3639 		return;
3640 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3641 		hrtimer_cancel(&sd_dp->hrt);
3642 	else if (sdebug_jdelay < 0)
3643 		cancel_work_sync(&sd_dp->ew.work);
3644 }
3645 
3646 /* If @cmnd found deletes its timer or work queue and returns true; else
3647    returns false */
3648 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3649 {
3650 	unsigned long iflags;
3651 	int j, k, qmax, r_qmax;
3652 	struct sdebug_queue *sqp;
3653 	struct sdebug_queued_cmd *sqcp;
3654 	struct sdebug_dev_info *devip;
3655 	struct sdebug_defer *sd_dp;
3656 
3657 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3658 		spin_lock_irqsave(&sqp->qc_lock, iflags);
3659 		qmax = sdebug_max_queue;
3660 		r_qmax = atomic_read(&retired_max_queue);
3661 		if (r_qmax > qmax)
3662 			qmax = r_qmax;
3663 		for (k = 0; k < qmax; ++k) {
3664 			if (test_bit(k, sqp->in_use_bm)) {
3665 				sqcp = &sqp->qc_arr[k];
3666 				if (cmnd != sqcp->a_cmnd)
3667 					continue;
3668 				/* found */
3669 				devip = (struct sdebug_dev_info *)
3670 						cmnd->device->hostdata;
3671 				if (devip)
3672 					atomic_dec(&devip->num_in_q);
3673 				sqcp->a_cmnd = NULL;
3674 				sd_dp = sqcp->sd_dp;
3675 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3676 				stop_qc_helper(sd_dp);
3677 				clear_bit(k, sqp->in_use_bm);
3678 				return true;
3679 			}
3680 		}
3681 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3682 	}
3683 	return false;
3684 }
3685 
3686 /* Deletes (stops) timers or work queues of all queued commands */
3687 static void stop_all_queued(void)
3688 {
3689 	unsigned long iflags;
3690 	int j, k;
3691 	struct sdebug_queue *sqp;
3692 	struct sdebug_queued_cmd *sqcp;
3693 	struct sdebug_dev_info *devip;
3694 	struct sdebug_defer *sd_dp;
3695 
3696 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3697 		spin_lock_irqsave(&sqp->qc_lock, iflags);
3698 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3699 			if (test_bit(k, sqp->in_use_bm)) {
3700 				sqcp = &sqp->qc_arr[k];
3701 				if (sqcp->a_cmnd == NULL)
3702 					continue;
3703 				devip = (struct sdebug_dev_info *)
3704 					sqcp->a_cmnd->device->hostdata;
3705 				if (devip)
3706 					atomic_dec(&devip->num_in_q);
3707 				sqcp->a_cmnd = NULL;
3708 				sd_dp = sqcp->sd_dp;
3709 				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3710 				stop_qc_helper(sd_dp);
3711 				clear_bit(k, sqp->in_use_bm);
3712 				spin_lock_irqsave(&sqp->qc_lock, iflags);
3713 			}
3714 		}
3715 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3716 	}
3717 }
3718 
3719 /* Free queued command memory on heap */
3720 static void free_all_queued(void)
3721 {
3722 	int j, k;
3723 	struct sdebug_queue *sqp;
3724 	struct sdebug_queued_cmd *sqcp;
3725 
3726 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3727 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3728 			sqcp = &sqp->qc_arr[k];
3729 			kfree(sqcp->sd_dp);
3730 			sqcp->sd_dp = NULL;
3731 		}
3732 	}
3733 }
3734 
3735 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3736 {
3737 	bool ok;
3738 
3739 	++num_aborts;
3740 	if (SCpnt) {
3741 		ok = stop_queued_cmnd(SCpnt);
3742 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3743 			sdev_printk(KERN_INFO, SCpnt->device,
3744 				    "%s: command%s found\n", __func__,
3745 				    ok ? "" : " not");
3746 	}
3747 	return SUCCESS;
3748 }
3749 
3750 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3751 {
3752 	++num_dev_resets;
3753 	if (SCpnt && SCpnt->device) {
3754 		struct scsi_device *sdp = SCpnt->device;
3755 		struct sdebug_dev_info *devip =
3756 				(struct sdebug_dev_info *)sdp->hostdata;
3757 
3758 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3759 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3760 		if (devip)
3761 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3762 	}
3763 	return SUCCESS;
3764 }
3765 
3766 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3767 {
3768 	struct sdebug_host_info *sdbg_host;
3769 	struct sdebug_dev_info *devip;
3770 	struct scsi_device *sdp;
3771 	struct Scsi_Host *hp;
3772 	int k = 0;
3773 
3774 	++num_target_resets;
3775 	if (!SCpnt)
3776 		goto lie;
3777 	sdp = SCpnt->device;
3778 	if (!sdp)
3779 		goto lie;
3780 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3781 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3782 	hp = sdp->host;
3783 	if (!hp)
3784 		goto lie;
3785 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3786 	if (sdbg_host) {
3787 		list_for_each_entry(devip,
3788 				    &sdbg_host->dev_info_list,
3789 				    dev_list)
3790 			if (devip->target == sdp->id) {
3791 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3792 				++k;
3793 			}
3794 	}
3795 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3796 		sdev_printk(KERN_INFO, sdp,
3797 			    "%s: %d device(s) found in target\n", __func__, k);
3798 lie:
3799 	return SUCCESS;
3800 }
3801 
3802 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3803 {
3804 	struct sdebug_host_info *sdbg_host;
3805 	struct sdebug_dev_info *devip;
3806         struct scsi_device * sdp;
3807         struct Scsi_Host * hp;
3808 	int k = 0;
3809 
3810 	++num_bus_resets;
3811 	if (!(SCpnt && SCpnt->device))
3812 		goto lie;
3813 	sdp = SCpnt->device;
3814 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3815 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3816 	hp = sdp->host;
3817 	if (hp) {
3818 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3819 		if (sdbg_host) {
3820 			list_for_each_entry(devip,
3821                                             &sdbg_host->dev_info_list,
3822 					    dev_list) {
3823 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3824 				++k;
3825 			}
3826 		}
3827 	}
3828 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3829 		sdev_printk(KERN_INFO, sdp,
3830 			    "%s: %d device(s) found in host\n", __func__, k);
3831 lie:
3832 	return SUCCESS;
3833 }
3834 
3835 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3836 {
3837 	struct sdebug_host_info * sdbg_host;
3838 	struct sdebug_dev_info *devip;
3839 	int k = 0;
3840 
3841 	++num_host_resets;
3842 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3843 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3844         spin_lock(&sdebug_host_list_lock);
3845         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3846 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3847 				    dev_list) {
3848 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3849 			++k;
3850 		}
3851         }
3852         spin_unlock(&sdebug_host_list_lock);
3853 	stop_all_queued();
3854 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3855 		sdev_printk(KERN_INFO, SCpnt->device,
3856 			    "%s: %d device(s) found\n", __func__, k);
3857 	return SUCCESS;
3858 }
3859 
3860 static void __init sdebug_build_parts(unsigned char *ramp,
3861 				      unsigned long store_size)
3862 {
3863 	struct partition * pp;
3864 	int starts[SDEBUG_MAX_PARTS + 2];
3865 	int sectors_per_part, num_sectors, k;
3866 	int heads_by_sects, start_sec, end_sec;
3867 
3868 	/* assume partition table already zeroed */
3869 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
3870 		return;
3871 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3872 		sdebug_num_parts = SDEBUG_MAX_PARTS;
3873 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3874 	}
3875 	num_sectors = (int)sdebug_store_sectors;
3876 	sectors_per_part = (num_sectors - sdebug_sectors_per)
3877 			   / sdebug_num_parts;
3878 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
3879         starts[0] = sdebug_sectors_per;
3880 	for (k = 1; k < sdebug_num_parts; ++k)
3881 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
3882 			    * heads_by_sects;
3883 	starts[sdebug_num_parts] = num_sectors;
3884 	starts[sdebug_num_parts + 1] = 0;
3885 
3886 	ramp[510] = 0x55;	/* magic partition markings */
3887 	ramp[511] = 0xAA;
3888 	pp = (struct partition *)(ramp + 0x1be);
3889 	for (k = 0; starts[k + 1]; ++k, ++pp) {
3890 		start_sec = starts[k];
3891 		end_sec = starts[k + 1] - 1;
3892 		pp->boot_ind = 0;
3893 
3894 		pp->cyl = start_sec / heads_by_sects;
3895 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
3896 			   / sdebug_sectors_per;
3897 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
3898 
3899 		pp->end_cyl = end_sec / heads_by_sects;
3900 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3901 			       / sdebug_sectors_per;
3902 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3903 
3904 		pp->start_sect = cpu_to_le32(start_sec);
3905 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3906 		pp->sys_ind = 0x83;	/* plain Linux partition */
3907 	}
3908 }
3909 
3910 static void block_unblock_all_queues(bool block)
3911 {
3912 	int j;
3913 	struct sdebug_queue *sqp;
3914 
3915 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3916 		atomic_set(&sqp->blocked, (int)block);
3917 }
3918 
3919 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3920  * commands will be processed normally before triggers occur.
3921  */
3922 static void tweak_cmnd_count(void)
3923 {
3924 	int count, modulo;
3925 
3926 	modulo = abs(sdebug_every_nth);
3927 	if (modulo < 2)
3928 		return;
3929 	block_unblock_all_queues(true);
3930 	count = atomic_read(&sdebug_cmnd_count);
3931 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3932 	block_unblock_all_queues(false);
3933 }
3934 
3935 static void clear_queue_stats(void)
3936 {
3937 	atomic_set(&sdebug_cmnd_count, 0);
3938 	atomic_set(&sdebug_completions, 0);
3939 	atomic_set(&sdebug_miss_cpus, 0);
3940 	atomic_set(&sdebug_a_tsf, 0);
3941 }
3942 
3943 static void setup_inject(struct sdebug_queue *sqp,
3944 			 struct sdebug_queued_cmd *sqcp)
3945 {
3946 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3947 		return;
3948 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3949 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3950 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3951 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3952 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3953 }
3954 
3955 /* Complete the processing of the thread that queued a SCSI command to this
3956  * driver. It either completes the command by calling cmnd_done() or
3957  * schedules a hr timer or work queue then returns 0. Returns
3958  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3959  */
3960 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3961 			 int scsi_result, int delta_jiff)
3962 {
3963 	unsigned long iflags;
3964 	int k, num_in_q, qdepth, inject;
3965 	struct sdebug_queue *sqp;
3966 	struct sdebug_queued_cmd *sqcp;
3967 	struct scsi_device *sdp;
3968 	struct sdebug_defer *sd_dp;
3969 
3970 	if (unlikely(devip == NULL)) {
3971 		if (scsi_result == 0)
3972 			scsi_result = DID_NO_CONNECT << 16;
3973 		goto respond_in_thread;
3974 	}
3975 	sdp = cmnd->device;
3976 
3977 	if (unlikely(sdebug_verbose && scsi_result))
3978 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3979 			    __func__, scsi_result);
3980 	if (delta_jiff == 0)
3981 		goto respond_in_thread;
3982 
3983 	/* schedule the response at a later time if resources permit */
3984 	sqp = get_queue(cmnd);
3985 	spin_lock_irqsave(&sqp->qc_lock, iflags);
3986 	if (unlikely(atomic_read(&sqp->blocked))) {
3987 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3988 		return SCSI_MLQUEUE_HOST_BUSY;
3989 	}
3990 	num_in_q = atomic_read(&devip->num_in_q);
3991 	qdepth = cmnd->device->queue_depth;
3992 	inject = 0;
3993 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
3994 		if (scsi_result) {
3995 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3996 			goto respond_in_thread;
3997 		} else
3998 			scsi_result = device_qfull_result;
3999 	} else if (unlikely(sdebug_every_nth &&
4000 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4001 			    (scsi_result == 0))) {
4002 		if ((num_in_q == (qdepth - 1)) &&
4003 		    (atomic_inc_return(&sdebug_a_tsf) >=
4004 		     abs(sdebug_every_nth))) {
4005 			atomic_set(&sdebug_a_tsf, 0);
4006 			inject = 1;
4007 			scsi_result = device_qfull_result;
4008 		}
4009 	}
4010 
4011 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4012 	if (unlikely(k >= sdebug_max_queue)) {
4013 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4014 		if (scsi_result)
4015 			goto respond_in_thread;
4016 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4017 			scsi_result = device_qfull_result;
4018 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4019 			sdev_printk(KERN_INFO, sdp,
4020 				    "%s: max_queue=%d exceeded, %s\n",
4021 				    __func__, sdebug_max_queue,
4022 				    (scsi_result ?  "status: TASK SET FULL" :
4023 						    "report: host busy"));
4024 		if (scsi_result)
4025 			goto respond_in_thread;
4026 		else
4027 			return SCSI_MLQUEUE_HOST_BUSY;
4028 	}
4029 	__set_bit(k, sqp->in_use_bm);
4030 	atomic_inc(&devip->num_in_q);
4031 	sqcp = &sqp->qc_arr[k];
4032 	sqcp->a_cmnd = cmnd;
4033 	cmnd->host_scribble = (unsigned char *)sqcp;
4034 	cmnd->result = scsi_result;
4035 	sd_dp = sqcp->sd_dp;
4036 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4037 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4038 		setup_inject(sqp, sqcp);
4039 	if (delta_jiff > 0 || sdebug_ndelay > 0) {
4040 		ktime_t kt;
4041 
4042 		if (delta_jiff > 0) {
4043 			struct timespec ts;
4044 
4045 			jiffies_to_timespec(delta_jiff, &ts);
4046 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4047 		} else
4048 			kt = ktime_set(0, sdebug_ndelay);
4049 		if (NULL == sd_dp) {
4050 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4051 			if (NULL == sd_dp)
4052 				return SCSI_MLQUEUE_HOST_BUSY;
4053 			sqcp->sd_dp = sd_dp;
4054 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4055 				     HRTIMER_MODE_REL_PINNED);
4056 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4057 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4058 			sd_dp->qc_idx = k;
4059 		}
4060 		if (sdebug_statistics)
4061 			sd_dp->issuing_cpu = raw_smp_processor_id();
4062 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4063 	} else {	/* jdelay < 0, use work queue */
4064 		if (NULL == sd_dp) {
4065 			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4066 			if (NULL == sd_dp)
4067 				return SCSI_MLQUEUE_HOST_BUSY;
4068 			sqcp->sd_dp = sd_dp;
4069 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4070 			sd_dp->qc_idx = k;
4071 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4072 		}
4073 		if (sdebug_statistics)
4074 			sd_dp->issuing_cpu = raw_smp_processor_id();
4075 		schedule_work(&sd_dp->ew.work);
4076 	}
4077 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4078 		     (scsi_result == device_qfull_result)))
4079 		sdev_printk(KERN_INFO, sdp,
4080 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4081 			    num_in_q, (inject ? "<inject> " : ""),
4082 			    "status: TASK SET FULL");
4083 	return 0;
4084 
4085 respond_in_thread:	/* call back to mid-layer using invocation thread */
4086 	cmnd->result = scsi_result;
4087 	cmnd->scsi_done(cmnd);
4088 	return 0;
4089 }
4090 
4091 /* Note: The following macros create attribute files in the
4092    /sys/module/scsi_debug/parameters directory. Unfortunately this
4093    driver is unaware of a change and cannot trigger auxiliary actions
4094    as it can when the corresponding attribute in the
4095    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4096  */
4097 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4098 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4099 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4100 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4101 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4102 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4103 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4104 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4105 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4106 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4107 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4108 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4109 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4110 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4111 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4112 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4113 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4114 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4115 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4116 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4117 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4118 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4119 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4120 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4121 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4122 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4123 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4124 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4125 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4126 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4127 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4128 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4129 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4130 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4131 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4132 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4133 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4134 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4135 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4136 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4137 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4138 		   S_IRUGO | S_IWUSR);
4139 module_param_named(write_same_length, sdebug_write_same_length, int,
4140 		   S_IRUGO | S_IWUSR);
4141 
4142 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4143 MODULE_DESCRIPTION("SCSI debug adapter driver");
4144 MODULE_LICENSE("GPL");
4145 MODULE_VERSION(SDEBUG_VERSION);
4146 
4147 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4148 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4149 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4150 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4151 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4152 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4153 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4154 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4155 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4156 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4157 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4158 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4159 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4160 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4161 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4162 MODULE_PARM_DESC(lbprz,
4163 	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4164 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4165 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4166 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4167 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4168 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4169 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
4170 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4171 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4172 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4173 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4174 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4175 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4176 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4177 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
4178 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4179 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4180 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4181 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4182 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4183 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4184 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4185 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4186 MODULE_PARM_DESC(uuid_ctl,
4187 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4188 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4189 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4190 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4191 
4192 #define SDEBUG_INFO_LEN 256
4193 static char sdebug_info[SDEBUG_INFO_LEN];
4194 
4195 static const char * scsi_debug_info(struct Scsi_Host * shp)
4196 {
4197 	int k;
4198 
4199 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4200 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4201 	if (k >= (SDEBUG_INFO_LEN - 1))
4202 		return sdebug_info;
4203 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4204 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4205 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4206 		  "statistics", (int)sdebug_statistics);
4207 	return sdebug_info;
4208 }
4209 
4210 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4211 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4212 				 int length)
4213 {
4214 	char arr[16];
4215 	int opts;
4216 	int minLen = length > 15 ? 15 : length;
4217 
4218 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4219 		return -EACCES;
4220 	memcpy(arr, buffer, minLen);
4221 	arr[minLen] = '\0';
4222 	if (1 != sscanf(arr, "%d", &opts))
4223 		return -EINVAL;
4224 	sdebug_opts = opts;
4225 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4226 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4227 	if (sdebug_every_nth != 0)
4228 		tweak_cmnd_count();
4229 	return length;
4230 }
4231 
4232 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4233  * same for each scsi_debug host (if more than one). Some of the counters
4234  * output are not atomics so might be inaccurate in a busy system. */
4235 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4236 {
4237 	int f, j, l;
4238 	struct sdebug_queue *sqp;
4239 
4240 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4241 		   SDEBUG_VERSION, sdebug_version_date);
4242 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4243 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4244 		   sdebug_opts, sdebug_every_nth);
4245 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4246 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4247 		   sdebug_sector_size, "bytes");
4248 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4249 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4250 		   num_aborts);
4251 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4252 		   num_dev_resets, num_target_resets, num_bus_resets,
4253 		   num_host_resets);
4254 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4255 		   dix_reads, dix_writes, dif_errors);
4256 	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4257 		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
4258 		   sdebug_mq_active);
4259 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4260 		   atomic_read(&sdebug_cmnd_count),
4261 		   atomic_read(&sdebug_completions),
4262 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4263 		   atomic_read(&sdebug_a_tsf));
4264 
4265 	seq_printf(m, "submit_queues=%d\n", submit_queues);
4266 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4267 		seq_printf(m, "  queue %d:\n", j);
4268 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4269 		if (f != sdebug_max_queue) {
4270 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4271 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4272 				   "first,last bits", f, l);
4273 		}
4274 	}
4275 	return 0;
4276 }
4277 
4278 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4279 {
4280 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4281 }
4282 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4283  * of delay is jiffies.
4284  */
4285 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4286 			   size_t count)
4287 {
4288 	int jdelay, res;
4289 
4290 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4291 		res = count;
4292 		if (sdebug_jdelay != jdelay) {
4293 			int j, k;
4294 			struct sdebug_queue *sqp;
4295 
4296 			block_unblock_all_queues(true);
4297 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4298 			     ++j, ++sqp) {
4299 				k = find_first_bit(sqp->in_use_bm,
4300 						   sdebug_max_queue);
4301 				if (k != sdebug_max_queue) {
4302 					res = -EBUSY;   /* queued commands */
4303 					break;
4304 				}
4305 			}
4306 			if (res > 0) {
4307 				/* make sure sdebug_defer instances get
4308 				 * re-allocated for new delay variant */
4309 				free_all_queued();
4310 				sdebug_jdelay = jdelay;
4311 				sdebug_ndelay = 0;
4312 			}
4313 			block_unblock_all_queues(false);
4314 		}
4315 		return res;
4316 	}
4317 	return -EINVAL;
4318 }
4319 static DRIVER_ATTR_RW(delay);
4320 
4321 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4322 {
4323 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4324 }
4325 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4326 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4327 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4328 			    size_t count)
4329 {
4330 	int ndelay, res;
4331 
4332 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4333 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4334 		res = count;
4335 		if (sdebug_ndelay != ndelay) {
4336 			int j, k;
4337 			struct sdebug_queue *sqp;
4338 
4339 			block_unblock_all_queues(true);
4340 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4341 			     ++j, ++sqp) {
4342 				k = find_first_bit(sqp->in_use_bm,
4343 						   sdebug_max_queue);
4344 				if (k != sdebug_max_queue) {
4345 					res = -EBUSY;   /* queued commands */
4346 					break;
4347 				}
4348 			}
4349 			if (res > 0) {
4350 				/* make sure sdebug_defer instances get
4351 				 * re-allocated for new delay variant */
4352 				free_all_queued();
4353 				sdebug_ndelay = ndelay;
4354 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4355 							: DEF_JDELAY;
4356 			}
4357 			block_unblock_all_queues(false);
4358 		}
4359 		return res;
4360 	}
4361 	return -EINVAL;
4362 }
4363 static DRIVER_ATTR_RW(ndelay);
4364 
4365 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4366 {
4367 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4368 }
4369 
4370 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4371 			  size_t count)
4372 {
4373         int opts;
4374 	char work[20];
4375 
4376         if (1 == sscanf(buf, "%10s", work)) {
4377 		if (0 == strncasecmp(work,"0x", 2)) {
4378 			if (1 == sscanf(&work[2], "%x", &opts))
4379 				goto opts_done;
4380 		} else {
4381 			if (1 == sscanf(work, "%d", &opts))
4382 				goto opts_done;
4383 		}
4384 	}
4385 	return -EINVAL;
4386 opts_done:
4387 	sdebug_opts = opts;
4388 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4389 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4390 	tweak_cmnd_count();
4391 	return count;
4392 }
4393 static DRIVER_ATTR_RW(opts);
4394 
4395 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4396 {
4397 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4398 }
4399 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4400 			   size_t count)
4401 {
4402         int n;
4403 
4404 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4405 		sdebug_ptype = n;
4406 		return count;
4407 	}
4408 	return -EINVAL;
4409 }
4410 static DRIVER_ATTR_RW(ptype);
4411 
4412 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4413 {
4414 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4415 }
4416 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4417 			    size_t count)
4418 {
4419         int n;
4420 
4421 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4422 		sdebug_dsense = n;
4423 		return count;
4424 	}
4425 	return -EINVAL;
4426 }
4427 static DRIVER_ATTR_RW(dsense);
4428 
4429 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4430 {
4431 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4432 }
4433 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4434 			     size_t count)
4435 {
4436         int n;
4437 
4438 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4439 		n = (n > 0);
4440 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4441 		if (sdebug_fake_rw != n) {
4442 			if ((0 == n) && (NULL == fake_storep)) {
4443 				unsigned long sz =
4444 					(unsigned long)sdebug_dev_size_mb *
4445 					1048576;
4446 
4447 				fake_storep = vmalloc(sz);
4448 				if (NULL == fake_storep) {
4449 					pr_err("out of memory, 9\n");
4450 					return -ENOMEM;
4451 				}
4452 				memset(fake_storep, 0, sz);
4453 			}
4454 			sdebug_fake_rw = n;
4455 		}
4456 		return count;
4457 	}
4458 	return -EINVAL;
4459 }
4460 static DRIVER_ATTR_RW(fake_rw);
4461 
4462 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4463 {
4464 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4465 }
4466 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4467 			      size_t count)
4468 {
4469         int n;
4470 
4471 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4472 		sdebug_no_lun_0 = n;
4473 		return count;
4474 	}
4475 	return -EINVAL;
4476 }
4477 static DRIVER_ATTR_RW(no_lun_0);
4478 
4479 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4480 {
4481 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4482 }
4483 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4484 			      size_t count)
4485 {
4486         int n;
4487 
4488 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4489 		sdebug_num_tgts = n;
4490 		sdebug_max_tgts_luns();
4491 		return count;
4492 	}
4493 	return -EINVAL;
4494 }
4495 static DRIVER_ATTR_RW(num_tgts);
4496 
4497 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4498 {
4499 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4500 }
4501 static DRIVER_ATTR_RO(dev_size_mb);
4502 
4503 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4504 {
4505 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4506 }
4507 static DRIVER_ATTR_RO(num_parts);
4508 
4509 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4510 {
4511 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4512 }
4513 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4514 			       size_t count)
4515 {
4516         int nth;
4517 
4518 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4519 		sdebug_every_nth = nth;
4520 		if (nth && !sdebug_statistics) {
4521 			pr_info("every_nth needs statistics=1, set it\n");
4522 			sdebug_statistics = true;
4523 		}
4524 		tweak_cmnd_count();
4525 		return count;
4526 	}
4527 	return -EINVAL;
4528 }
4529 static DRIVER_ATTR_RW(every_nth);
4530 
4531 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4532 {
4533 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4534 }
4535 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4536 			      size_t count)
4537 {
4538         int n;
4539 	bool changed;
4540 
4541 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4542 		if (n > 256) {
4543 			pr_warn("max_luns can be no more than 256\n");
4544 			return -EINVAL;
4545 		}
4546 		changed = (sdebug_max_luns != n);
4547 		sdebug_max_luns = n;
4548 		sdebug_max_tgts_luns();
4549 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4550 			struct sdebug_host_info *sdhp;
4551 			struct sdebug_dev_info *dp;
4552 
4553 			spin_lock(&sdebug_host_list_lock);
4554 			list_for_each_entry(sdhp, &sdebug_host_list,
4555 					    host_list) {
4556 				list_for_each_entry(dp, &sdhp->dev_info_list,
4557 						    dev_list) {
4558 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4559 						dp->uas_bm);
4560 				}
4561 			}
4562 			spin_unlock(&sdebug_host_list_lock);
4563 		}
4564 		return count;
4565 	}
4566 	return -EINVAL;
4567 }
4568 static DRIVER_ATTR_RW(max_luns);
4569 
4570 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4571 {
4572 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4573 }
4574 /* N.B. max_queue can be changed while there are queued commands. In flight
4575  * commands beyond the new max_queue will be completed. */
4576 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4577 			       size_t count)
4578 {
4579 	int j, n, k, a;
4580 	struct sdebug_queue *sqp;
4581 
4582 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4583 	    (n <= SDEBUG_CANQUEUE)) {
4584 		block_unblock_all_queues(true);
4585 		k = 0;
4586 		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4587 		     ++j, ++sqp) {
4588 			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4589 			if (a > k)
4590 				k = a;
4591 		}
4592 		sdebug_max_queue = n;
4593 		if (k == SDEBUG_CANQUEUE)
4594 			atomic_set(&retired_max_queue, 0);
4595 		else if (k >= n)
4596 			atomic_set(&retired_max_queue, k + 1);
4597 		else
4598 			atomic_set(&retired_max_queue, 0);
4599 		block_unblock_all_queues(false);
4600 		return count;
4601 	}
4602 	return -EINVAL;
4603 }
4604 static DRIVER_ATTR_RW(max_queue);
4605 
4606 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4607 {
4608 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4609 }
4610 static DRIVER_ATTR_RO(no_uld);
4611 
4612 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4613 {
4614 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4615 }
4616 static DRIVER_ATTR_RO(scsi_level);
4617 
4618 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4619 {
4620 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4621 }
4622 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4623 				size_t count)
4624 {
4625         int n;
4626 	bool changed;
4627 
4628 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4629 		changed = (sdebug_virtual_gb != n);
4630 		sdebug_virtual_gb = n;
4631 		sdebug_capacity = get_sdebug_capacity();
4632 		if (changed) {
4633 			struct sdebug_host_info *sdhp;
4634 			struct sdebug_dev_info *dp;
4635 
4636 			spin_lock(&sdebug_host_list_lock);
4637 			list_for_each_entry(sdhp, &sdebug_host_list,
4638 					    host_list) {
4639 				list_for_each_entry(dp, &sdhp->dev_info_list,
4640 						    dev_list) {
4641 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4642 						dp->uas_bm);
4643 				}
4644 			}
4645 			spin_unlock(&sdebug_host_list_lock);
4646 		}
4647 		return count;
4648 	}
4649 	return -EINVAL;
4650 }
4651 static DRIVER_ATTR_RW(virtual_gb);
4652 
4653 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4654 {
4655 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4656 }
4657 
4658 static int sdebug_add_adapter(void);
4659 static void sdebug_remove_adapter(void);
4660 
4661 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4662 			      size_t count)
4663 {
4664 	int delta_hosts;
4665 
4666 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4667 		return -EINVAL;
4668 	if (delta_hosts > 0) {
4669 		do {
4670 			sdebug_add_adapter();
4671 		} while (--delta_hosts);
4672 	} else if (delta_hosts < 0) {
4673 		do {
4674 			sdebug_remove_adapter();
4675 		} while (++delta_hosts);
4676 	}
4677 	return count;
4678 }
4679 static DRIVER_ATTR_RW(add_host);
4680 
4681 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4682 {
4683 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4684 }
4685 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4686 				    size_t count)
4687 {
4688 	int n;
4689 
4690 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4691 		sdebug_vpd_use_hostno = n;
4692 		return count;
4693 	}
4694 	return -EINVAL;
4695 }
4696 static DRIVER_ATTR_RW(vpd_use_hostno);
4697 
4698 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4699 {
4700 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4701 }
4702 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4703 				size_t count)
4704 {
4705 	int n;
4706 
4707 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4708 		if (n > 0)
4709 			sdebug_statistics = true;
4710 		else {
4711 			clear_queue_stats();
4712 			sdebug_statistics = false;
4713 		}
4714 		return count;
4715 	}
4716 	return -EINVAL;
4717 }
4718 static DRIVER_ATTR_RW(statistics);
4719 
4720 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4721 {
4722 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4723 }
4724 static DRIVER_ATTR_RO(sector_size);
4725 
4726 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4727 {
4728 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4729 }
4730 static DRIVER_ATTR_RO(submit_queues);
4731 
4732 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4733 {
4734 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4735 }
4736 static DRIVER_ATTR_RO(dix);
4737 
4738 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4739 {
4740 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4741 }
4742 static DRIVER_ATTR_RO(dif);
4743 
4744 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4745 {
4746 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4747 }
4748 static DRIVER_ATTR_RO(guard);
4749 
4750 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4751 {
4752 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4753 }
4754 static DRIVER_ATTR_RO(ato);
4755 
4756 static ssize_t map_show(struct device_driver *ddp, char *buf)
4757 {
4758 	ssize_t count;
4759 
4760 	if (!scsi_debug_lbp())
4761 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4762 				 sdebug_store_sectors);
4763 
4764 	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4765 			  (int)map_size, map_storep);
4766 	buf[count++] = '\n';
4767 	buf[count] = '\0';
4768 
4769 	return count;
4770 }
4771 static DRIVER_ATTR_RO(map);
4772 
4773 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4774 {
4775 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4776 }
4777 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4778 			       size_t count)
4779 {
4780 	int n;
4781 
4782 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4783 		sdebug_removable = (n > 0);
4784 		return count;
4785 	}
4786 	return -EINVAL;
4787 }
4788 static DRIVER_ATTR_RW(removable);
4789 
4790 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4791 {
4792 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4793 }
4794 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4795 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4796 			       size_t count)
4797 {
4798 	int n;
4799 
4800 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4801 		sdebug_host_lock = (n > 0);
4802 		return count;
4803 	}
4804 	return -EINVAL;
4805 }
4806 static DRIVER_ATTR_RW(host_lock);
4807 
4808 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4809 {
4810 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4811 }
4812 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4813 			    size_t count)
4814 {
4815 	int n;
4816 
4817 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4818 		sdebug_strict = (n > 0);
4819 		return count;
4820 	}
4821 	return -EINVAL;
4822 }
4823 static DRIVER_ATTR_RW(strict);
4824 
4825 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4826 {
4827 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4828 }
4829 static DRIVER_ATTR_RO(uuid_ctl);
4830 
4831 
4832 /* Note: The following array creates attribute files in the
4833    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4834    files (over those found in the /sys/module/scsi_debug/parameters
4835    directory) is that auxiliary actions can be triggered when an attribute
4836    is changed. For example see: sdebug_add_host_store() above.
4837  */
4838 
4839 static struct attribute *sdebug_drv_attrs[] = {
4840 	&driver_attr_delay.attr,
4841 	&driver_attr_opts.attr,
4842 	&driver_attr_ptype.attr,
4843 	&driver_attr_dsense.attr,
4844 	&driver_attr_fake_rw.attr,
4845 	&driver_attr_no_lun_0.attr,
4846 	&driver_attr_num_tgts.attr,
4847 	&driver_attr_dev_size_mb.attr,
4848 	&driver_attr_num_parts.attr,
4849 	&driver_attr_every_nth.attr,
4850 	&driver_attr_max_luns.attr,
4851 	&driver_attr_max_queue.attr,
4852 	&driver_attr_no_uld.attr,
4853 	&driver_attr_scsi_level.attr,
4854 	&driver_attr_virtual_gb.attr,
4855 	&driver_attr_add_host.attr,
4856 	&driver_attr_vpd_use_hostno.attr,
4857 	&driver_attr_sector_size.attr,
4858 	&driver_attr_statistics.attr,
4859 	&driver_attr_submit_queues.attr,
4860 	&driver_attr_dix.attr,
4861 	&driver_attr_dif.attr,
4862 	&driver_attr_guard.attr,
4863 	&driver_attr_ato.attr,
4864 	&driver_attr_map.attr,
4865 	&driver_attr_removable.attr,
4866 	&driver_attr_host_lock.attr,
4867 	&driver_attr_ndelay.attr,
4868 	&driver_attr_strict.attr,
4869 	&driver_attr_uuid_ctl.attr,
4870 	NULL,
4871 };
4872 ATTRIBUTE_GROUPS(sdebug_drv);
4873 
4874 static struct device *pseudo_primary;
4875 
4876 static int __init scsi_debug_init(void)
4877 {
4878 	unsigned long sz;
4879 	int host_to_add;
4880 	int k;
4881 	int ret;
4882 
4883 	atomic_set(&retired_max_queue, 0);
4884 
4885 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4886 		pr_warn("ndelay must be less than 1 second, ignored\n");
4887 		sdebug_ndelay = 0;
4888 	} else if (sdebug_ndelay > 0)
4889 		sdebug_jdelay = JDELAY_OVERRIDDEN;
4890 
4891 	switch (sdebug_sector_size) {
4892 	case  512:
4893 	case 1024:
4894 	case 2048:
4895 	case 4096:
4896 		break;
4897 	default:
4898 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
4899 		return -EINVAL;
4900 	}
4901 
4902 	switch (sdebug_dif) {
4903 
4904 	case SD_DIF_TYPE0_PROTECTION:
4905 		break;
4906 	case SD_DIF_TYPE1_PROTECTION:
4907 	case SD_DIF_TYPE2_PROTECTION:
4908 	case SD_DIF_TYPE3_PROTECTION:
4909 		have_dif_prot = true;
4910 		break;
4911 
4912 	default:
4913 		pr_err("dif must be 0, 1, 2 or 3\n");
4914 		return -EINVAL;
4915 	}
4916 
4917 	if (sdebug_guard > 1) {
4918 		pr_err("guard must be 0 or 1\n");
4919 		return -EINVAL;
4920 	}
4921 
4922 	if (sdebug_ato > 1) {
4923 		pr_err("ato must be 0 or 1\n");
4924 		return -EINVAL;
4925 	}
4926 
4927 	if (sdebug_physblk_exp > 15) {
4928 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4929 		return -EINVAL;
4930 	}
4931 	if (sdebug_max_luns > 256) {
4932 		pr_warn("max_luns can be no more than 256, use default\n");
4933 		sdebug_max_luns = DEF_MAX_LUNS;
4934 	}
4935 
4936 	if (sdebug_lowest_aligned > 0x3fff) {
4937 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4938 		return -EINVAL;
4939 	}
4940 
4941 	if (submit_queues < 1) {
4942 		pr_err("submit_queues must be 1 or more\n");
4943 		return -EINVAL;
4944 	}
4945 	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
4946 			       GFP_KERNEL);
4947 	if (sdebug_q_arr == NULL)
4948 		return -ENOMEM;
4949 	for (k = 0; k < submit_queues; ++k)
4950 		spin_lock_init(&sdebug_q_arr[k].qc_lock);
4951 
4952 	if (sdebug_dev_size_mb < 1)
4953 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4954 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4955 	sdebug_store_sectors = sz / sdebug_sector_size;
4956 	sdebug_capacity = get_sdebug_capacity();
4957 
4958 	/* play around with geometry, don't waste too much on track 0 */
4959 	sdebug_heads = 8;
4960 	sdebug_sectors_per = 32;
4961 	if (sdebug_dev_size_mb >= 256)
4962 		sdebug_heads = 64;
4963 	else if (sdebug_dev_size_mb >= 16)
4964 		sdebug_heads = 32;
4965 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4966 			       (sdebug_sectors_per * sdebug_heads);
4967 	if (sdebug_cylinders_per >= 1024) {
4968 		/* other LLDs do this; implies >= 1GB ram disk ... */
4969 		sdebug_heads = 255;
4970 		sdebug_sectors_per = 63;
4971 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4972 			       (sdebug_sectors_per * sdebug_heads);
4973 	}
4974 
4975 	if (sdebug_fake_rw == 0) {
4976 		fake_storep = vmalloc(sz);
4977 		if (NULL == fake_storep) {
4978 			pr_err("out of memory, 1\n");
4979 			ret = -ENOMEM;
4980 			goto free_q_arr;
4981 		}
4982 		memset(fake_storep, 0, sz);
4983 		if (sdebug_num_parts > 0)
4984 			sdebug_build_parts(fake_storep, sz);
4985 	}
4986 
4987 	if (sdebug_dix) {
4988 		int dif_size;
4989 
4990 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4991 		dif_storep = vmalloc(dif_size);
4992 
4993 		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4994 
4995 		if (dif_storep == NULL) {
4996 			pr_err("out of mem. (DIX)\n");
4997 			ret = -ENOMEM;
4998 			goto free_vm;
4999 		}
5000 
5001 		memset(dif_storep, 0xff, dif_size);
5002 	}
5003 
5004 	/* Logical Block Provisioning */
5005 	if (scsi_debug_lbp()) {
5006 		sdebug_unmap_max_blocks =
5007 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5008 
5009 		sdebug_unmap_max_desc =
5010 			clamp(sdebug_unmap_max_desc, 0U, 256U);
5011 
5012 		sdebug_unmap_granularity =
5013 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5014 
5015 		if (sdebug_unmap_alignment &&
5016 		    sdebug_unmap_granularity <=
5017 		    sdebug_unmap_alignment) {
5018 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5019 			ret = -EINVAL;
5020 			goto free_vm;
5021 		}
5022 
5023 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5024 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5025 
5026 		pr_info("%lu provisioning blocks\n", map_size);
5027 
5028 		if (map_storep == NULL) {
5029 			pr_err("out of mem. (MAP)\n");
5030 			ret = -ENOMEM;
5031 			goto free_vm;
5032 		}
5033 
5034 		bitmap_zero(map_storep, map_size);
5035 
5036 		/* Map first 1KB for partition table */
5037 		if (sdebug_num_parts)
5038 			map_region(0, 2);
5039 	}
5040 
5041 	pseudo_primary = root_device_register("pseudo_0");
5042 	if (IS_ERR(pseudo_primary)) {
5043 		pr_warn("root_device_register() error\n");
5044 		ret = PTR_ERR(pseudo_primary);
5045 		goto free_vm;
5046 	}
5047 	ret = bus_register(&pseudo_lld_bus);
5048 	if (ret < 0) {
5049 		pr_warn("bus_register error: %d\n", ret);
5050 		goto dev_unreg;
5051 	}
5052 	ret = driver_register(&sdebug_driverfs_driver);
5053 	if (ret < 0) {
5054 		pr_warn("driver_register error: %d\n", ret);
5055 		goto bus_unreg;
5056 	}
5057 
5058 	host_to_add = sdebug_add_host;
5059 	sdebug_add_host = 0;
5060 
5061         for (k = 0; k < host_to_add; k++) {
5062                 if (sdebug_add_adapter()) {
5063 			pr_err("sdebug_add_adapter failed k=%d\n", k);
5064                         break;
5065                 }
5066         }
5067 
5068 	if (sdebug_verbose)
5069 		pr_info("built %d host(s)\n", sdebug_add_host);
5070 
5071 	return 0;
5072 
5073 bus_unreg:
5074 	bus_unregister(&pseudo_lld_bus);
5075 dev_unreg:
5076 	root_device_unregister(pseudo_primary);
5077 free_vm:
5078 	vfree(map_storep);
5079 	vfree(dif_storep);
5080 	vfree(fake_storep);
5081 free_q_arr:
5082 	kfree(sdebug_q_arr);
5083 	return ret;
5084 }
5085 
5086 static void __exit scsi_debug_exit(void)
5087 {
5088 	int k = sdebug_add_host;
5089 
5090 	stop_all_queued();
5091 	free_all_queued();
5092 	for (; k; k--)
5093 		sdebug_remove_adapter();
5094 	driver_unregister(&sdebug_driverfs_driver);
5095 	bus_unregister(&pseudo_lld_bus);
5096 	root_device_unregister(pseudo_primary);
5097 
5098 	vfree(dif_storep);
5099 	vfree(fake_storep);
5100 	kfree(sdebug_q_arr);
5101 }
5102 
5103 device_initcall(scsi_debug_init);
5104 module_exit(scsi_debug_exit);
5105 
5106 static void sdebug_release_adapter(struct device * dev)
5107 {
5108         struct sdebug_host_info *sdbg_host;
5109 
5110 	sdbg_host = to_sdebug_host(dev);
5111         kfree(sdbg_host);
5112 }
5113 
5114 static int sdebug_add_adapter(void)
5115 {
5116 	int k, devs_per_host;
5117         int error = 0;
5118         struct sdebug_host_info *sdbg_host;
5119 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5120 
5121         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5122         if (NULL == sdbg_host) {
5123 		pr_err("out of memory at line %d\n", __LINE__);
5124                 return -ENOMEM;
5125         }
5126 
5127         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5128 
5129 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5130         for (k = 0; k < devs_per_host; k++) {
5131 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5132 		if (!sdbg_devinfo) {
5133 			pr_err("out of memory at line %d\n", __LINE__);
5134                         error = -ENOMEM;
5135 			goto clean;
5136                 }
5137         }
5138 
5139         spin_lock(&sdebug_host_list_lock);
5140         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5141         spin_unlock(&sdebug_host_list_lock);
5142 
5143         sdbg_host->dev.bus = &pseudo_lld_bus;
5144         sdbg_host->dev.parent = pseudo_primary;
5145         sdbg_host->dev.release = &sdebug_release_adapter;
5146 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5147 
5148         error = device_register(&sdbg_host->dev);
5149 
5150         if (error)
5151 		goto clean;
5152 
5153 	++sdebug_add_host;
5154         return error;
5155 
5156 clean:
5157 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5158 				 dev_list) {
5159 		list_del(&sdbg_devinfo->dev_list);
5160 		kfree(sdbg_devinfo);
5161 	}
5162 
5163 	kfree(sdbg_host);
5164         return error;
5165 }
5166 
5167 static void sdebug_remove_adapter(void)
5168 {
5169         struct sdebug_host_info * sdbg_host = NULL;
5170 
5171         spin_lock(&sdebug_host_list_lock);
5172         if (!list_empty(&sdebug_host_list)) {
5173                 sdbg_host = list_entry(sdebug_host_list.prev,
5174                                        struct sdebug_host_info, host_list);
5175 		list_del(&sdbg_host->host_list);
5176 	}
5177         spin_unlock(&sdebug_host_list_lock);
5178 
5179 	if (!sdbg_host)
5180 		return;
5181 
5182 	device_unregister(&sdbg_host->dev);
5183 	--sdebug_add_host;
5184 }
5185 
5186 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5187 {
5188 	int num_in_q = 0;
5189 	struct sdebug_dev_info *devip;
5190 
5191 	block_unblock_all_queues(true);
5192 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5193 	if (NULL == devip) {
5194 		block_unblock_all_queues(false);
5195 		return	-ENODEV;
5196 	}
5197 	num_in_q = atomic_read(&devip->num_in_q);
5198 
5199 	if (qdepth < 1)
5200 		qdepth = 1;
5201 	/* allow to exceed max host qc_arr elements for testing */
5202 	if (qdepth > SDEBUG_CANQUEUE + 10)
5203 		qdepth = SDEBUG_CANQUEUE + 10;
5204 	scsi_change_queue_depth(sdev, qdepth);
5205 
5206 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5207 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5208 			    __func__, qdepth, num_in_q);
5209 	}
5210 	block_unblock_all_queues(false);
5211 	return sdev->queue_depth;
5212 }
5213 
5214 static bool fake_timeout(struct scsi_cmnd *scp)
5215 {
5216 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5217 		if (sdebug_every_nth < -1)
5218 			sdebug_every_nth = -1;
5219 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5220 			return true; /* ignore command causing timeout */
5221 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5222 			 scsi_medium_access_command(scp))
5223 			return true; /* time out reads and writes */
5224 	}
5225 	return false;
5226 }
5227 
5228 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5229 				   struct scsi_cmnd *scp)
5230 {
5231 	u8 sdeb_i;
5232 	struct scsi_device *sdp = scp->device;
5233 	const struct opcode_info_t *oip;
5234 	const struct opcode_info_t *r_oip;
5235 	struct sdebug_dev_info *devip;
5236 	u8 *cmd = scp->cmnd;
5237 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5238 	int k, na;
5239 	int errsts = 0;
5240 	u32 flags;
5241 	u16 sa;
5242 	u8 opcode = cmd[0];
5243 	bool has_wlun_rl;
5244 
5245 	scsi_set_resid(scp, 0);
5246 	if (sdebug_statistics)
5247 		atomic_inc(&sdebug_cmnd_count);
5248 	if (unlikely(sdebug_verbose &&
5249 		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5250 		char b[120];
5251 		int n, len, sb;
5252 
5253 		len = scp->cmd_len;
5254 		sb = (int)sizeof(b);
5255 		if (len > 32)
5256 			strcpy(b, "too long, over 32 bytes");
5257 		else {
5258 			for (k = 0, n = 0; k < len && n < sb; ++k)
5259 				n += scnprintf(b + n, sb - n, "%02x ",
5260 					       (u32)cmd[k]);
5261 		}
5262 		if (sdebug_mq_active)
5263 			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
5264 				    my_name, blk_mq_unique_tag(scp->request),
5265 				    b);
5266 		else
5267 			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
5268 				    b);
5269 	}
5270 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5271 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5272 		goto err_out;
5273 
5274 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
5275 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
5276 	devip = (struct sdebug_dev_info *)sdp->hostdata;
5277 	if (unlikely(!devip)) {
5278 		devip = find_build_dev_info(sdp);
5279 		if (NULL == devip)
5280 			goto err_out;
5281 	}
5282 	na = oip->num_attached;
5283 	r_pfp = oip->pfp;
5284 	if (na) {	/* multiple commands with this opcode */
5285 		r_oip = oip;
5286 		if (FF_SA & r_oip->flags) {
5287 			if (F_SA_LOW & oip->flags)
5288 				sa = 0x1f & cmd[1];
5289 			else
5290 				sa = get_unaligned_be16(cmd + 8);
5291 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5292 				if (opcode == oip->opcode && sa == oip->sa)
5293 					break;
5294 			}
5295 		} else {   /* since no service action only check opcode */
5296 			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5297 				if (opcode == oip->opcode)
5298 					break;
5299 			}
5300 		}
5301 		if (k > na) {
5302 			if (F_SA_LOW & r_oip->flags)
5303 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5304 			else if (F_SA_HIGH & r_oip->flags)
5305 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5306 			else
5307 				mk_sense_invalid_opcode(scp);
5308 			goto check_cond;
5309 		}
5310 	}	/* else (when na==0) we assume the oip is a match */
5311 	flags = oip->flags;
5312 	if (unlikely(F_INV_OP & flags)) {
5313 		mk_sense_invalid_opcode(scp);
5314 		goto check_cond;
5315 	}
5316 	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5317 		if (sdebug_verbose)
5318 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5319 				    my_name, opcode, " supported for wlun");
5320 		mk_sense_invalid_opcode(scp);
5321 		goto check_cond;
5322 	}
5323 	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
5324 		u8 rem;
5325 		int j;
5326 
5327 		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5328 			rem = ~oip->len_mask[k] & cmd[k];
5329 			if (rem) {
5330 				for (j = 7; j >= 0; --j, rem <<= 1) {
5331 					if (0x80 & rem)
5332 						break;
5333 				}
5334 				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5335 				goto check_cond;
5336 			}
5337 		}
5338 	}
5339 	if (unlikely(!(F_SKIP_UA & flags) &&
5340 		     find_first_bit(devip->uas_bm,
5341 				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5342 		errsts = make_ua(scp, devip);
5343 		if (errsts)
5344 			goto check_cond;
5345 	}
5346 	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5347 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5348 		if (sdebug_verbose)
5349 			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5350 				    "%s\n", my_name, "initializing command "
5351 				    "required");
5352 		errsts = check_condition_result;
5353 		goto fini;
5354 	}
5355 	if (sdebug_fake_rw && (F_FAKE_RW & flags))
5356 		goto fini;
5357 	if (unlikely(sdebug_every_nth)) {
5358 		if (fake_timeout(scp))
5359 			return 0;	/* ignore command: make trouble */
5360 	}
5361 	if (likely(oip->pfp))
5362 		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
5363 	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
5364 		errsts = r_pfp(scp, devip);
5365 
5366 fini:
5367 	return schedule_resp(scp, devip, errsts,
5368 			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
5369 check_cond:
5370 	return schedule_resp(scp, devip, check_condition_result, 0);
5371 err_out:
5372 	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
5373 }
5374 
5375 static struct scsi_host_template sdebug_driver_template = {
5376 	.show_info =		scsi_debug_show_info,
5377 	.write_info =		scsi_debug_write_info,
5378 	.proc_name =		sdebug_proc_name,
5379 	.name =			"SCSI DEBUG",
5380 	.info =			scsi_debug_info,
5381 	.slave_alloc =		scsi_debug_slave_alloc,
5382 	.slave_configure =	scsi_debug_slave_configure,
5383 	.slave_destroy =	scsi_debug_slave_destroy,
5384 	.ioctl =		scsi_debug_ioctl,
5385 	.queuecommand =		scsi_debug_queuecommand,
5386 	.change_queue_depth =	sdebug_change_qdepth,
5387 	.eh_abort_handler =	scsi_debug_abort,
5388 	.eh_device_reset_handler = scsi_debug_device_reset,
5389 	.eh_target_reset_handler = scsi_debug_target_reset,
5390 	.eh_bus_reset_handler = scsi_debug_bus_reset,
5391 	.eh_host_reset_handler = scsi_debug_host_reset,
5392 	.can_queue =		SDEBUG_CANQUEUE,
5393 	.this_id =		7,
5394 	.sg_tablesize =		SG_MAX_SEGMENTS,
5395 	.cmd_per_lun =		DEF_CMD_PER_LUN,
5396 	.max_sectors =		-1U,
5397 	.use_clustering = 	DISABLE_CLUSTERING,
5398 	.module =		THIS_MODULE,
5399 	.track_queue_depth =	1,
5400 };
5401 
5402 static int sdebug_driver_probe(struct device * dev)
5403 {
5404 	int error = 0;
5405 	struct sdebug_host_info *sdbg_host;
5406 	struct Scsi_Host *hpnt;
5407 	int hprot;
5408 
5409 	sdbg_host = to_sdebug_host(dev);
5410 
5411 	sdebug_driver_template.can_queue = sdebug_max_queue;
5412 	if (sdebug_clustering)
5413 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5414 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5415 	if (NULL == hpnt) {
5416 		pr_err("scsi_host_alloc failed\n");
5417 		error = -ENODEV;
5418 		return error;
5419 	}
5420 	if (submit_queues > nr_cpu_ids) {
5421 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
5422 			my_name, submit_queues, nr_cpu_ids);
5423 		submit_queues = nr_cpu_ids;
5424 	}
5425 	/* Decide whether to tell scsi subsystem that we want mq */
5426 	/* Following should give the same answer for each host */
5427 	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5428 	if (sdebug_mq_active)
5429 		hpnt->nr_hw_queues = submit_queues;
5430 
5431         sdbg_host->shost = hpnt;
5432 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5433 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5434 		hpnt->max_id = sdebug_num_tgts + 1;
5435 	else
5436 		hpnt->max_id = sdebug_num_tgts;
5437 	/* = sdebug_max_luns; */
5438 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5439 
5440 	hprot = 0;
5441 
5442 	switch (sdebug_dif) {
5443 
5444 	case SD_DIF_TYPE1_PROTECTION:
5445 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5446 		if (sdebug_dix)
5447 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5448 		break;
5449 
5450 	case SD_DIF_TYPE2_PROTECTION:
5451 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5452 		if (sdebug_dix)
5453 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5454 		break;
5455 
5456 	case SD_DIF_TYPE3_PROTECTION:
5457 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5458 		if (sdebug_dix)
5459 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5460 		break;
5461 
5462 	default:
5463 		if (sdebug_dix)
5464 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5465 		break;
5466 	}
5467 
5468 	scsi_host_set_prot(hpnt, hprot);
5469 
5470 	if (have_dif_prot || sdebug_dix)
5471 		pr_info("host protection%s%s%s%s%s%s%s\n",
5472 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5473 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5474 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5475 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5476 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5477 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5478 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5479 
5480 	if (sdebug_guard == 1)
5481 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5482 	else
5483 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5484 
5485 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5486 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5487 	if (sdebug_every_nth)	/* need stats counters for every_nth */
5488 		sdebug_statistics = true;
5489         error = scsi_add_host(hpnt, &sdbg_host->dev);
5490         if (error) {
5491 		pr_err("scsi_add_host failed\n");
5492                 error = -ENODEV;
5493 		scsi_host_put(hpnt);
5494         } else
5495 		scsi_scan_host(hpnt);
5496 
5497 	return error;
5498 }
5499 
5500 static int sdebug_driver_remove(struct device * dev)
5501 {
5502         struct sdebug_host_info *sdbg_host;
5503 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5504 
5505 	sdbg_host = to_sdebug_host(dev);
5506 
5507 	if (!sdbg_host) {
5508 		pr_err("Unable to locate host info\n");
5509 		return -ENODEV;
5510 	}
5511 
5512         scsi_remove_host(sdbg_host->shost);
5513 
5514 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5515 				 dev_list) {
5516                 list_del(&sdbg_devinfo->dev_list);
5517                 kfree(sdbg_devinfo);
5518         }
5519 
5520         scsi_host_put(sdbg_host->shost);
5521         return 0;
5522 }
5523 
5524 static int pseudo_lld_bus_match(struct device *dev,
5525 				struct device_driver *dev_driver)
5526 {
5527 	return 1;
5528 }
5529 
5530 static struct bus_type pseudo_lld_bus = {
5531 	.name = "pseudo",
5532 	.match = pseudo_lld_bus_match,
5533 	.probe = sdebug_driver_probe,
5534 	.remove = sdebug_driver_remove,
5535 	.drv_groups = sdebug_drv_groups,
5536 };
5537