xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision bc5aa3a0)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
58 
59 #include "sd.h"
60 #include "scsi_logging.h"
61 
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "1.86"
64 static const char *sdebug_version_date = "20160430";
65 
66 #define MY_NAME "scsi_debug"
67 
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
95 
96 /* Additional Sense Code Qualifier (ASCQ) */
97 #define ACK_NAK_TO 0x3
98 
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW	0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   TYPE_DISK
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DEF_STATISTICS false
140 #define DEF_SUBMIT_QUEUES 1
141 #define DEF_UUID_CTL 0
142 #define JDELAY_OVERRIDDEN -9999
143 
144 #define SDEBUG_LUN_0_VAL 0
145 
146 /* bit mask values for sdebug_opts */
147 #define SDEBUG_OPT_NOISE		1
148 #define SDEBUG_OPT_MEDIUM_ERR		2
149 #define SDEBUG_OPT_TIMEOUT		4
150 #define SDEBUG_OPT_RECOVERED_ERR	8
151 #define SDEBUG_OPT_TRANSPORT_ERR	16
152 #define SDEBUG_OPT_DIF_ERR		32
153 #define SDEBUG_OPT_DIX_ERR		64
154 #define SDEBUG_OPT_MAC_TIMEOUT		128
155 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
156 #define SDEBUG_OPT_Q_NOISE		0x200
157 #define SDEBUG_OPT_ALL_TSF		0x400
158 #define SDEBUG_OPT_RARE_TSF		0x800
159 #define SDEBUG_OPT_N_WCE		0x1000
160 #define SDEBUG_OPT_RESET_NOISE		0x2000
161 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
162 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
163 			      SDEBUG_OPT_RESET_NOISE)
164 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
165 				  SDEBUG_OPT_TRANSPORT_ERR | \
166 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
167 				  SDEBUG_OPT_SHORT_TRANSFER)
168 /* When "every_nth" > 0 then modulo "every_nth" commands:
169  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
170  *   - a RECOVERED_ERROR is simulated on successful read and write
171  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
172  *   - a TRANSPORT_ERROR is simulated on successful read and write
173  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
174  *
175  * When "every_nth" < 0 then after "- every_nth" commands:
176  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
177  *   - a RECOVERED_ERROR is simulated on successful read and write
178  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
179  *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
181  * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
183  * every_nth via sysfs).
184  */
185 
186 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
187  * priority order. In the subset implemented here lower numbers have higher
188  * priority. The UA numbers should be a sequence starting from 0 with
189  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
190 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
191 #define SDEBUG_UA_BUS_RESET 1
192 #define SDEBUG_UA_MODE_CHANGED 2
193 #define SDEBUG_UA_CAPACITY_CHANGED 3
194 #define SDEBUG_UA_LUNS_CHANGED 4
195 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
196 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
197 #define SDEBUG_NUM_UAS 7
198 
199 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200  * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
203 
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205  * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
207 
208 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
209  * (for response) per submit queue at one time. Can be reduced by max_queue
210  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
211  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
212  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
213  * but cannot exceed SDEBUG_CANQUEUE .
214  */
215 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
216 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
217 #define DEF_CMD_PER_LUN  255
218 
219 #define F_D_IN			1
220 #define F_D_OUT			2
221 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
222 #define F_D_UNKN		8
223 #define F_RL_WLUN_OK		0x10
224 #define F_SKIP_UA		0x20
225 #define F_DELAY_OVERR		0x40
226 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
227 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
228 #define F_INV_OP		0x200
229 #define F_FAKE_RW		0x400
230 #define F_M_ACCESS		0x800	/* media access */
231 
232 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
233 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
234 #define FF_SA (F_SA_HIGH | F_SA_LOW)
235 
236 #define SDEBUG_MAX_PARTS 4
237 
238 #define SDEBUG_MAX_CMD_LEN 32
239 
240 
/* Per logical unit (LU) state. One instance per LU presented by a
 * simulated host; hangs off the owning sdebug_host_info's dev_info_list. */
struct sdebug_dev_info {
	struct list_head dev_list;	/* link in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_be lu_name;	/* per-LU uuid; presumably reported via INQUIRY VPD — confirm */
	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* (see make_ua()) */
	atomic_t num_in_q;	/* NOTE(review): looks like commands currently queued on this LU — confirm */
	atomic_t stopped;	/* presumably set by START STOP UNIT (stop) — confirm at use sites */
	bool used;
};
253 
/* One instance per simulated SCSI host; ties the mid-level Scsi_Host to
 * the list of LUs (sdebug_dev_info) that this host presents. */
struct sdebug_host_info {
	struct list_head host_list;	/* link in global sdebug_host_list */
	struct Scsi_Host *shost;
	struct device dev;		/* embedded; see to_sdebug_host() */
	struct list_head dev_info_list;	/* of sdebug_dev_info instances */
};
260 
/* Map an embedded struct device back to its owning sdebug_host_info. */
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
263 
/* Deferred-response state for a queued command: carries either an hrtimer
 * (high resolution ndelay timing) or a workqueue item, plus the location
 * of the owning slot in the submit-queue arrays. */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on (cf. sdebug_miss_cpus) */
};
271 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* associated mid-level command */
	/* per-command error injection flags; bit names mirror the
	 * SDEBUG_OPT_ALL_INJECTING option bits */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
};
284 
/* One submit queue; there are submit_queues of these (>1 when mq). */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit set => qc_arr slot busy */
	spinlock_t qc_lock;	/* presumably guards qc_arr + in_use_bm — confirm at use sites */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
291 
292 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
293 static atomic_t sdebug_completions;  /* count of deferred completions */
294 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
295 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
296 
/* Describes one supported SCSI opcode (or a group sharing a cdb byte):
 * cdb validation mask, behavior flags and the response-building function.
 * Rows of opcode_info_arr[] and its *_iarr satellite tables. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags (F_D_IN, FF_SA, ...) */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response builder */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
				/* ignore cdb bytes after position 15 */
};
308 
309 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {	/* indexes into opcode_info_arr[] below */
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
};
343 
344 
/* Maps all 256 possible cdb[0] opcode values onto the much smaller
 * SDEB_I_* index space; 0 (SDEB_I_INVALID_OPCODE) for unsupported ones. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
	     0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
387 
388 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
389 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
390 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
391 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
392 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
393 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
394 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
409 
/* MODE SENSE(6), 0x1a; attached under the MODE SENSE(10) leaf */
static const struct opcode_info_t msense_iarr[1] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
414 
/* MODE SELECT(6), 0x15; attached under the MODE SELECT(10) leaf */
static const struct opcode_info_t mselect_iarr[1] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
419 
/* READ(10)/(6)/(12); attached under the READ(16) leaf */
static const struct opcode_info_t read_iarr[3] = {
	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
430 
/* WRITE(10)/(6)/(12); attached under the WRITE(16) leaf */
static const struct opcode_info_t write_iarr[3] = {
	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
441 
/* GET LBA STATUS (SERVICE ACTION IN(16), sa 0x12); attached under
 * the READ CAPACITY(16) leaf */
static const struct opcode_info_t sa_in_iarr[1] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },
};
447 
/* WRITE(32), sa 0xb of the VARIABLE LENGTH cdb (0x7f); attached under
 * the VARIABLE LENGTH READ(32) leaf */
static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
};
453 
/* attached under the MAINTENANCE IN (REPORT TARGET PORT GROUPS) leaf */
static const struct opcode_info_t maint_in_iarr[2] = {
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
462 
/* WRITE SAME(16), 0x93; attached under the WRITE SAME(10) leaf */
static const struct opcode_info_t write_same_iarr[1] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
468 
/* attached under the RESERVE(10) leaf */
static const struct opcode_info_t reserve_iarr[1] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
473 
/* attached under the RELEASE(10) leaf */
static const struct opcode_info_t release_iarr[1] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
478 
479 
480 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
481  * plus the terminating elements for logic that scans this table such as
482  * REPORT SUPPORTED OPERATION CODES. */
483 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
484 /* 0 */
485 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
486 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
487 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
488 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
490 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
491 	     0, 0} },
492 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
493 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
494 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
495 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
497 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
498 	     0} },
499 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
500 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
501 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
502 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
503 	     0, 0, 0} },
504 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
505 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
506 	     0, 0} },
507 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
508 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
509 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
510 /* 10 */
511 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
512 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
513 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
514 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
515 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
517 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
518 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
519 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
520 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
521 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
522 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
523 	     0} },
524 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
525 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
527 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
528 	     0, 0, 0, 0, 0, 0} },
529 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
530 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
531 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
532 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
533 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
534 	     0} },
535 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
536 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
537 	     0} },
538 /* 20 */
539 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
540 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
541 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
542 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
544 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
545 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
546 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
548 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
549 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
550 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
551 		   0, 0, 0, 0, 0, 0} },
552 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
553 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
554 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
555 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
556 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
557 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
558 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
559 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
560 	     0, 0, 0, 0} },
561 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
562 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
563 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
564 
565 /* 30 */
566 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
567 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
568 };
569 
/* Backing variables for the module/sysfs parameters; most start at the
 * DEF_* defaults defined above and may be changed at load/run time. */
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_mq_active;
616 
617 static unsigned int sdebug_store_sectors;
618 static sector_t sdebug_capacity;	/* in sectors */
619 
620 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
621    may still need them */
622 static int sdebug_heads;		/* heads per disk */
623 static int sdebug_cylinders_per;	/* cylinders per surface */
624 static int sdebug_sectors_per;		/* sectors per cylinder */
625 
626 static LIST_HEAD(sdebug_host_list);
627 static DEFINE_SPINLOCK(sdebug_host_list_lock);
628 
629 static unsigned char *fake_storep;	/* ramdisk storage */
630 static struct sd_dif_tuple *dif_storep;	/* protection info */
631 static void *map_storep;		/* provisioning map */
632 
633 static unsigned long map_size;
634 static int num_aborts;
635 static int num_dev_resets;
636 static int num_target_resets;
637 static int num_bus_resets;
638 static int num_host_resets;
639 static int dix_writes;
640 static int dix_reads;
641 static int dif_errors;
642 
643 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
644 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
645 
646 static DEFINE_RWLOCK(atomic_rw);
647 
648 static char sdebug_proc_name[] = MY_NAME;
649 static const char *my_name = MY_NAME;
650 
651 static struct bus_type pseudo_lld_bus;
652 
653 static struct device_driver sdebug_driverfs_driver = {
654 	.name 		= sdebug_proc_name,
655 	.bus		= &pseudo_lld_bus,
656 };
657 
/* Pre-built scsi_cmnd result words (driver byte<<24 | host byte<<16 |
 * msg byte<<8 | status byte). */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
666 
667 
668 /* Only do the extra work involved in logical block provisioning if one or
669  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
670  * real reads and writes (i.e. not skipping them for speed).
671  */
672 static inline bool scsi_debug_lbp(void)
673 {
674 	return 0 == sdebug_fake_rw &&
675 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
676 }
677 
/* Return the address within the ramdisk (fake_storep) of the start of
 * the given logical block. The block number is wrapped modulo
 * sdebug_store_sectors: do_div() divides lba in place and returns the
 * remainder. */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
684 
/* Return the protection-information tuple for the given sector, wrapped
 * modulo sdebug_store_sectors (sector_div() yields the remainder). */
static struct sd_dif_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
691 
692 static void sdebug_max_tgts_luns(void)
693 {
694 	struct sdebug_host_info *sdbg_host;
695 	struct Scsi_Host *hpnt;
696 
697 	spin_lock(&sdebug_host_list_lock);
698 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
699 		hpnt = sdbg_host->shost;
700 		if ((hpnt->this_id >= 0) &&
701 		    (sdebug_num_tgts > hpnt->this_id))
702 			hpnt->max_id = sdebug_num_tgts + 1;
703 		else
704 			hpnt->max_id = sdebug_num_tgts;
705 		/* sdebug_max_luns; */
706 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
707 	}
708 	spin_unlock(&sdebug_host_list_lock);
709 }
710 
711 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
712 
/* Build an ILLEGAL REQUEST sense with a Sense Key Specific (SKS) field
 * pointing at the offending byte (and optionally bit) of either the cdb
 * (SDEB_IN_CDB) or the data-out parameter list (SDEB_IN_DATA).
 * Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes being assembled */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV: sense key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field pointer */
	if (sdebug_dsense) {
		/* descriptor format: append an SKS descriptor (type 0x2) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
753 
754 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
755 {
756 	unsigned char *sbuff;
757 
758 	sbuff = scp->sense_buffer;
759 	if (!sbuff) {
760 		sdev_printk(KERN_ERR, scp->device,
761 			    "%s: sense_buffer is NULL\n", __func__);
762 		return;
763 	}
764 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
765 
766 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
767 
768 	if (sdebug_verbose)
769 		sdev_printk(KERN_INFO, scp->device,
770 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
771 			    my_name, key, asc, asq);
772 }
773 
/* Respond with ILLEGAL REQUEST, INVALID COMMAND OPERATION CODE. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
778 
779 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
780 {
781 	if (sdebug_verbose) {
782 		if (0x1261 == cmd)
783 			sdev_printk(KERN_INFO, dev,
784 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
785 		else if (0x5331 == cmd)
786 			sdev_printk(KERN_INFO, dev,
787 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
788 				    __func__);
789 		else
790 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
791 				    __func__, cmd);
792 	}
793 	return -EINVAL;
794 	/* return -ENOTTY; // correct return but upsets fdisk */
795 }
796 
797 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
798 {
799 	struct sdebug_host_info *sdhp;
800 	struct sdebug_dev_info *dp;
801 
802 	spin_lock(&sdebug_host_list_lock);
803 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
804 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
805 			if ((devip->sdbg_host == dp->sdbg_host) &&
806 			    (devip->target == dp->target))
807 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
808 		}
809 	}
810 	spin_unlock(&sdebug_host_list_lock);
811 }
812 
/* If any unit attention (UA) is pending for this device, report the first
 * pending one (lowest set bit in uas_bm) as a CHECK CONDITION, clear its
 * bit, and return check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only set when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
892 
893 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
894 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
895 				int arr_len)
896 {
897 	int act_len;
898 	struct scsi_data_buffer *sdb = scsi_in(scp);
899 
900 	if (!sdb->length)
901 		return 0;
902 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
903 		return DID_ERROR << 16;
904 
905 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
906 				      arr, arr_len);
907 	sdb->resid = scsi_bufflen(scp) - act_len;
908 
909 	return 0;
910 }
911 
912 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
913  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
914  * calls, not required to write in ascending offset order. Assumes resid
915  * set to scsi_bufflen() prior to any calls.
916  */
917 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
918 				  int arr_len, unsigned int off_dst)
919 {
920 	int act_len, n;
921 	struct scsi_data_buffer *sdb = scsi_in(scp);
922 	off_t skip = off_dst;
923 
924 	if (sdb->length <= off_dst)
925 		return 0;
926 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
927 		return DID_ERROR << 16;
928 
929 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
930 				       arr, arr_len, skip);
931 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
932 		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
933 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
934 	sdb->resid = min(sdb->resid, n);
935 	return 0;
936 }
937 
938 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
939  * 'arr' or -1 if error.
940  */
941 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
942 			       int arr_len)
943 {
944 	if (!scsi_bufflen(scp))
945 		return 0;
946 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
947 		return -1;
948 
949 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
950 }
951 
952 
static const char * inq_vendor_id = "Linux   ";	/* 8 chars, space padded */
static const char * inq_product_id = "scsi_debug      ";  /* 16 chars */
static const char *inq_product_rev = "0186";	/* version less '.' */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
960 
/* Device identification VPD page (0x83). Returns number of bytes placed in
 * arr. Emits a T10 vendor-id designator, then (when dev_id_num >= 0) a
 * logical unit designator (locally assigned UUID or NAA-3) and a relative
 * target port designator, then NAA-3 and SCSI name string designators for
 * the target port, target port group and target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_be *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length (header excluded) */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1048 
/* Canned payload (from the 4th byte onward) for the Software interface
 * identification VPD page: three 6-byte identifiers.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page (0x84). Returns number of
 * bytes placed in arr. */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
1061 
/* Append one network services descriptor to arr at offset num and return
 * the new offset. The URL payload is null terminated and padded out to a
 * multiple of four bytes.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, int num,
				int assoc_service, const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;	/* allow for trailing null */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to multiple of 4 */
	arr[num++] = assoc_service;	/* association + service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page (0x85). Returns number of bytes
 * placed in arr: two descriptors, one for the storage configuration
 * service and one for the logging service.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu association, storage configuration service */
	num = inquiry_vpd_85_descr(arr, num, 0x1,
				   "https://www.kernel.org/config");
	/* lu association, logging service */
	num = inquiry_vpd_85_descr(arr, num, 0x4,
				   "http://www.kernel.org/log");
	return num;
}
1096 
1097 /* SCSI ports VPD page */
1098 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1099 {
1100 	int num = 0;
1101 	int port_a, port_b;
1102 
1103 	port_a = target_dev_id + 1;
1104 	port_b = port_a + 1;
1105 	arr[num++] = 0x0;	/* reserved */
1106 	arr[num++] = 0x0;	/* reserved */
1107 	arr[num++] = 0x0;
1108 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1109 	memset(arr + num, 0, 6);
1110 	num += 6;
1111 	arr[num++] = 0x0;
1112 	arr[num++] = 12;	/* length tp descriptor */
1113 	/* naa-5 target port identifier (A) */
1114 	arr[num++] = 0x61;	/* proto=sas, binary */
1115 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1116 	arr[num++] = 0x0;	/* reserved */
1117 	arr[num++] = 0x8;	/* length */
1118 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1119 	num += 8;
1120 	arr[num++] = 0x0;	/* reserved */
1121 	arr[num++] = 0x0;	/* reserved */
1122 	arr[num++] = 0x0;
1123 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1124 	memset(arr + num, 0, 6);
1125 	num += 6;
1126 	arr[num++] = 0x0;
1127 	arr[num++] = 12;	/* length tp descriptor */
1128 	/* naa-5 target port identifier (B) */
1129 	arr[num++] = 0x61;	/* proto=sas, binary */
1130 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1131 	arr[num++] = 0x0;	/* reserved */
1132 	arr[num++] = 0x8;	/* length */
1133 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1134 	num += 8;
1135 
1136 	return num;
1137 }
1138 
1139 
/* Canned payload (from the 4th byte onward) for the ATA Information VPD
 * page (0x89); copied verbatim by inquiry_vpd_89() below.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page (0x89). Returns number of bytes placed in arr. */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1190 
1191 
/* Default payload (from the 4th byte onward) for the Block limits VPD
 * page; most fields are overwritten by inquiry_vpd_b0().
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1198 
1199 /* Block limits VPD page (SBC-3) */
1200 static int inquiry_vpd_b0(unsigned char *arr)
1201 {
1202 	unsigned int gran;
1203 
1204 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1205 
1206 	/* Optimal transfer length granularity */
1207 	gran = 1 << sdebug_physblk_exp;
1208 	put_unaligned_be16(gran, arr + 2);
1209 
1210 	/* Maximum Transfer Length */
1211 	if (sdebug_store_sectors > 0x400)
1212 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1213 
1214 	/* Optimal Transfer Length */
1215 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1216 
1217 	if (sdebug_lbpu) {
1218 		/* Maximum Unmap LBA Count */
1219 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1220 
1221 		/* Maximum Unmap Block Descriptor Count */
1222 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1223 	}
1224 
1225 	/* Unmap Granularity Alignment */
1226 	if (sdebug_unmap_alignment) {
1227 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1228 		arr[28] |= 0x80; /* UGAVALID */
1229 	}
1230 
1231 	/* Optimal Unmap Granularity */
1232 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1233 
1234 	/* Maximum WRITE SAME Length */
1235 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1236 
1237 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1238 
1239 	return sizeof(vpdb0_data);
1240 }
1241 
/* Block device characteristics VPD page (SBC-3), page 0xb1.
 * Returns number of bytes placed in arr.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	/* medium rotation rate of 1 means non rotating (e.g. solid state) */
	arr[1] = 1;
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1253 
1254 /* Logical block provisioning VPD page (SBC-4) */
1255 static int inquiry_vpd_b2(unsigned char *arr)
1256 {
1257 	memset(arr, 0, 0x4);
1258 	arr[0] = 0;			/* threshold exponent */
1259 	if (sdebug_lbpu)
1260 		arr[1] = 1 << 7;
1261 	if (sdebug_lbpws)
1262 		arr[1] |= 1 << 6;
1263 	if (sdebug_lbpws10)
1264 		arr[1] |= 1 << 5;
1265 	if (sdebug_lbprz && scsi_debug_lbp())
1266 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1267 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1268 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1269 	/* threshold_percentage=0 */
1270 	return 0x4;
1271 }
1272 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to the INQUIRY command: either one of the supported VPD pages
 * (when the EVPD bit is set) or the standard inquiry data. Returns 0, or
 * check_condition_result, or DID_REQUEUE << 16 on allocation failure.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clamp response to page length, allocation length and arr */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] = 0x10; /* claim: implicit TGPS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1424 
/* Informational exceptions control mode page (0x1c). Byte 2 bit 0x4 is
 * the TEST bit and the low nibble of byte 3 is MRIE; both are checked by
 * resp_requests() below.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1427 
1428 static int resp_requests(struct scsi_cmnd * scp,
1429 			 struct sdebug_dev_info * devip)
1430 {
1431 	unsigned char * sbuff;
1432 	unsigned char *cmd = scp->cmnd;
1433 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1434 	bool dsense;
1435 	int len = 18;
1436 
1437 	memset(arr, 0, sizeof(arr));
1438 	dsense = !!(cmd[1] & 1);
1439 	sbuff = scp->sense_buffer;
1440 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1441 		if (dsense) {
1442 			arr[0] = 0x72;
1443 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1444 			arr[2] = THRESHOLD_EXCEEDED;
1445 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1446 			len = 8;
1447 		} else {
1448 			arr[0] = 0x70;
1449 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1450 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1451 			arr[12] = THRESHOLD_EXCEEDED;
1452 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1453 		}
1454 	} else {
1455 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1456 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1457 			;	/* have sense and formats match */
1458 		else if (arr[0] <= 0x70) {
1459 			if (dsense) {
1460 				memset(arr, 0, 8);
1461 				arr[0] = 0x72;
1462 				len = 8;
1463 			} else {
1464 				memset(arr, 0, 18);
1465 				arr[0] = 0x70;
1466 				arr[7] = 0xa;
1467 			}
1468 		} else if (dsense) {
1469 			memset(arr, 0, 8);
1470 			arr[0] = 0x72;
1471 			arr[1] = sbuff[2];     /* sense key */
1472 			arr[2] = sbuff[12];    /* asc */
1473 			arr[3] = sbuff[13];    /* ascq */
1474 			len = 8;
1475 		} else {
1476 			memset(arr, 0, 18);
1477 			arr[0] = 0x70;
1478 			arr[2] = sbuff[1];
1479 			arr[7] = 0xa;
1480 			arr[12] = sbuff[1];
1481 			arr[13] = sbuff[3];
1482 		}
1483 
1484 	}
1485 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1486 	return fill_from_dev_buffer(scp, arr, len);
1487 }
1488 
1489 static int resp_start_stop(struct scsi_cmnd * scp,
1490 			   struct sdebug_dev_info * devip)
1491 {
1492 	unsigned char *cmd = scp->cmnd;
1493 	int power_cond, stop;
1494 
1495 	power_cond = (cmd[4] & 0xf0) >> 4;
1496 	if (power_cond) {
1497 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1498 		return check_condition_result;
1499 	}
1500 	stop = !(cmd[4] & 1);
1501 	atomic_xchg(&devip->stopped, stop);
1502 	return 0;
1503 }
1504 
1505 static sector_t get_sdebug_capacity(void)
1506 {
1507 	static const unsigned int gibibyte = 1073741824;
1508 
1509 	if (sdebug_virtual_gb > 0)
1510 		return (sector_t)sdebug_virtual_gb *
1511 			(gibibyte / sdebug_sector_size);
1512 	else
1513 		return sdebug_store_sectors;
1514 }
1515 
1516 #define SDEBUG_READCAP_ARR_SZ 8
1517 static int resp_readcap(struct scsi_cmnd * scp,
1518 			struct sdebug_dev_info * devip)
1519 {
1520 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1521 	unsigned int capac;
1522 
1523 	/* following just in case virtual_gb changed */
1524 	sdebug_capacity = get_sdebug_capacity();
1525 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1526 	if (sdebug_capacity < 0xffffffff) {
1527 		capac = (unsigned int)sdebug_capacity - 1;
1528 		put_unaligned_be32(capac, arr + 0);
1529 	} else
1530 		put_unaligned_be32(0xffffffff, arr + 0);
1531 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1532 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1533 }
1534 
1535 #define SDEBUG_READCAP16_ARR_SZ 32
1536 static int resp_readcap16(struct scsi_cmnd * scp,
1537 			  struct sdebug_dev_info * devip)
1538 {
1539 	unsigned char *cmd = scp->cmnd;
1540 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1541 	int alloc_len;
1542 
1543 	alloc_len = get_unaligned_be32(cmd + 10);
1544 	/* following just in case virtual_gb changed */
1545 	sdebug_capacity = get_sdebug_capacity();
1546 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1547 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1548 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1549 	arr[13] = sdebug_physblk_exp & 0xf;
1550 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1551 
1552 	if (scsi_debug_lbp()) {
1553 		arr[14] |= 0x80; /* LBPME */
1554 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1555 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1556 		 * in the wider field maps to 0 in this field.
1557 		 */
1558 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1559 			arr[14] |= 0x40;
1560 	}
1561 
1562 	arr[15] = sdebug_lowest_aligned & 0xff;
1563 
1564 	if (have_dif_prot) {
1565 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1566 		arr[12] |= 1; /* PROT_EN */
1567 	}
1568 
1569 	return fill_from_dev_buffer(scp, arr,
1570 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1571 }
1572 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* Respond to REPORT TARGET PORT GROUPS: two groups of one port each, with
 * the group holding port B reported as unavailable. Returns 0, or
 * DID_REQUEUE << 16 on allocation failure.
 */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* returned data length (header excluded) */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1650 
/* Response to REPORT SUPPORTED OPERATION CODES (SPC-4: MAINTENANCE IN,
 * service action 0xc). Walks opcode_info_arr to report either all
 * supported commands or a single command's descriptor. The RCTD bit
 * (cdb byte 2, bit 7) asks for a command timeouts descriptor per entry. */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return commands timeout descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap working buffer at 8 KB; pad allocation so descriptor writes
	 * just past a_len cannot overrun */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each entry is 8 bytes, or 20 with a timeouts descriptor */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* then one entry per attached (same opcode) variant */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa (cdb byte 4) */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported per this standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* scan attached variants for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* scan attached variants for the sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* copy out the cdb usage bit mask */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp response to buffer size, then to the allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1801 
1802 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1803 			  struct sdebug_dev_info *devip)
1804 {
1805 	bool repd;
1806 	u32 alloc_len, len;
1807 	u8 arr[16];
1808 	u8 *cmd = scp->cmnd;
1809 
1810 	memset(arr, 0, sizeof(arr));
1811 	repd = !!(cmd[2] & 0x80);
1812 	alloc_len = get_unaligned_be32(cmd + 6);
1813 	if (alloc_len < 4) {
1814 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1815 		return check_condition_result;
1816 	}
1817 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1818 	arr[1] = 0x1;		/* ITNRS */
1819 	if (repd) {
1820 		arr[3] = 0xc;
1821 		len = 16;
1822 	} else
1823 		len = 4;
1824 
1825 	len = (len < alloc_len) ? len : alloc_len;
1826 	return fill_from_dev_buffer(scp, arr, len);
1827 }
1828 
1829 /* <<Following mode page info copied from ST318451LW>> */
1830 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char def_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0,
					       0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
1841 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char def_pg[] = {0x2, 0xe, 128, 128, 0, 10,
					       0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
1852 
1853 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1854 {       /* Format device page for mode_sense */
1855 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1856 				     0, 0, 0, 0, 0, 0, 0, 0,
1857 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1858 
1859 	memcpy(p, format_pg, sizeof(format_pg));
1860 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1861 	put_unaligned_be16(sdebug_sector_size, p + 12);
1862 	if (sdebug_removable)
1863 		p[20] |= 0x20; /* should agree with INQUIRY */
1864 	if (1 == pcontrol)
1865 		memset(p + 2, 0, sizeof(format_pg) - 2);
1866 	return sizeof(format_pg);
1867 }
1868 
/* Current values of the Caching mode page (0x8); byte 2 (WCE bit) may be
 * cleared by resp_caching_pg() and the whole page may be rewritten by a
 * MODE SELECT (see resp_mode_select()). */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1872 
1873 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1874 { 	/* Caching page for mode_sense */
1875 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1876 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1877 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1878 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1879 
1880 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1881 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1882 	memcpy(p, caching_pg, sizeof(caching_pg));
1883 	if (1 == pcontrol)
1884 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1885 	else if (2 == pcontrol)
1886 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1887 	return sizeof(caching_pg);
1888 }
1889 
/* Current values of the Control mode page (0xa); byte 2 (D_SENSE) and
 * byte 5 (ATO) are refreshed from module state in resp_ctrl_m_pg(), and
 * the page may be rewritten by a MODE SELECT (see resp_mode_select()). */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1892 
1893 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1894 { 	/* Control mode page for mode_sense */
1895 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1896 				        0, 0, 0, 0};
1897 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1898 				     0, 0, 0x2, 0x4b};
1899 
1900 	if (sdebug_dsense)
1901 		ctrl_m_pg[2] |= 0x4;
1902 	else
1903 		ctrl_m_pg[2] &= ~0x4;
1904 
1905 	if (sdebug_ato)
1906 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1907 
1908 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1909 	if (1 == pcontrol)
1910 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1911 	else if (2 == pcontrol)
1912 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1913 	return sizeof(ctrl_m_pg);
1914 }
1915 
1916 
1917 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1918 {	/* Informational Exceptions control mode page for mode_sense */
1919 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1920 				       0, 0, 0x0, 0x0};
1921 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1922 				      0, 0, 0x0, 0x0};
1923 
1924 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1925 	if (1 == pcontrol)
1926 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1927 	else if (2 == pcontrol)
1928 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1929 	return sizeof(iec_m_pg);
1930 }
1931 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page 0x19, short format, for MODE SENSE */
	static const unsigned char sf_pg[] = {0x19, 0x6, 0x6, 0x0,
					      0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sf_pg, sizeof(sf_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 2, 0, sizeof(sf_pg) - 2);
	return sizeof(sf_pg);
}
1942 
1943 
1944 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1945 			      int target_dev_id)
1946 {	/* SAS phy control and discover mode page for mode_sense */
1947 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1948 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1949 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1950 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1951 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1952 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1953 		    0, 0, 0, 0, 0, 0, 0, 0,
1954 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1955 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1956 		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
1957 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1958 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1959 		    0, 0, 0, 0, 0, 0, 0, 0,
1960 		};
1961 	int port_a, port_b;
1962 
1963 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
1964 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
1965 	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
1966 	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
1967 	port_a = target_dev_id + 1;
1968 	port_b = port_a + 1;
1969 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1970 	put_unaligned_be32(port_a, p + 20);
1971 	put_unaligned_be32(port_b, p + 48 + 20);
1972 	if (1 == pcontrol)
1973 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1974 	return sizeof(sas_pcd_m_pg);
1975 }
1976 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x19/0x2) */
	static const unsigned char sha_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6,
		    0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sha_pg, sizeof(sha_pg));
	if (pcontrol == 1)	/* changeable values: report all zeros */
		memset(p + 4, 0, sizeof(sha_pg) - 4);
	return sizeof(sha_pg);
}
1988 
1989 #define SDEBUG_MAX_MSENSE_SZ 256
1990 
/* Response to MODE SENSE(6) and MODE SENSE(10). Builds the mode parameter
 * header, an optional block descriptor (8 or 16 bytes) and the requested
 * mode page(s) in arr[], then copies out at most alloc_len bytes. */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* mode page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA descriptor wanted */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		/* short descriptor: 32-bit block count, saturates at ~0 */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		/* long LBA descriptor: 64-bit block count */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* only page 0x19 (and 0x3f) have subpages here */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
        case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
                break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
	        }
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
                }
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* mode data length field excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2148 
2149 #define SDEBUG_MAX_MSELECT_SZ 512
2150 
/* Response to MODE SELECT(6) and MODE SELECT(10). Fetches the parameter
 * list from the initiator and updates one of the writable mode pages
 * (caching 0x8, control 0xa, informational exceptions 0x1c); any other
 * page, or a length mismatch, is rejected with a check condition. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* MODE DATA LENGTH is reserved in MODE SELECT parameter lists, so
	 * anything beyond (nearly) zero is rejected here */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip header and block descriptor(s) to reach the mode page */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* sub-page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* page length must match the module's stored page exactly */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* keep module D_SENSE state in sync with the page */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2228 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd): two parameters (38 C and 65 C) */
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2238 
2239 static int resp_ie_l_pg(unsigned char * arr)
2240 {
2241 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2242 		};
2243 
2244         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2245 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2246 		arr[4] = THRESHOLD_EXCEEDED;
2247 		arr[5] = 0xff;
2248 	}
2249         return sizeof(ie_l_pg);
2250 }
2251 
2252 #define SDEBUG_MAX_LSENSE_SZ 512
2253 
/* Response to LOG SENSE. Supports the supported-pages (0x0), temperature
 * (0xd) and informational exceptions (0x2f) log pages, plus the matching
 * subpage lists when subpage 0xff is requested. */
static int resp_log_sense(struct scsi_cmnd * scp,
                          struct sdebug_dev_info * devip)
{
	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: not supported */
	sp = cmd[1] & 0x1;	/* save parameters: not supported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* NOTE(review): output is clamped with the INQUIRY array limit even
	 * though arr is SDEBUG_MAX_LSENSE_SZ bytes — confirm intended */
	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
2332 
2333 static int check_device_access_params(struct scsi_cmnd *scp,
2334 				      unsigned long long lba, unsigned int num)
2335 {
2336 	if (lba + num > sdebug_capacity) {
2337 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2338 		return check_condition_result;
2339 	}
2340 	/* transfer length excessive (tie in to block limits VPD page) */
2341 	if (num > sdebug_store_sectors) {
2342 		/* needs work to find which cdb byte 'num' comes from */
2343 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2344 		return check_condition_result;
2345 	}
2346 	return 0;
2347 }
2348 
/* Copies data between the command's scatter-gather list and the RAM
 * "fake store" backing the simulated media. The store is treated as
 * circular: an access that runs past sdebug_store_sectors wraps back to
 * the start. Returns number of bytes copied or -1 if error (data
 * direction mismatch). */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* block = offset within the store (do_div leaves quotient in lba) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;	/* wrapped part */

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* copy the wrapped tail from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2389 
2390 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2391  * arr into fake_store(lba,num) and return true. If comparison fails then
2392  * return false. */
2393 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2394 {
2395 	bool res;
2396 	u64 block, rest = 0;
2397 	u32 store_blks = sdebug_store_sectors;
2398 	u32 lb_size = sdebug_sector_size;
2399 
2400 	block = do_div(lba, store_blks);
2401 	if (block + num > store_blks)
2402 		rest = block + num - store_blks;
2403 
2404 	res = !memcmp(fake_storep + (block * lb_size), arr,
2405 		      (num - rest) * lb_size);
2406 	if (!res)
2407 		return res;
2408 	if (rest)
2409 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2410 			     rest * lb_size);
2411 	if (!res)
2412 		return res;
2413 	arr += num * lb_size;
2414 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2415 	if (rest)
2416 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2417 		       rest * lb_size);
2418 	return res;
2419 }
2420 
2421 static __be16 dif_compute_csum(const void *buf, int len)
2422 {
2423 	__be16 csum;
2424 
2425 	if (sdebug_guard)
2426 		csum = (__force __be16)ip_compute_csum(buf, len);
2427 	else
2428 		csum = cpu_to_be16(crc_t10dif(buf, len));
2429 
2430 	return csum;
2431 }
2432 
/* Verify one protection information tuple against the sector's data.
 * Returns 0 on success, 0x1 on a guard (checksum) mismatch, or 0x3 on a
 * reference tag mismatch; callers feed a non-zero result into the sense
 * data as the additional sense code qualifier. */
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: ref tag must equal the low 32 bits of the sector number */
	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: ref tag must equal the expected initial LBA */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2459 
/* Copy protection information tuples for 'sectors' sectors between
 * dif_storep and the command's protection scatter-gather list:
 * read=true copies store->sgl, else sgl->store. Like the data store,
 * dif_storep is circular, so copies wrap at its end. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the part of this chunk beyond the store's end */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part comes from/goes to start of store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2502 
/* Check stored protection tuples for sectors [start_sec, start_sec +
 * sectors) and, when all pass, copy them out to the command's protection
 * sgl. Returns 0 on success or a dif_verify() code (1=guard, 3=ref tag)
 * on the first mismatch. */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct sd_dif_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		/* app tag of 0xffff: checking is disabled for this sector */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2531 
/* Response to READ(6/10/12/16/32) and the read half of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, applies optional error injection and
 * protection (DIF/DIX) checks, then copies data from the fake store. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;		/* expected initial LBA (DIF type 2 ref tag) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* for type 2, any RDPROTECT setting (cmd[1] bits 5-7) is
		 * rejected as an invalid opcode */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* inject a medium error when the access overlaps the magic range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-read error injection requested via the queued command */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2672 
/*
 * Dump @len bytes of a sector to the kernel log, 16 bytes per line.
 * Printable ASCII is shown as " c ", everything else as a 2-digit hex pair.
 * Fix: the printable range is 0x20..0x7e inclusive; the old test
 * (c < 0x7e) wrongly excluded '~'.
 * NOTE(review): like the original, a final partial line still reads a
 * full 16 bytes from @buf — callers pass sector-sized buffers, so the
 * trailing bytes exist; confirm if ever called with an odd @len.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7f)	/* printable ASCII */
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2694 
/*
 * Verify the T10 DIF protection information accompanying a DIX write.
 * Walks the protection sglist in lock-step with the data sglist, checking
 * one sd_dif_tuple per logical block via dif_verify().  On success the PI
 * is copied into dif_storep (dif_copy_prot()) and 0 is returned; on a
 * mismatch the offending sector is dumped and dif_verify()'s non-zero
 * code is returned (caller converts it into sense data).
 * Mappings use SG_MITER_ATOMIC, so no sleeping between miter start/stop.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct sd_dif_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* byte offset within current protection page */
	int dpage_offset;	/* byte offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		/* data list ran out before protection list: internal error */
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct sd_dif_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* tell the miter how much of the data page we consumed */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2766 
2767 static unsigned long lba_to_map_index(sector_t lba)
2768 {
2769 	if (sdebug_unmap_alignment)
2770 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2771 	sector_div(lba, sdebug_unmap_granularity);
2772 	return lba;
2773 }
2774 
2775 static sector_t map_index_to_lba(unsigned long index)
2776 {
2777 	sector_t lba = index * sdebug_unmap_granularity;
2778 
2779 	if (sdebug_unmap_alignment)
2780 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2781 	return lba;
2782 }
2783 
2784 static unsigned int map_state(sector_t lba, unsigned int *num)
2785 {
2786 	sector_t end;
2787 	unsigned int mapped;
2788 	unsigned long index;
2789 	unsigned long next;
2790 
2791 	index = lba_to_map_index(lba);
2792 	mapped = test_bit(index, map_storep);
2793 
2794 	if (mapped)
2795 		next = find_next_zero_bit(map_storep, map_size, index);
2796 	else
2797 		next = find_next_bit(map_storep, map_size, index);
2798 
2799 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2800 	*num = end - lba;
2801 	return mapped;
2802 }
2803 
2804 static void map_region(sector_t lba, unsigned int len)
2805 {
2806 	sector_t end = lba + len;
2807 
2808 	while (lba < end) {
2809 		unsigned long index = lba_to_map_index(lba);
2810 
2811 		if (index < map_size)
2812 			set_bit(index, map_storep);
2813 
2814 		lba = map_index_to_lba(index + 1);
2815 	}
2816 }
2817 
/*
 * Deallocate [lba, lba+len) in the LBP bitmap.  Only granules that are
 * wholly contained in the range (aligned start AND full granularity fits
 * before @end) are cleared; partial granules at the edges stay mapped.
 * Depending on sdebug_lbprz the backing store is scrubbed to 0x00
 * (LBPRZ=1) or 0xff (LBPRZ=2), and any PI in dif_storep is invalidated.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* clear only if a full, aligned granule lies in range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* 0xff tuples == "no PI" for this granule */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2845 
/*
 * Respond to WRITE(6/10/12/16/32) and the write half of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, performs protection and bounds checks,
 * optionally verifies DIX protection data, then copies the data-out
 * buffer into the fake store under the atomic_rw write lock, recording
 * mapped regions when logical block provisioning is active.
 * Returns 0 on success or a check/illegal condition result.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;		/* expected initial LBA (WRITE(32) only) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and block count per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* WRPROTECT field sanity versus the configured DIF type */
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* error-injection flags set via the sdebug "opts" module parameter */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
2966 
/*
 * Common worker for WRITE SAME(10/16).  With @unmap set (and LBP active)
 * the range is simply deallocated.  Otherwise one logical block is
 * prepared — zeroes when @ndob (no data-out buffer), else fetched from
 * the data-out buffer — and replicated across the remaining blocks.
 * All store updates happen under the atomic_rw write lock.
 * NOTE(review): @ei_lba is currently unused by this function.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;		/* byte offset of @lba in fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3017 
3018 static int resp_write_same_10(struct scsi_cmnd *scp,
3019 			      struct sdebug_dev_info *devip)
3020 {
3021 	u8 *cmd = scp->cmnd;
3022 	u32 lba;
3023 	u16 num;
3024 	u32 ei_lba = 0;
3025 	bool unmap = false;
3026 
3027 	if (cmd[1] & 0x8) {
3028 		if (sdebug_lbpws10 == 0) {
3029 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3030 			return check_condition_result;
3031 		} else
3032 			unmap = true;
3033 	}
3034 	lba = get_unaligned_be32(cmd + 2);
3035 	num = get_unaligned_be16(cmd + 7);
3036 	if (num > sdebug_write_same_length) {
3037 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3038 		return check_condition_result;
3039 	}
3040 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3041 }
3042 
3043 static int resp_write_same_16(struct scsi_cmnd *scp,
3044 			      struct sdebug_dev_info *devip)
3045 {
3046 	u8 *cmd = scp->cmnd;
3047 	u64 lba;
3048 	u32 num;
3049 	u32 ei_lba = 0;
3050 	bool unmap = false;
3051 	bool ndob = false;
3052 
3053 	if (cmd[1] & 0x8) {	/* UNMAP */
3054 		if (sdebug_lbpws == 0) {
3055 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3056 			return check_condition_result;
3057 		} else
3058 			unmap = true;
3059 	}
3060 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3061 		ndob = true;
3062 	lba = get_unaligned_be64(cmd + 2);
3063 	num = get_unaligned_be32(cmd + 10);
3064 	if (num > sdebug_write_same_length) {
3065 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3066 		return check_condition_result;
3067 	}
3068 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3069 }
3070 
3071 /* Note the mode field is in the same position as the (lower) service action
3072  * field. For the Report supported operation codes command, SPC-4 suggests
3073  * each mode of this command should be reported separately; for future. */
/*
 * WRITE BUFFER: only the microcode-download modes are acted upon, and
 * the only effect is to raise the appropriate unit attentions (this
 * driver has no firmware to update).  Other modes succeed silently.
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				/* issuing LU gets bus-reset UA only */
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3119 
/*
 * COMPARE AND WRITE (opcode 0x89).  The data-out buffer carries 2*num
 * blocks: the compare data followed by the write data.  Both halves are
 * fetched into a temporary buffer by briefly re-pointing fake_storep at
 * it (safe because the atomic_rw write lock is held for the duration),
 * then comp_write_worker() compares and conditionally writes the store.
 * NUMBER OF LOGICAL BLOCKS is a single byte, so num <= 255.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;		/* blocks of data-out: compare + write */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3195 
/* UNMAP parameter list block descriptor (16 bytes each, see SBC) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to deallocate */
	__be32	blocks;		/* number of logical blocks */
	__be32	__reserved;
};
3201 
3202 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3203 {
3204 	unsigned char *buf;
3205 	struct unmap_block_desc *desc;
3206 	unsigned int i, payload_len, descriptors;
3207 	int ret;
3208 	unsigned long iflags;
3209 
3210 
3211 	if (!scsi_debug_lbp())
3212 		return 0;	/* fib and say its done */
3213 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3214 	BUG_ON(scsi_bufflen(scp) != payload_len);
3215 
3216 	descriptors = (payload_len - 8) / 16;
3217 	if (descriptors > sdebug_unmap_max_desc) {
3218 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3219 		return check_condition_result;
3220 	}
3221 
3222 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3223 	if (!buf) {
3224 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3225 				INSUFF_RES_ASCQ);
3226 		return check_condition_result;
3227 	}
3228 
3229 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3230 
3231 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3232 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3233 
3234 	desc = (void *)&buf[8];
3235 
3236 	write_lock_irqsave(&atomic_rw, iflags);
3237 
3238 	for (i = 0 ; i < descriptors ; i++) {
3239 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3240 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3241 
3242 		ret = check_device_access_params(scp, lba, num);
3243 		if (ret)
3244 			goto out;
3245 
3246 		unmap_region(lba, num);
3247 	}
3248 
3249 	ret = 0;
3250 
3251 out:
3252 	write_unlock_irqrestore(&atomic_rw, iflags);
3253 	kfree(buf);
3254 
3255 	return ret;
3256 }
3257 
3258 #define SDEBUG_GET_LBA_STATUS_LEN 32
3259 
3260 static int resp_get_lba_status(struct scsi_cmnd *scp,
3261 			       struct sdebug_dev_info *devip)
3262 {
3263 	u8 *cmd = scp->cmnd;
3264 	u64 lba;
3265 	u32 alloc_len, mapped, num;
3266 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3267 	int ret;
3268 
3269 	lba = get_unaligned_be64(cmd + 2);
3270 	alloc_len = get_unaligned_be32(cmd + 10);
3271 
3272 	if (alloc_len < 24)
3273 		return 0;
3274 
3275 	ret = check_device_access_params(scp, lba, 1);
3276 	if (ret)
3277 		return ret;
3278 
3279 	if (scsi_debug_lbp())
3280 		mapped = map_state(lba, &num);
3281 	else {
3282 		mapped = 1;
3283 		/* following just in case virtual_gb changed */
3284 		sdebug_capacity = get_sdebug_capacity();
3285 		if (sdebug_capacity - lba <= 0xffffffff)
3286 			num = sdebug_capacity - lba;
3287 		else
3288 			num = 0xffffffff;
3289 	}
3290 
3291 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3292 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3293 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3294 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3295 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3296 
3297 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3298 }
3299 
3300 #define RL_BUCKET_ELEMS 8
3301 
3302 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3303  * (W-LUN), the normal Linux scanning logic does not associate it with a
3304  * device (e.g. /dev/sg7). The following magic will make that association:
3305  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3306  * where <n> is a host number. If there are multiple targets in a host then
3307  * the above will associate a W-LUN to each target. To only get a W-LUN
3308  * for target 2, then use "echo '- 2 49409' > scan" .
3309  */
/*
 * REPORT LUNS.  Builds the response in RL_BUCKET_ELEMS-sized chunks:
 * arr holds 8 eight-byte elements, and chunk 0 donates its first
 * element to the 8-byte response header (same size as a scsi_lun).
 * Each full chunk is streamed to the data-in buffer at an increasing
 * offset via p_fill_from_dev_buffer(); the final partial chunk (which
 * may also carry the W-LUN) is sent last.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	/* when no_lun_0, LUN 0 is not reported (numbering starts at 1) */
	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first chunk: element 0 holds the header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial chunk: exit and flush below */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LU after the last LUN */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3400 
/*
 * XOR step of XDWRITEREAD(10): XOR the bidirectional command's data-in
 * buffer (already filled by the read) with its data-out buffer, in
 * place.  The data-out side is first linearized into a temporary
 * allocation, then the data-in sglist is walked with an atomic miter.
 * NOTE(review): @lba, @num and @devip are currently unused here.
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3436 
3437 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3438 			       struct sdebug_dev_info *devip)
3439 {
3440 	u8 *cmd = scp->cmnd;
3441 	u64 lba;
3442 	u32 num;
3443 	int errsts;
3444 
3445 	if (!scsi_bidi_cmnd(scp)) {
3446 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3447 				INSUFF_RES_ASCQ);
3448 		return check_condition_result;
3449 	}
3450 	errsts = resp_read_dt0(scp, devip);
3451 	if (errsts)
3452 		return errsts;
3453 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3454 		errsts = resp_write_dt0(scp, devip);
3455 		if (errsts)
3456 			return errsts;
3457 	}
3458 	lba = get_unaligned_be32(cmd + 2);
3459 	num = get_unaligned_be16(cmd + 7);
3460 	return resp_xdwriteread(scp, lba, num, devip);
3461 }
3462 
3463 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3464 {
3465 	struct sdebug_queue *sqp = sdebug_q_arr;
3466 
3467 	if (sdebug_mq_active) {
3468 		u32 tag = blk_mq_unique_tag(cmnd->request);
3469 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3470 
3471 		if (unlikely(hwq >= submit_queues)) {
3472 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3473 			hwq %= submit_queues;
3474 		}
3475 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3476 		return sqp + hwq;
3477 	} else
3478 		return sqp;
3479 }
3480 
3481 /* Queued (deferred) command completions converge here. */
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completion on a different CPU than the one that issued */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	/* sanity-check the slot index before touching qc_arr */
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* retired_max_queue > 0 means user shrank max_queue while busy */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink the retirement watermark as high slots drain */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3545 
3546 /* When high resolution timer goes off this function is called. */
3547 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3548 {
3549 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3550 						  hrt);
3551 	sdebug_q_cmd_complete(sd_dp);
3552 	return HRTIMER_NORESTART;
3553 }
3554 
3555 /* When work queue schedules work, it calls this function. */
3556 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3557 {
3558 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3559 						  ew.work);
3560 	sdebug_q_cmd_complete(sd_dp);
3561 }
3562 
/* shared LU name state used when sdebug_uuid_ctl == 2 (all LUs report
 * the same UUID); generated lazily on first device creation */
static bool got_shared_uuid;
static uuid_be shared_uuid;
3565 
3566 static struct sdebug_dev_info *sdebug_device_create(
3567 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3568 {
3569 	struct sdebug_dev_info *devip;
3570 
3571 	devip = kzalloc(sizeof(*devip), flags);
3572 	if (devip) {
3573 		if (sdebug_uuid_ctl == 1)
3574 			uuid_be_gen(&devip->lu_name);
3575 		else if (sdebug_uuid_ctl == 2) {
3576 			if (got_shared_uuid)
3577 				devip->lu_name = shared_uuid;
3578 			else {
3579 				uuid_be_gen(&shared_uuid);
3580 				got_shared_uuid = true;
3581 				devip->lu_name = shared_uuid;
3582 			}
3583 		}
3584 		devip->sdbg_host = sdbg_host;
3585 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3586 	}
3587 	return devip;
3588 }
3589 
3590 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3591 {
3592 	struct sdebug_host_info *sdbg_host;
3593 	struct sdebug_dev_info *open_devip = NULL;
3594 	struct sdebug_dev_info *devip;
3595 
3596 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3597 	if (!sdbg_host) {
3598 		pr_err("Host info NULL\n");
3599 		return NULL;
3600         }
3601 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3602 		if ((devip->used) && (devip->channel == sdev->channel) &&
3603                     (devip->target == sdev->id) &&
3604                     (devip->lun == sdev->lun))
3605                         return devip;
3606 		else {
3607 			if ((!devip->used) && (!open_devip))
3608 				open_devip = devip;
3609 		}
3610 	}
3611 	if (!open_devip) { /* try and make a new one */
3612 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3613 		if (!open_devip) {
3614 			pr_err("out of memory at line %d\n", __LINE__);
3615 			return NULL;
3616 		}
3617 	}
3618 
3619 	open_devip->channel = sdev->channel;
3620 	open_devip->target = sdev->id;
3621 	open_devip->lun = sdev->lun;
3622 	open_devip->sdbg_host = sdbg_host;
3623 	atomic_set(&open_devip->num_in_q, 0);
3624 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3625 	open_devip->used = true;
3626 	return open_devip;
3627 }
3628 
/*
 * scsi_host_template slave_alloc hook: marks the request queue as
 * bidirectional-capable (needed by XDWRITEREAD(10)).
 */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3637 
/*
 * scsi_host_template slave_configure hook: binds (or creates) the per-LU
 * sdebug_dev_info, widens the host's max cdb length and removes the
 * segment size cap.  Returns 1 (device set offline) if no per-LU info
 * can be obtained.
 */
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	/* -1U: effectively no limit on segment size */
	blk_queue_max_segment_size(sdp->request_queue, -1U);
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	return 0;
}
3659 
/*
 * scsi_host_template slave_destroy hook: releases the per-LU info slot
 * back for re-use (the structure itself stays on the host's list).
 */
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = false;
		sdp->hostdata = NULL;
	}
}
3674 
3675 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3676 {
3677 	if (!sd_dp)
3678 		return;
3679 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3680 		hrtimer_cancel(&sd_dp->hrt);
3681 	else if (sdebug_jdelay < 0)
3682 		cancel_work_sync(&sd_dp->ew.work);
3683 }
3684 
/* If @cmnd is found on one of the submission queues, cancels its deferred
 * timer or work item and returns true; else returns false. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* if max_queue was recently reduced, slots above the new
		 * limit may still be in flight; scan up to the retired max */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock before cancelling: in the
				 * work-queue case cancel_work_sync() may
				 * sleep, which is illegal under a spinlock */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3724 
/* Deletes (stops) timers or work queues of all queued commands across
 * every submission queue. Used by host reset and module teardown paths. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* release the lock around the cancel
				 * (cancel_work_sync() may sleep), then
				 * re-take it before continuing the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3757 
3758 /* Free queued command memory on heap */
3759 static void free_all_queued(void)
3760 {
3761 	int j, k;
3762 	struct sdebug_queue *sqp;
3763 	struct sdebug_queued_cmd *sqcp;
3764 
3765 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3766 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3767 			sqcp = &sqp->qc_arr[k];
3768 			kfree(sqcp->sd_dp);
3769 			sqcp->sd_dp = NULL;
3770 		}
3771 	}
3772 }
3773 
3774 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3775 {
3776 	bool ok;
3777 
3778 	++num_aborts;
3779 	if (SCpnt) {
3780 		ok = stop_queued_cmnd(SCpnt);
3781 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3782 			sdev_printk(KERN_INFO, SCpnt->device,
3783 				    "%s: command%s found\n", __func__,
3784 				    ok ? "" : " not");
3785 	}
3786 	return SUCCESS;
3787 }
3788 
3789 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3790 {
3791 	++num_dev_resets;
3792 	if (SCpnt && SCpnt->device) {
3793 		struct scsi_device *sdp = SCpnt->device;
3794 		struct sdebug_dev_info *devip =
3795 				(struct sdebug_dev_info *)sdp->hostdata;
3796 
3797 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3798 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3799 		if (devip)
3800 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3801 	}
3802 	return SUCCESS;
3803 }
3804 
3805 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3806 {
3807 	struct sdebug_host_info *sdbg_host;
3808 	struct sdebug_dev_info *devip;
3809 	struct scsi_device *sdp;
3810 	struct Scsi_Host *hp;
3811 	int k = 0;
3812 
3813 	++num_target_resets;
3814 	if (!SCpnt)
3815 		goto lie;
3816 	sdp = SCpnt->device;
3817 	if (!sdp)
3818 		goto lie;
3819 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3820 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3821 	hp = sdp->host;
3822 	if (!hp)
3823 		goto lie;
3824 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3825 	if (sdbg_host) {
3826 		list_for_each_entry(devip,
3827 				    &sdbg_host->dev_info_list,
3828 				    dev_list)
3829 			if (devip->target == sdp->id) {
3830 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3831 				++k;
3832 			}
3833 	}
3834 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3835 		sdev_printk(KERN_INFO, sdp,
3836 			    "%s: %d device(s) found in target\n", __func__, k);
3837 lie:
3838 	return SUCCESS;
3839 }
3840 
3841 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3842 {
3843 	struct sdebug_host_info *sdbg_host;
3844 	struct sdebug_dev_info *devip;
3845         struct scsi_device * sdp;
3846         struct Scsi_Host * hp;
3847 	int k = 0;
3848 
3849 	++num_bus_resets;
3850 	if (!(SCpnt && SCpnt->device))
3851 		goto lie;
3852 	sdp = SCpnt->device;
3853 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3854 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3855 	hp = sdp->host;
3856 	if (hp) {
3857 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3858 		if (sdbg_host) {
3859 			list_for_each_entry(devip,
3860                                             &sdbg_host->dev_info_list,
3861 					    dev_list) {
3862 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3863 				++k;
3864 			}
3865 		}
3866 	}
3867 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3868 		sdev_printk(KERN_INFO, sdp,
3869 			    "%s: %d device(s) found in host\n", __func__, k);
3870 lie:
3871 	return SUCCESS;
3872 }
3873 
/* Error-handler callback: host reset. Flags a bus-reset UA on every device
 * of every sdebug host, then cancels all deferred responses. */
static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_host_resets;
	/* NOTE(review): SCpnt is dereferenced without a NULL check here and
	 * below, unlike the abort/device/target/bus handlers — presumably
	 * the mid-layer always supplies a command for host reset; confirm. */
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
        spin_lock(&sdebug_host_list_lock);
        list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;	/* count of devices given a unit attention */
		}
        }
        spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();	/* cancel pending deferred completions */
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
3898 
/* Write a DOS/MBR partition table into the start of the ram store @ramp,
 * dividing the remaining capacity into sdebug_num_parts equal partitions.
 * Only called at module init (__init) and only when store >= 1 MiB. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	/* partition start sectors; entry [num_parts] is the end sentinel */
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track (sdebug_sectors_per) is reserved for the MBR itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
        starts[0] = sdebug_sectors_per;
	/* align each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* 0 terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert LBA to cylinder/head/sector (CHS) for the entry */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3948 
3949 static void block_unblock_all_queues(bool block)
3950 {
3951 	int j;
3952 	struct sdebug_queue *sqp;
3953 
3954 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3955 		atomic_set(&sqp->blocked, (int)block);
3956 }
3957 
3958 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3959  * commands will be processed normally before triggers occur.
3960  */
3961 static void tweak_cmnd_count(void)
3962 {
3963 	int count, modulo;
3964 
3965 	modulo = abs(sdebug_every_nth);
3966 	if (modulo < 2)
3967 		return;
3968 	block_unblock_all_queues(true);
3969 	count = atomic_read(&sdebug_cmnd_count);
3970 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3971 	block_unblock_all_queues(false);
3972 }
3973 
3974 static void clear_queue_stats(void)
3975 {
3976 	atomic_set(&sdebug_cmnd_count, 0);
3977 	atomic_set(&sdebug_completions, 0);
3978 	atomic_set(&sdebug_miss_cpus, 0);
3979 	atomic_set(&sdebug_a_tsf, 0);
3980 }
3981 
3982 static void setup_inject(struct sdebug_queue *sqp,
3983 			 struct sdebug_queued_cmd *sqcp)
3984 {
3985 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3986 		return;
3987 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3988 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3989 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3990 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3991 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3992 }
3993 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 * @delta_jiff: 0 -> respond in caller's thread; > 0 -> delay in jiffies
 * via hrtimer; < 0 -> defer via work queue (with sdebug_ndelay==0).
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result, int delta_jiff)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (unlikely(sdebug_verbose && scsi_result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, scsi_result);
	if (delta_jiff == 0)
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	/* simulate TASK SET FULL when the per-device queue is exhausted */
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* optionally inject a rare TASK SET FULL near queue-full */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's in-use bitmap */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	cmnd->result = scsi_result;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (delta_jiff > 0 || sdebug_ndelay > 0) {
		/* defer completion via an hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			struct timespec ts;

			jiffies_to_timespec(delta_jiff, &ts);
			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
		} else
			kt = ktime_set(0, sdebug_ndelay);
		if (NULL == sd_dp) {
			/* lazily allocate the defer struct; re-used until
			 * free_all_queued() (e.g. on a delay change) */
			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (NULL == sd_dp) {
			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		schedule_work(&sd_dp->ew.work);
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4129 
4130 /* Note: The following macros create attribute files in the
4131    /sys/module/scsi_debug/parameters directory. Unfortunately this
4132    driver is unaware of a change and cannot trigger auxiliary actions
4133    as it can when the corresponding attribute in the
4134    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4135  */
/* Mode S_IRUGO alone makes a parameter read-only after module load;
 * adding S_IWUSR lets root change it via
 * /sys/module/scsi_debug/parameters/<name>. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line parameter descriptions, shown by modinfo(8). */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4230 
4231 #define SDEBUG_INFO_LEN 256
4232 static char sdebug_info[SDEBUG_INFO_LEN];
4233 
4234 static const char * scsi_debug_info(struct Scsi_Host * shp)
4235 {
4236 	int k;
4237 
4238 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4239 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4240 	if (k >= (SDEBUG_INFO_LEN - 1))
4241 		return sdebug_info;
4242 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4243 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4244 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4245 		  "statistics", (int)sdebug_statistics);
4246 	return sdebug_info;
4247 }
4248 
4249 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4250 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4251 				 int length)
4252 {
4253 	char arr[16];
4254 	int opts;
4255 	int minLen = length > 15 ? 15 : length;
4256 
4257 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4258 		return -EACCES;
4259 	memcpy(arr, buffer, minLen);
4260 	arr[minLen] = '\0';
4261 	if (1 != sscanf(arr, "%d", &opts))
4262 		return -EINVAL;
4263 	sdebug_opts = opts;
4264 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4265 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4266 	if (sdebug_every_nth != 0)
4267 		tweak_cmnd_count();
4268 	return length;
4269 }
4270 
4271 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4272  * same for each scsi_debug host (if more than one). Some of the counters
4273  * output are not atomics so might be inaccurate in a busy system. */
4274 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4275 {
4276 	int f, j, l;
4277 	struct sdebug_queue *sqp;
4278 
4279 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4280 		   SDEBUG_VERSION, sdebug_version_date);
4281 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4282 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4283 		   sdebug_opts, sdebug_every_nth);
4284 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4285 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4286 		   sdebug_sector_size, "bytes");
4287 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4288 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4289 		   num_aborts);
4290 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4291 		   num_dev_resets, num_target_resets, num_bus_resets,
4292 		   num_host_resets);
4293 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4294 		   dix_reads, dix_writes, dif_errors);
4295 	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4296 		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
4297 		   sdebug_mq_active);
4298 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4299 		   atomic_read(&sdebug_cmnd_count),
4300 		   atomic_read(&sdebug_completions),
4301 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4302 		   atomic_read(&sdebug_a_tsf));
4303 
4304 	seq_printf(m, "submit_queues=%d\n", submit_queues);
4305 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4306 		seq_printf(m, "  queue %d:\n", j);
4307 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4308 		if (f != sdebug_max_queue) {
4309 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4310 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4311 				   "first,last bits", f, l);
4312 		}
4313 	}
4314 	return 0;
4315 }
4316 
4317 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4318 {
4319 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4320 }
4321 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4322  * of delay is jiffies.
4323  */
4324 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4325 			   size_t count)
4326 {
4327 	int jdelay, res;
4328 
4329 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4330 		res = count;
4331 		if (sdebug_jdelay != jdelay) {
4332 			int j, k;
4333 			struct sdebug_queue *sqp;
4334 
4335 			block_unblock_all_queues(true);
4336 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4337 			     ++j, ++sqp) {
4338 				k = find_first_bit(sqp->in_use_bm,
4339 						   sdebug_max_queue);
4340 				if (k != sdebug_max_queue) {
4341 					res = -EBUSY;   /* queued commands */
4342 					break;
4343 				}
4344 			}
4345 			if (res > 0) {
4346 				/* make sure sdebug_defer instances get
4347 				 * re-allocated for new delay variant */
4348 				free_all_queued();
4349 				sdebug_jdelay = jdelay;
4350 				sdebug_ndelay = 0;
4351 			}
4352 			block_unblock_all_queues(false);
4353 		}
4354 		return res;
4355 	}
4356 	return -EINVAL;
4357 }
4358 static DRIVER_ATTR_RW(delay);
4359 
4360 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4361 {
4362 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4363 }
4364 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4365 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4366 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4367 			    size_t count)
4368 {
4369 	int ndelay, res;
4370 
4371 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4372 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4373 		res = count;
4374 		if (sdebug_ndelay != ndelay) {
4375 			int j, k;
4376 			struct sdebug_queue *sqp;
4377 
4378 			block_unblock_all_queues(true);
4379 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4380 			     ++j, ++sqp) {
4381 				k = find_first_bit(sqp->in_use_bm,
4382 						   sdebug_max_queue);
4383 				if (k != sdebug_max_queue) {
4384 					res = -EBUSY;   /* queued commands */
4385 					break;
4386 				}
4387 			}
4388 			if (res > 0) {
4389 				/* make sure sdebug_defer instances get
4390 				 * re-allocated for new delay variant */
4391 				free_all_queued();
4392 				sdebug_ndelay = ndelay;
4393 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4394 							: DEF_JDELAY;
4395 			}
4396 			block_unblock_all_queues(false);
4397 		}
4398 		return res;
4399 	}
4400 	return -EINVAL;
4401 }
4402 static DRIVER_ATTR_RW(ndelay);
4403 
4404 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4405 {
4406 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4407 }
4408 
4409 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4410 			  size_t count)
4411 {
4412         int opts;
4413 	char work[20];
4414 
4415         if (1 == sscanf(buf, "%10s", work)) {
4416 		if (0 == strncasecmp(work,"0x", 2)) {
4417 			if (1 == sscanf(&work[2], "%x", &opts))
4418 				goto opts_done;
4419 		} else {
4420 			if (1 == sscanf(work, "%d", &opts))
4421 				goto opts_done;
4422 		}
4423 	}
4424 	return -EINVAL;
4425 opts_done:
4426 	sdebug_opts = opts;
4427 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4428 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4429 	tweak_cmnd_count();
4430 	return count;
4431 }
4432 static DRIVER_ATTR_RW(opts);
4433 
4434 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4435 {
4436 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4437 }
4438 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4439 			   size_t count)
4440 {
4441         int n;
4442 
4443 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4444 		sdebug_ptype = n;
4445 		return count;
4446 	}
4447 	return -EINVAL;
4448 }
4449 static DRIVER_ATTR_RW(ptype);
4450 
4451 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4452 {
4453 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4454 }
4455 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4456 			    size_t count)
4457 {
4458         int n;
4459 
4460 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4461 		sdebug_dsense = n;
4462 		return count;
4463 	}
4464 	return -EINVAL;
4465 }
4466 static DRIVER_ATTR_RW(dsense);
4467 
4468 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4469 {
4470 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4471 }
4472 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4473 			     size_t count)
4474 {
4475         int n;
4476 
4477 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4478 		n = (n > 0);
4479 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4480 		if (sdebug_fake_rw != n) {
4481 			if ((0 == n) && (NULL == fake_storep)) {
4482 				unsigned long sz =
4483 					(unsigned long)sdebug_dev_size_mb *
4484 					1048576;
4485 
4486 				fake_storep = vmalloc(sz);
4487 				if (NULL == fake_storep) {
4488 					pr_err("out of memory, 9\n");
4489 					return -ENOMEM;
4490 				}
4491 				memset(fake_storep, 0, sz);
4492 			}
4493 			sdebug_fake_rw = n;
4494 		}
4495 		return count;
4496 	}
4497 	return -EINVAL;
4498 }
4499 static DRIVER_ATTR_RW(fake_rw);
4500 
4501 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4502 {
4503 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4504 }
4505 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4506 			      size_t count)
4507 {
4508         int n;
4509 
4510 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4511 		sdebug_no_lun_0 = n;
4512 		return count;
4513 	}
4514 	return -EINVAL;
4515 }
4516 static DRIVER_ATTR_RW(no_lun_0);
4517 
4518 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4519 {
4520 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4521 }
4522 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4523 			      size_t count)
4524 {
4525         int n;
4526 
4527 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4528 		sdebug_num_tgts = n;
4529 		sdebug_max_tgts_luns();
4530 		return count;
4531 	}
4532 	return -EINVAL;
4533 }
4534 static DRIVER_ATTR_RW(num_tgts);
4535 
/* dev_size_mb is read-only: the ramdisk size is fixed once set. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
4541 
/* num_parts is read-only: partitions are built at init time only. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4547 
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
/* Accepts any integer; negative values are meaningful (fake_timeout()
 * treats values below -1 specially). Error injection depends on command
 * counting, so statistics collection is forced on when nth != 0. */
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
        int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		sdebug_every_nth = nth;
		if (nth && !sdebug_statistics) {
			pr_info("every_nth needs statistics=1, set it\n");
			sdebug_statistics = true;
		}
		tweak_cmnd_count();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(every_nth);
4569 
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Accepts 0..256. If the value changed and scsi_level is >= SPC-3, raise
 * a REPORTED LUNS DATA HAS CHANGED unit attention on every device of
 * every simulated host. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
        int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4608 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k <- highest in-use slot index across all submit queues;
		 * find_last_bit() returns SDEBUG_CANQUEUE when none set */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* no commands in flight, nothing to retire */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* commands in flight beyond the new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4644 
/* no_uld is read-only after module load. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
4650 
/* scsi_level is read-only after module load. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
4656 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Changing virtual_gb recomputes the reported capacity; when the value
 * actually changed, raise a CAPACITY DATA HAS CHANGED unit attention on
 * every device of every simulated host. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
        int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4691 
/* add_host shows the current number of simulated adapters. */
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
}

static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

/* Writing a delta adds (positive) or removes (negative) that many
 * simulated adapters. Note: failures of sdebug_add_adapter() are not
 * reported back to the writer. */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
4719 
4720 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4721 {
4722 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4723 }
4724 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4725 				    size_t count)
4726 {
4727 	int n;
4728 
4729 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4730 		sdebug_vpd_use_hostno = n;
4731 		return count;
4732 	}
4733 	return -EINVAL;
4734 }
4735 static DRIVER_ATTR_RW(vpd_use_hostno);
4736 
4737 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4738 {
4739 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4740 }
4741 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4742 				size_t count)
4743 {
4744 	int n;
4745 
4746 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4747 		if (n > 0)
4748 			sdebug_statistics = true;
4749 		else {
4750 			clear_queue_stats();
4751 			sdebug_statistics = false;
4752 		}
4753 		return count;
4754 	}
4755 	return -EINVAL;
4756 }
4757 static DRIVER_ATTR_RW(statistics);
4758 
/* sector_size is read-only: validated and fixed in scsi_debug_init(). */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
4764 
/* submit_queues is read-only: the queue array is sized at init time. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);
4770 
/* dix (data integrity extensions) is read-only after module load. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);
4776 
/* dif (protection type 0-3) is read-only: validated in scsi_debug_init(). */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);
4782 
/* guard (0=CRC, 1=IP checksum per sdebug_driver_probe()) is read-only. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);
4788 
/* ato is read-only: validated (0 or 1) in scsi_debug_init(). */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
4794 
/* Show the provisioning map as a bit-list; when logical block
 * provisioning is off, report the whole store as one mapped range. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* PAGE_SIZE - 1 leaves room for the trailing newline below */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4811 
4812 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4813 {
4814 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4815 }
4816 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4817 			       size_t count)
4818 {
4819 	int n;
4820 
4821 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4822 		sdebug_removable = (n > 0);
4823 		return count;
4824 	}
4825 	return -EINVAL;
4826 }
4827 static DRIVER_ATTR_RW(removable);
4828 
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		/* value is stored (as a flag) but never acted upon */
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);
4846 
4847 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4848 {
4849 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4850 }
4851 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4852 			    size_t count)
4853 {
4854 	int n;
4855 
4856 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4857 		sdebug_strict = (n > 0);
4858 		return count;
4859 	}
4860 	return -EINVAL;
4861 }
4862 static DRIVER_ATTR_RW(strict);
4863 
/* uuid_ctl is read-only after module load. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
4869 
4870 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
4877 
/* Attribute files exposed under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

/* root device that parents all simulated adapters */
static struct device *pseudo_primary;
4914 
/*
 * Module init: validate parameters, allocate the per-queue array and the
 * optional ramdisk / DIF / provisioning-map stores, register the pseudo
 * bus/driver, then create the initially requested adapters. Error paths
 * unwind in reverse order via the labels at the bottom.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {

	case SD_DIF_TYPE0_PROTECTION:
		break;
	case SD_DIF_TYPE1_PROTECTION:
	case SD_DIF_TYPE2_PROTECTION:
	case SD_DIF_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* skip the ramdisk entirely when reads/writes are faked */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff pattern marks protection info as unwritten/escape */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host per success */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

        for (k = 0; k < host_to_add; k++) {
                if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
                        break;
                }
        }

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5124 
5125 static void __exit scsi_debug_exit(void)
5126 {
5127 	int k = sdebug_add_host;
5128 
5129 	stop_all_queued();
5130 	free_all_queued();
5131 	for (; k; k--)
5132 		sdebug_remove_adapter();
5133 	driver_unregister(&sdebug_driverfs_driver);
5134 	bus_unregister(&pseudo_lld_bus);
5135 	root_device_unregister(pseudo_primary);
5136 
5137 	vfree(dif_storep);
5138 	vfree(fake_storep);
5139 	kfree(sdebug_q_arr);
5140 }
5141 
5142 device_initcall(scsi_debug_init);
5143 module_exit(scsi_debug_exit);
5144 
/* device release callback: frees the host info when the last reference
 * to its embedded struct device is dropped */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
5152 
5153 static int sdebug_add_adapter(void)
5154 {
5155 	int k, devs_per_host;
5156         int error = 0;
5157         struct sdebug_host_info *sdbg_host;
5158 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5159 
5160         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5161         if (NULL == sdbg_host) {
5162 		pr_err("out of memory at line %d\n", __LINE__);
5163                 return -ENOMEM;
5164         }
5165 
5166         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5167 
5168 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5169         for (k = 0; k < devs_per_host; k++) {
5170 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5171 		if (!sdbg_devinfo) {
5172 			pr_err("out of memory at line %d\n", __LINE__);
5173                         error = -ENOMEM;
5174 			goto clean;
5175                 }
5176         }
5177 
5178         spin_lock(&sdebug_host_list_lock);
5179         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5180         spin_unlock(&sdebug_host_list_lock);
5181 
5182         sdbg_host->dev.bus = &pseudo_lld_bus;
5183         sdbg_host->dev.parent = pseudo_primary;
5184         sdbg_host->dev.release = &sdebug_release_adapter;
5185 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5186 
5187         error = device_register(&sdbg_host->dev);
5188 
5189         if (error)
5190 		goto clean;
5191 
5192 	++sdebug_add_host;
5193         return error;
5194 
5195 clean:
5196 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5197 				 dev_list) {
5198 		list_del(&sdbg_devinfo->dev_list);
5199 		kfree(sdbg_devinfo);
5200 	}
5201 
5202 	kfree(sdbg_host);
5203         return error;
5204 }
5205 
5206 static void sdebug_remove_adapter(void)
5207 {
5208         struct sdebug_host_info * sdbg_host = NULL;
5209 
5210         spin_lock(&sdebug_host_list_lock);
5211         if (!list_empty(&sdebug_host_list)) {
5212                 sdbg_host = list_entry(sdebug_host_list.prev,
5213                                        struct sdebug_host_info, host_list);
5214 		list_del(&sdbg_host->host_list);
5215 	}
5216         spin_unlock(&sdebug_host_list_lock);
5217 
5218 	if (!sdbg_host)
5219 		return;
5220 
5221 	device_unregister(&sdbg_host->dev);
5222 	--sdebug_add_host;
5223 }
5224 
/* change_queue_depth host template hook: clamp the requested depth to
 * [1, SDEBUG_CANQUEUE + 10] and apply it with all queues blocked. */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5252 
/* Called when sdebug_every_nth != 0; returns true when this command
 * should be silently dropped to simulate a timeout. Values below -1
 * are collapsed to -1 after firing once. */
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
5266 
/*
 * queuecommand host template hook: decode the CDB via opcode_info_arr
 * (including service-action sub-tables), apply the optional strict CDB
 * mask check, report pending unit attentions, honor fake_rw and
 * every_nth error injection, then dispatch to the matched resp_*
 * handler and schedule the response.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optional CDB hex dump for debugging */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action: low 5 bits of byte 1, or be16 at
			 * byte 8, depending on the opcode's flag */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		/* report the highest set bit of the first disallowed byte */
		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5413 
/* Host template; can_queue and use_clustering may be patched at probe
 * time (see sdebug_driver_probe()). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5440 
/*
 * Pseudo-bus probe: allocate and configure a Scsi_Host for the adapter
 * device, set up blk-mq queue count, protection (DIF/DIX) capabilities
 * and guard type, then add and scan the host.
 */
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
	if (sdebug_mq_active)
		hpnt->nr_hw_queues = submit_queues;

        sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	/* leave room above the host's own initiator id when possible */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
        error = scsi_add_host(hpnt, &sdbg_host->dev);
        if (error) {
		pr_err("scsi_add_host failed\n");
                error = -ENODEV;
		scsi_host_put(hpnt);
        } else
		scsi_scan_host(hpnt);

	return error;
}
5538 
5539 static int sdebug_driver_remove(struct device * dev)
5540 {
5541         struct sdebug_host_info *sdbg_host;
5542 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5543 
5544 	sdbg_host = to_sdebug_host(dev);
5545 
5546 	if (!sdbg_host) {
5547 		pr_err("Unable to locate host info\n");
5548 		return -ENODEV;
5549 	}
5550 
5551         scsi_remove_host(sdbg_host->shost);
5552 
5553 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5554 				 dev_list) {
5555                 list_del(&sdbg_devinfo->dev_list);
5556                 kfree(sdbg_devinfo);
5557         }
5558 
5559         scsi_host_put(sdbg_host->shost);
5560         return 0;
5561 }
5562 
/* Every device on the pseudo bus matches the single scsi_debug driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5568 
/* Bus hosting the simulated adapters; drv_groups hangs the sysfs
 * attribute files (sdebug_drv_attrs) off the registered driver. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5576