xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision d9da891a)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2017 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0187"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20171202";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 
98 /* Additional Sense Code Qualifier (ASCQ) */
99 #define ACK_NAK_TO 0x3
100 
101 /* Default values for driver parameters */
102 #define DEF_NUM_HOST   1
103 #define DEF_NUM_TGTS   1
104 #define DEF_MAX_LUNS   1
105 /* With these defaults, this driver will make 1 host with 1 target
106  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107  */
108 #define DEF_ATO 1
109 #define DEF_CDB_LEN 10
110 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
111 #define DEF_DEV_SIZE_MB   8
112 #define DEF_DIF 0
113 #define DEF_DIX 0
114 #define DEF_D_SENSE   0
115 #define DEF_EVERY_NTH   0
116 #define DEF_FAKE_RW	0
117 #define DEF_GUARD 0
118 #define DEF_HOST_LOCK 0
119 #define DEF_LBPU 0
120 #define DEF_LBPWS 0
121 #define DEF_LBPWS10 0
122 #define DEF_LBPRZ 1
123 #define DEF_LOWEST_ALIGNED 0
124 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
125 #define DEF_NO_LUN_0   0
126 #define DEF_NUM_PARTS   0
127 #define DEF_OPTS   0
128 #define DEF_OPT_BLKS 1024
129 #define DEF_PHYSBLK_EXP 0
130 #define DEF_OPT_XFERLEN_EXP 0
131 #define DEF_PTYPE   TYPE_DISK
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DEF_STATISTICS false
144 #define DEF_SUBMIT_QUEUES 1
145 #define DEF_UUID_CTL 0
146 #define JDELAY_OVERRIDDEN -9999
147 
148 #define SDEBUG_LUN_0_VAL 0
149 
150 /* bit mask values for sdebug_opts */
151 #define SDEBUG_OPT_NOISE		1
152 #define SDEBUG_OPT_MEDIUM_ERR		2
153 #define SDEBUG_OPT_TIMEOUT		4
154 #define SDEBUG_OPT_RECOVERED_ERR	8
155 #define SDEBUG_OPT_TRANSPORT_ERR	16
156 #define SDEBUG_OPT_DIF_ERR		32
157 #define SDEBUG_OPT_DIX_ERR		64
158 #define SDEBUG_OPT_MAC_TIMEOUT		128
159 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
160 #define SDEBUG_OPT_Q_NOISE		0x200
161 #define SDEBUG_OPT_ALL_TSF		0x400
162 #define SDEBUG_OPT_RARE_TSF		0x800
163 #define SDEBUG_OPT_N_WCE		0x1000
164 #define SDEBUG_OPT_RESET_NOISE		0x2000
165 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
166 #define SDEBUG_OPT_HOST_BUSY		0x8000
167 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
168 			      SDEBUG_OPT_RESET_NOISE)
169 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
170 				  SDEBUG_OPT_TRANSPORT_ERR | \
171 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
172 				  SDEBUG_OPT_SHORT_TRANSFER | \
173 				  SDEBUG_OPT_HOST_BUSY)
174 /* When "every_nth" > 0 then modulo "every_nth" commands:
175  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
176  *   - a RECOVERED_ERROR is simulated on successful read and write
177  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
178  *   - a TRANSPORT_ERROR is simulated on successful read and write
179  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
180  *
181  * When "every_nth" < 0 then after "- every_nth" commands:
182  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
183  *   - a RECOVERED_ERROR is simulated on successful read and write
184  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
185  *   - a TRANSPORT_ERROR is simulated on successful read and write
 186  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 187  * This will continue on every subsequent command until some other action
 188  * occurs (e.g. the user writing a new value (other than -1 or 1) to
189  * every_nth via sysfs).
190  */
191 
192 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
193  * priority order. In the subset implemented here lower numbers have higher
194  * priority. The UA numbers should be a sequence starting from 0 with
195  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
196 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
197 #define SDEBUG_UA_BUS_RESET 1
198 #define SDEBUG_UA_MODE_CHANGED 2
199 #define SDEBUG_UA_CAPACITY_CHANGED 3
200 #define SDEBUG_UA_LUNS_CHANGED 4
201 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
202 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
203 #define SDEBUG_NUM_UAS 7
204 
205 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
206  * sector on read commands: */
207 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
208 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
209 
210 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
211  * or "peripheral device" addressing (value 0) */
212 #define SAM2_LUN_ADDRESS_METHOD 0
213 
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215  * (for response) per submit queue at one time. Can be reduced by max_queue
216  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219  * but cannot exceed SDEBUG_CANQUEUE .
220  */
221 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
222 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN  255
224 
225 #define F_D_IN			1
226 #define F_D_OUT			2
227 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
228 #define F_D_UNKN		8
229 #define F_RL_WLUN_OK		0x10
230 #define F_SKIP_UA		0x20
231 #define F_DELAY_OVERR		0x40
232 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
233 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
234 #define F_INV_OP		0x200
235 #define F_FAKE_RW		0x400
236 #define F_M_ACCESS		0x800	/* media access */
237 
238 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
239 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
240 #define FF_SA (F_SA_HIGH | F_SA_LOW)
241 
242 #define SDEBUG_MAX_PARTS 4
243 
244 #define SDEBUG_MAX_CMD_LEN 32
245 
246 
/* One instance per simulated logical unit (SCSI device). */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry in owning host's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* logical unit name; see sdebug_uuid_ctl */
	struct sdebug_host_info *sdbg_host;	/* back-pointer to owning host */
	unsigned long uas_bm[1];	/* pending unit attentions, bits are SDEBUG_UA_* */
	atomic_t num_in_q;	/* commands currently queued on this device */
	atomic_t stopped;	/* NOTE(review): presumably toggled by START STOP UNIT — confirm */
	bool used;
};
259 
/* One instance per simulated SCSI host (adapter). */
struct sdebug_host_info {
	struct list_head host_list;	/* entry in global sdebug_host_list */
	struct Scsi_Host *shost;	/* mid-layer host this instance shadows */
	struct device dev;		/* embedded device; see to_sdebug_host() */
	struct list_head dev_info_list;	/* child sdebug_dev_info instances */
};
266 
267 #define to_sdebug_host(d)	\
268 	container_of(d, struct sdebug_host_info, dev)
269 
270 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
271 		      SDEB_DEFER_WQ = 2};
272 
/* Per-command deferral state: records which mechanism (hrtimer or
 * workqueue) will deliver the delayed response and which queue slot the
 * command occupies. */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on */
	bool init_hrt;	/* hrt has been initialized */
	bool init_wq;	/* ew has been initialized */
	enum sdeb_defer_type defer_t;	/* which deferral mechanism is in use */
};
283 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-completion state, if any */
	struct scsi_cmnd *a_cmnd;	/* mid-layer command occupying this slot */
	/* one-shot error-injection flags for this command */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};
297 
/* One submit queue; there are submit_queues of these (>1 when multi-queue). */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* occupancy of qc_arr */
	spinlock_t qc_lock;	/* NOTE(review): presumably guards qc_arr/in_use_bm — confirm */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
304 
/* Driver-wide event counters. */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
309 
/* Describes how one (SCSI opcode, service action) pair is handled. The
 * main table opcode_info_arr[] is indexed by SDEB_I_*; variants sharing an
 * index live in the overflow arrays pointed to by arrp. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags defined above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
321 
322 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* These index values must stay in step with opcode_info_arr[], which is
 * declared with SDEB_I_LAST_ELEMENT + 1 elements. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
356 
357 
/* Maps each possible cdb[0] (SCSI opcode) to an SDEB_I_* index; 0 (i.e.
 * SDEB_I_INVALID_OPCODE) marks opcodes this driver does not handle. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
400 
401 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
409 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
415 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
416 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
417 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
418 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
419 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
420 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
421 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
422 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
423 
424 /*
425  * The following are overflow arrays for cdbs that "hit" the same index in
426  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
427  * should be placed in opcode_info_arr[], the others should be placed here.
428  */
/* Variant cdbs for SDEB_I_MODE_SENSE; preferred MODE SENSE(10) is in
 * opcode_info_arr[]. */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
433 
/* Variant cdbs for SDEB_I_MODE_SELECT; preferred MODE SELECT(10) is in
 * opcode_info_arr[]. */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
438 
/* Variant cdbs for SDEB_I_READ; preferred READ(16) is in opcode_info_arr[]. */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
449 
/* Variant cdbs for SDEB_I_WRITE; preferred WRITE(16) is in opcode_info_arr[]. */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
461 
/* Extra SERVICE ACTION IN(16) service actions; READ CAPACITY(16) is the
 * preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
467 
/* Extra VARIABLE LENGTH (0x7f) service actions; READ(32) is the preferred
 * entry in opcode_info_arr[]. */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
476 
/* Extra MAINTENANCE IN service actions; REPORT TARGET PORT GROUPS is the
 * preferred entry in opcode_info_arr[]. */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
485 
/* Variant cdbs for SDEB_I_WRITE_SAME; preferred WRITE SAME(10) is in
 * opcode_info_arr[]. */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
491 
/* Variant cdbs for SDEB_I_RESERVE; preferred RESERVE(10) is in
 * opcode_info_arr[]. */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
496 
/* Variant cdbs for SDEB_I_RELEASE; preferred RELEASE(10) is in
 * opcode_info_arr[]. */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
501 
502 
503 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
504  * plus the terminating elements for logic that scans this table such as
505  * REPORT SUPPORTED OPERATION CODES. */
506 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
507 /* 0 */
508 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
509 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
510 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
511 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
512 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
513 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
514 	     0, 0} },					/* REPORT LUNS */
515 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
516 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
517 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
518 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 /* 5 */
520 	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
521 	    resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
522 		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
523 	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
524 	    resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
525 		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
526 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
527 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
528 	     0, 0, 0} },
529 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
530 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
531 	     0, 0} },
532 	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
533 	    resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
534 	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
535 /* 10 */
536 	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
537 	    resp_write_dt0, write_iarr,			/* WRITE(16) */
538 		{16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },		/* WRITE(16) */
540 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
541 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
542 	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
543 	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
544 		{16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
546 	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
547 	    NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
548 	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
549 	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
550 	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
551 		maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
552 				0xff, 0, 0xc7, 0, 0, 0, 0} },
553 /* 15 */
554 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
555 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
556 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
557 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
558 	     0, 0, 0, 0, 0, 0} },
559 	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
560 	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
561 	    {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
562 	     0xff, 0xff} },
563 	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
564 	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
565 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
566 	     0} },
567 	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
568 	    NULL, release_iarr, /* RELEASE(10) <no response function> */
569 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
570 	     0} },
571 /* 20 */
572 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
573 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
575 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
576 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
577 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
578 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
579 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
580 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
581 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
582 /* 25 */
583 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
584 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
585 		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
586 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
587 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
588 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
589 	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
590 	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
591 		{10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
592 		 0, 0, 0, 0, 0} },
593 	{0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
594 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
595 	     0, 0, 0, 0} },
596 	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
597 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
598 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
599 
600 /* 30 */
601 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
602 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
603 };
604 
/* Backing variables for the driver's module parameters; defaults are the
 * DEF_* values above. */
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* use descriptor (vs fixed) sense format */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;	/* OR-ed SDEBUG_OPT_* bits */
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;	/* enables the optional sdev_printk traces */
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
654 
/* Run-time state of the simulated store, hosts and result codes. */
static unsigned int sdebug_store_sectors;	/* wrap modulus for fake_store()/dif_store() */
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);	/* all sdebug_host_info instances */
static DEFINE_SPINLOCK(sdebug_host_list_lock);	/* guards sdebug_host_list */

static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
/* counters for reset/error events */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

/* NOTE(review): presumably serializes reader/writer access to the ramdisk
 * store — confirm against the I/O paths later in this file */
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;	/* forward declaration */

/* driver-model glue binding this pseudo LLD to pseudo_lld_bus */
static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-composed scsi_cmnd result values. */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
704 
705 
706 /* Only do the extra work involved in logical block provisioning if one or
707  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
708  * real reads and writes (i.e. not skipping them for speed).
709  */
710 static inline bool scsi_debug_lbp(void)
711 {
712 	return 0 == sdebug_fake_rw &&
713 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
714 }
715 
716 static void *fake_store(unsigned long long lba)
717 {
718 	lba = do_div(lba, sdebug_store_sectors);
719 
720 	return fake_storep + lba * sdebug_sector_size;
721 }
722 
723 static struct t10_pi_tuple *dif_store(sector_t sector)
724 {
725 	sector = sector_div(sector, sdebug_store_sectors);
726 
727 	return dif_storep + sector;
728 }
729 
730 static void sdebug_max_tgts_luns(void)
731 {
732 	struct sdebug_host_info *sdbg_host;
733 	struct Scsi_Host *hpnt;
734 
735 	spin_lock(&sdebug_host_list_lock);
736 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
737 		hpnt = sdbg_host->shost;
738 		if ((hpnt->this_id >= 0) &&
739 		    (sdebug_num_tgts > hpnt->this_id))
740 			hpnt->max_id = sdebug_num_tgts + 1;
741 		else
742 			hpnt->max_id = sdebug_num_tgts;
743 		/* sdebug_max_luns; */
744 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
745 	}
746 	spin_unlock(&sdebug_host_list_lock);
747 }
748 
749 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
750 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/* Build an ILLEGAL REQUEST sense (invalid field in CDB or parameter list)
 * including the SENSE-KEY SPECIFIC bytes that point at the offending
 * byte (in_byte) and optionally bit (in_bit) of the command or data.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];		/* sense-key specific field (only 3 bytes used) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB, not the data */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* field (byte) pointer */
	if (sdebug_dsense) {
		/* append a sense-key specific descriptor (type 2, length 6) */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: SKS at bytes 15..17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
791 
792 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
793 {
794 	unsigned char *sbuff;
795 
796 	sbuff = scp->sense_buffer;
797 	if (!sbuff) {
798 		sdev_printk(KERN_ERR, scp->device,
799 			    "%s: sense_buffer is NULL\n", __func__);
800 		return;
801 	}
802 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
803 
804 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
805 
806 	if (sdebug_verbose)
807 		sdev_printk(KERN_INFO, scp->device,
808 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
809 			    my_name, key, asc, asq);
810 }
811 
/* Report INVALID COMMAND OPERATION CODE (ILLEGAL REQUEST) for scp */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
816 
817 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
818 {
819 	if (sdebug_verbose) {
820 		if (0x1261 == cmd)
821 			sdev_printk(KERN_INFO, dev,
822 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
823 		else if (0x5331 == cmd)
824 			sdev_printk(KERN_INFO, dev,
825 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
826 				    __func__);
827 		else
828 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
829 				    __func__, cmd);
830 	}
831 	return -EINVAL;
832 	/* return -ENOTTY; // correct return but upsets fdisk */
833 }
834 
835 static void config_cdb_len(struct scsi_device *sdev)
836 {
837 	switch (sdebug_cdb_len) {
838 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
839 		sdev->use_10_for_rw = false;
840 		sdev->use_16_for_rw = false;
841 		sdev->use_10_for_ms = false;
842 		break;
843 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
844 		sdev->use_10_for_rw = true;
845 		sdev->use_16_for_rw = false;
846 		sdev->use_10_for_ms = false;
847 		break;
848 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
849 		sdev->use_10_for_rw = true;
850 		sdev->use_16_for_rw = false;
851 		sdev->use_10_for_ms = true;
852 		break;
853 	case 16:
854 		sdev->use_10_for_rw = false;
855 		sdev->use_16_for_rw = true;
856 		sdev->use_10_for_ms = true;
857 		break;
858 	case 32: /* No knobs to suggest this so same as 16 for now */
859 		sdev->use_10_for_rw = false;
860 		sdev->use_16_for_rw = true;
861 		sdev->use_10_for_ms = true;
862 		break;
863 	default:
864 		pr_warn("unexpected cdb_len=%d, force to 10\n",
865 			sdebug_cdb_len);
866 		sdev->use_10_for_rw = true;
867 		sdev->use_16_for_rw = false;
868 		sdev->use_10_for_ms = false;
869 		sdebug_cdb_len = 10;
870 		break;
871 	}
872 }
873 
874 static void all_config_cdb_len(void)
875 {
876 	struct sdebug_host_info *sdbg_host;
877 	struct Scsi_Host *shost;
878 	struct scsi_device *sdev;
879 
880 	spin_lock(&sdebug_host_list_lock);
881 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
882 		shost = sdbg_host->shost;
883 		shost_for_each_device(sdev, shost) {
884 			config_cdb_len(sdev);
885 		}
886 	}
887 	spin_unlock(&sdebug_host_list_lock);
888 }
889 
890 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
891 {
892 	struct sdebug_host_info *sdhp;
893 	struct sdebug_dev_info *dp;
894 
895 	spin_lock(&sdebug_host_list_lock);
896 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
897 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
898 			if ((devip->sdbg_host == dp->sdbg_host) &&
899 			    (devip->target == dp->target))
900 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
901 		}
902 	}
903 	spin_unlock(&sdebug_host_list_lock);
904 }
905 
/* If a unit attention is pending for this device, build the matching
 * sense data for scp, clear that UA bit and return
 * check_condition_result; otherwise return 0.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	/* lowest set bit wins when several UAs are pending */
	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* the UA is consumed once reported */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
985 
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	/* nothing to transfer */
	if (!sdb->length)
		return 0;
	/* only valid for device-to-host (or bidirectional) transfers */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	/* copy arr into the command's scatter-gather list */
	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	/* residual: the part of the buffer we did not fill */
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
1004 
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	/* offset beyond the data-in buffer: nothing to copy */
	if (sdb->length <= off_dst)
		return 0;
	/* only valid for device-to-host (or bidirectional) transfers */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* shrink resid only; an earlier call may have written further out */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
1030 
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	/* only valid for host-to-device (or bidirectional) transfers */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1044 
1045 
/* INQUIRY response strings; arrays sized one larger for the trailing NUL */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1053 
/* Device identification VPD page. Returns number of bytes placed in arr */
/* Emits, in order: a T10 vendor-id designator, then (for real LUs) a
 * logical unit designator (UUID or NAA-3) plus a relative port
 * designator, then target-port, target-port-group, target-device NAA-3
 * designators and finally a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {	/* negative dev_id_num means a wlun */
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* 12 + 8 chars, zero padded to 24 */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1141 
/* Payload (from the 4th byte onward) of the Software interface
 * identification VPD page: three 6-byte identifiers. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1147 
1148 /*  Software interface identification VPD page */
1149 static int inquiry_vpd_84(unsigned char *arr)
1150 {
1151 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1152 	return sizeof(vpd84_data);
1153 }
1154 
/* Append one network services descriptor to arr: 3 header bytes (service
 * type then two reserved/zero bytes), a length byte, then the URL
 * NUL-terminated and zero-padded up to a 4-byte multiple.  Returns the
 * number of bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, unsigned char svc_type,
				const char *url)
{
	int num = 0;
	int olen = strlen(url);
	int plen = olen + 1;	/* room for the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to multiple of 4 */
	arr[num++] = svc_type;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page.  Emits two descriptors (a
 * storage configuration URL and a logging URL) and returns the number of
 * bytes placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num;

	num = inquiry_vpd_85_descr(arr, 0x1,	/* lu, storage config */
				   "https://www.kernel.org/config");
	num += inquiry_vpd_85_descr(arr + num, 0x4,	/* lu, logging */
				    "http://www.kernel.org/log");
	return num;
}
1189 
1190 /* SCSI ports VPD page */
1191 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1192 {
1193 	int num = 0;
1194 	int port_a, port_b;
1195 
1196 	port_a = target_dev_id + 1;
1197 	port_b = port_a + 1;
1198 	arr[num++] = 0x0;	/* reserved */
1199 	arr[num++] = 0x0;	/* reserved */
1200 	arr[num++] = 0x0;
1201 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1202 	memset(arr + num, 0, 6);
1203 	num += 6;
1204 	arr[num++] = 0x0;
1205 	arr[num++] = 12;	/* length tp descriptor */
1206 	/* naa-5 target port identifier (A) */
1207 	arr[num++] = 0x61;	/* proto=sas, binary */
1208 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1209 	arr[num++] = 0x0;	/* reserved */
1210 	arr[num++] = 0x8;	/* length */
1211 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1212 	num += 8;
1213 	arr[num++] = 0x0;	/* reserved */
1214 	arr[num++] = 0x0;	/* reserved */
1215 	arr[num++] = 0x0;
1216 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1217 	memset(arr + num, 0, 6);
1218 	num += 6;
1219 	arr[num++] = 0x0;
1220 	arr[num++] = 12;	/* length tp descriptor */
1221 	/* naa-5 target port identifier (B) */
1222 	arr[num++] = 0x61;	/* proto=sas, binary */
1223 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1224 	arr[num++] = 0x0;	/* reserved */
1225 	arr[num++] = 0x8;	/* length */
1226 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1227 	num += 8;
1228 
1229 	return num;
1230 }
1231 
1232 
/* Canned payload (from the 4th byte onward) for the ATA Information VPD
 * page: SAT vendor/product strings followed by a 512-byte ATA IDENTIFY
 * DEVICE style data block. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1276 
1277 /* ATA Information VPD page */
1278 static int inquiry_vpd_89(unsigned char *arr)
1279 {
1280 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1281 	return sizeof(vpd89_data);
1282 }
1283 
1284 
/* Default payload (from the 4th byte onward) of the Block limits VPD
 * page; most fields are overwritten at runtime by inquiry_vpd_b0(). */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1291 
1292 /* Block limits VPD page (SBC-3) */
1293 static int inquiry_vpd_b0(unsigned char *arr)
1294 {
1295 	unsigned int gran;
1296 
1297 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1298 
1299 	/* Optimal transfer length granularity */
1300 	if (sdebug_opt_xferlen_exp != 0 &&
1301 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1302 		gran = 1 << sdebug_opt_xferlen_exp;
1303 	else
1304 		gran = 1 << sdebug_physblk_exp;
1305 	put_unaligned_be16(gran, arr + 2);
1306 
1307 	/* Maximum Transfer Length */
1308 	if (sdebug_store_sectors > 0x400)
1309 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1310 
1311 	/* Optimal Transfer Length */
1312 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1313 
1314 	if (sdebug_lbpu) {
1315 		/* Maximum Unmap LBA Count */
1316 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1317 
1318 		/* Maximum Unmap Block Descriptor Count */
1319 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1320 	}
1321 
1322 	/* Unmap Granularity Alignment */
1323 	if (sdebug_unmap_alignment) {
1324 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1325 		arr[28] |= 0x80; /* UGAVALID */
1326 	}
1327 
1328 	/* Optimal Unmap Granularity */
1329 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1330 
1331 	/* Maximum WRITE SAME Length */
1332 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1333 
1334 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1335 
1336 	return sizeof(vpdb0_data);
1337 }
1338 
/* Block device characteristics VPD page (SBC-3).  Advertises a small,
 * non-rotating (solid state) medium.  Returns the page length (0x3c).
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
1350 
1351 /* Logical block provisioning VPD page (SBC-4) */
1352 static int inquiry_vpd_b2(unsigned char *arr)
1353 {
1354 	memset(arr, 0, 0x4);
1355 	arr[0] = 0;			/* threshold exponent */
1356 	if (sdebug_lbpu)
1357 		arr[1] = 1 << 7;
1358 	if (sdebug_lbpws)
1359 		arr[1] |= 1 << 6;
1360 	if (sdebug_lbpws10)
1361 		arr[1] |= 1 << 5;
1362 	if (sdebug_lbprz && scsi_debug_lbp())
1363 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1364 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1365 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1366 	/* threshold_percentage=0 */
1367 	return 0x4;
1368 }
1369 
1370 #define SDEBUG_LONG_INQ_SZ 96
1371 #define SDEBUG_MAX_INQ_ARR_SZ 584
1372 
/* Respond to the INQUIRY command: either one of the VPD pages (EVPD bit
 * set) or the standard 96-byte inquiry response.  Returns 0 on success,
 * check_condition_result for an invalid field, or DID_REQUEUE << 16 when
 * the response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* byte 0: peripheral qualifier (high 3 bits) + device type */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		/* obsolete in SPC; rejected */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		/* fabricate ids from host/channel/target/lun numbers */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		/* cmd[2] selects the VPD page */
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);	/* 2-byte page length */
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clip response to page length and requester's allocation */
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1523 
/* Informational exceptions control mode page (0x1c) current values.
 * resp_requests() below tests byte 2 bit 2 (presumably TEST) and the low
 * nibble of byte 3 (presumably MRIE) — confirm against the mode page
 * handling elsewhere in this file. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1526 
1527 static int resp_requests(struct scsi_cmnd * scp,
1528 			 struct sdebug_dev_info * devip)
1529 {
1530 	unsigned char * sbuff;
1531 	unsigned char *cmd = scp->cmnd;
1532 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1533 	bool dsense;
1534 	int len = 18;
1535 
1536 	memset(arr, 0, sizeof(arr));
1537 	dsense = !!(cmd[1] & 1);
1538 	sbuff = scp->sense_buffer;
1539 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1540 		if (dsense) {
1541 			arr[0] = 0x72;
1542 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1543 			arr[2] = THRESHOLD_EXCEEDED;
1544 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1545 			len = 8;
1546 		} else {
1547 			arr[0] = 0x70;
1548 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1549 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1550 			arr[12] = THRESHOLD_EXCEEDED;
1551 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1552 		}
1553 	} else {
1554 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1555 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1556 			;	/* have sense and formats match */
1557 		else if (arr[0] <= 0x70) {
1558 			if (dsense) {
1559 				memset(arr, 0, 8);
1560 				arr[0] = 0x72;
1561 				len = 8;
1562 			} else {
1563 				memset(arr, 0, 18);
1564 				arr[0] = 0x70;
1565 				arr[7] = 0xa;
1566 			}
1567 		} else if (dsense) {
1568 			memset(arr, 0, 8);
1569 			arr[0] = 0x72;
1570 			arr[1] = sbuff[2];     /* sense key */
1571 			arr[2] = sbuff[12];    /* asc */
1572 			arr[3] = sbuff[13];    /* ascq */
1573 			len = 8;
1574 		} else {
1575 			memset(arr, 0, 18);
1576 			arr[0] = 0x70;
1577 			arr[2] = sbuff[1];
1578 			arr[7] = 0xa;
1579 			arr[12] = sbuff[1];
1580 			arr[13] = sbuff[3];
1581 		}
1582 
1583 	}
1584 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1585 	return fill_from_dev_buffer(scp, arr, len);
1586 }
1587 
1588 static int resp_start_stop(struct scsi_cmnd * scp,
1589 			   struct sdebug_dev_info * devip)
1590 {
1591 	unsigned char *cmd = scp->cmnd;
1592 	int power_cond, stop;
1593 
1594 	power_cond = (cmd[4] & 0xf0) >> 4;
1595 	if (power_cond) {
1596 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1597 		return check_condition_result;
1598 	}
1599 	stop = !(cmd[4] & 1);
1600 	atomic_xchg(&devip->stopped, stop);
1601 	return 0;
1602 }
1603 
1604 static sector_t get_sdebug_capacity(void)
1605 {
1606 	static const unsigned int gibibyte = 1073741824;
1607 
1608 	if (sdebug_virtual_gb > 0)
1609 		return (sector_t)sdebug_virtual_gb *
1610 			(gibibyte / sdebug_sector_size);
1611 	else
1612 		return sdebug_store_sectors;
1613 }
1614 
1615 #define SDEBUG_READCAP_ARR_SZ 8
1616 static int resp_readcap(struct scsi_cmnd * scp,
1617 			struct sdebug_dev_info * devip)
1618 {
1619 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1620 	unsigned int capac;
1621 
1622 	/* following just in case virtual_gb changed */
1623 	sdebug_capacity = get_sdebug_capacity();
1624 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1625 	if (sdebug_capacity < 0xffffffff) {
1626 		capac = (unsigned int)sdebug_capacity - 1;
1627 		put_unaligned_be32(capac, arr + 0);
1628 	} else
1629 		put_unaligned_be32(0xffffffff, arr + 0);
1630 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1631 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1632 }
1633 
1634 #define SDEBUG_READCAP16_ARR_SZ 32
/* Respond to READ CAPACITY(16): 64-bit last LBA, block size, protection
 * and logical block provisioning fields.
 */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);	/* ALLOCATION LENGTH */
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;	/* logical blocks per physical */
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;	/* high bits of LALBA */

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	/* clip to the requester's allocation length */
	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1671 
1672 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1673 
/* Respond to REPORT TARGET PORT GROUPS (MAINTENANCE IN). Fabricates two
 * single-port port groups; the group holding port B is reported as
 * unavailable. */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* ask mid-layer to retry */
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* first 4 bytes reserved for RETURN DATA LENGTH */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes its own header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1749 
/* Respond to REPORT SUPPORTED OPERATION CODES. Depending on the
 * REPORTING OPTIONS field (cmd[2] & 0x7) either lists every supported
 * command or returns usage data for one opcode (optionally qualified
 * by a service action). The RCTD bit requests an additional command
 * timeouts descriptor per command. */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptor */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);	/* requested service action */
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)	/* clamp work buffer size to 8 KiB */
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* bytes per command descriptor */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP bit */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV bit */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit the attached variants (same opcode, other sa) */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore parent before advancing */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* SUPPORT=1: not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* cdb field 4 points at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* SUPPORT=3: supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached list by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached list by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit cdb usage data (length + bit mask) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {	/* append command timeouts descriptor */
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1900 
1901 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1902 			  struct sdebug_dev_info *devip)
1903 {
1904 	bool repd;
1905 	u32 alloc_len, len;
1906 	u8 arr[16];
1907 	u8 *cmd = scp->cmnd;
1908 
1909 	memset(arr, 0, sizeof(arr));
1910 	repd = !!(cmd[2] & 0x80);
1911 	alloc_len = get_unaligned_be32(cmd + 6);
1912 	if (alloc_len < 4) {
1913 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1914 		return check_condition_result;
1915 	}
1916 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1917 	arr[1] = 0x1;		/* ITNRS */
1918 	if (repd) {
1919 		arr[3] = 0xc;
1920 		len = 16;
1921 	} else
1922 		len = 4;
1923 
1924 	len = (len < alloc_len) ? len : alloc_len;
1925 	return fill_from_dev_buffer(scp, arr, len);
1926 }
1927 
1928 /* <<Following mode page info copied from ST318451LW>> */
1929 
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	/* pcontrol 1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1940 
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	/* pcontrol 1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1951 
static int resp_format_pg(unsigned char * p, int pcontrol, int target)
{       /* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);	/* sectors per track */
	put_unaligned_be16(sdebug_sector_size, p + 12);	/* bytes per sector */
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}
1967 
/* Current values of the Caching mode page (0x8). Mutated by MODE SELECT
 * (see resp_mode_select) and by the SDEBUG_OPT_N_WCE option. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1971 
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	/* changeable values mask: only the WCE bit (0x4 in byte 2) */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* default values */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)	/* changeable values */
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)	/* default values */
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
1988 
/* Current values of the Control mode page (0xa). The D_SENSE and ATO
 * bits are kept in sync with module state by resp_ctrl_m_pg(). */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1991 
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	/* changeable values mask (page header bytes omitted) */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	/* default values */
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	/* mirror the current descriptor-sense setting into D_SENSE */
	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)	/* changeable values */
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)	/* default values */
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
2014 
2015 
static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	/* changeable values mask (page header bytes omitted) */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	/* default values */
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	/* iec_m_pg holds the current (possibly MODE SELECTed) values */
	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)	/* changeable values */
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)	/* default values */
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
2030 
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	/* pcontrol 1 requests changeable values: none are changeable */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2041 
2042 
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in NAA-3 style SAS addresses for both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* two distinct per-target values, one per phy descriptor */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: none are changeable */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2075 
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	/* changeable-values request: zero everything past the 4 byte header */
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2087 
2088 #define SDEBUG_MAX_MSENSE_SZ 256
2089 
/* Respond to MODE SENSE(6) and MODE SENSE(10). Builds a mode parameter
 * header, an optional block descriptor (short or long format), then the
 * requested mode page(s). pcontrol selects current(0)/changeable(1)/
 * default(2) values; saved(3) is rejected. */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;		/* page code */
	subpcode = cmd[3];		/* subpage code */
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA accepted */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	/* build the 4 (MODE SENSE 6) or 8 (MODE SENSE 10) byte header */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append the block descriptor, if any */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	/* append the requested page(s); 'len' is bytes added by each */
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2247 
2248 #define SDEBUG_MAX_MSELECT_SZ 512
2249 
/* Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching (0x8),
 * Control (0xa) and Informational Exceptions (0x1c) mode pages may be
 * changed; a successful change raises a MODE PARAMETERS CHANGED unit
 * attention. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* MODE DATA LENGTH is reserved for MODE SELECT: must be zero */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip the header plus any block descriptor to the page itself */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage (long) page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* page length must match the stored page before it is accepted */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* D_SENSE bit controls descriptor vs fixed sense */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2327 
static int resp_temp_l_pg(unsigned char * arr)
{	/* Temperature log page (0xd) payload: two log parameters */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1: reference */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2337 
static int resp_ie_l_pg(unsigned char * arr)
{	/* Informational Exceptions log page (0x2f) payload */
	/* last byte (38) presumably mirrors the current temperature
	 * reported by resp_temp_l_pg() — keep the two in sync */
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
		};

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		/* simulate a pending informational exception */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}
2350 
2351 #define SDEBUG_MAX_LSENSE_SZ 512
2352 
2353 static int resp_log_sense(struct scsi_cmnd *scp,
2354 			  struct sdebug_dev_info *devip)
2355 {
2356 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2357 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2358 	unsigned char *cmd = scp->cmnd;
2359 
2360 	memset(arr, 0, sizeof(arr));
2361 	ppc = cmd[1] & 0x2;
2362 	sp = cmd[1] & 0x1;
2363 	if (ppc || sp) {
2364 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2365 		return check_condition_result;
2366 	}
2367 	pcode = cmd[2] & 0x3f;
2368 	subpcode = cmd[3] & 0xff;
2369 	alloc_len = get_unaligned_be16(cmd + 7);
2370 	arr[0] = pcode;
2371 	if (0 == subpcode) {
2372 		switch (pcode) {
2373 		case 0x0:	/* Supported log pages log page */
2374 			n = 4;
2375 			arr[n++] = 0x0;		/* this page */
2376 			arr[n++] = 0xd;		/* Temperature */
2377 			arr[n++] = 0x2f;	/* Informational exceptions */
2378 			arr[3] = n - 4;
2379 			break;
2380 		case 0xd:	/* Temperature log page */
2381 			arr[3] = resp_temp_l_pg(arr + 4);
2382 			break;
2383 		case 0x2f:	/* Informational exceptions log page */
2384 			arr[3] = resp_ie_l_pg(arr + 4);
2385 			break;
2386 		default:
2387 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2388 			return check_condition_result;
2389 		}
2390 	} else if (0xff == subpcode) {
2391 		arr[0] |= 0x40;
2392 		arr[1] = subpcode;
2393 		switch (pcode) {
2394 		case 0x0:	/* Supported log pages and subpages log page */
2395 			n = 4;
2396 			arr[n++] = 0x0;
2397 			arr[n++] = 0x0;		/* 0,0 page */
2398 			arr[n++] = 0x0;
2399 			arr[n++] = 0xff;	/* this page */
2400 			arr[n++] = 0xd;
2401 			arr[n++] = 0x0;		/* Temperature */
2402 			arr[n++] = 0x2f;
2403 			arr[n++] = 0x0;	/* Informational exceptions */
2404 			arr[3] = n - 4;
2405 			break;
2406 		case 0xd:	/* Temperature subpages */
2407 			n = 4;
2408 			arr[n++] = 0xd;
2409 			arr[n++] = 0x0;		/* Temperature */
2410 			arr[3] = n - 4;
2411 			break;
2412 		case 0x2f:	/* Informational exceptions subpages */
2413 			n = 4;
2414 			arr[n++] = 0x2f;
2415 			arr[n++] = 0x0;		/* Informational exceptions */
2416 			arr[3] = n - 4;
2417 			break;
2418 		default:
2419 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2420 			return check_condition_result;
2421 		}
2422 	} else {
2423 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2424 		return check_condition_result;
2425 	}
2426 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2427 	return fill_from_dev_buffer(scp, arr,
2428 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2429 }
2430 
2431 static int check_device_access_params(struct scsi_cmnd *scp,
2432 				      unsigned long long lba, unsigned int num)
2433 {
2434 	if (lba + num > sdebug_capacity) {
2435 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2436 		return check_condition_result;
2437 	}
2438 	/* transfer length excessive (tie in to block limits VPD page) */
2439 	if (num > sdebug_store_sectors) {
2440 		/* needs work to find which cdb byte 'num' comes from */
2441 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2442 		return check_condition_result;
2443 	}
2444 	return 0;
2445 }
2446 
/* Copy 'num' sectors starting at 'lba' between the fake store and the
 * command's data scatterlist (store -> sgl on read, sgl -> store on
 * write), skipping sg_skip bytes of the sgl first.
 * Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	/* choose the buffer and direction matching the transfer */
	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;	/* nothing to transfer */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;	/* direction mismatch (bidi commands exempt) */

	/* the store may be smaller than the reported capacity, so the
	 * access wraps modulo sdebug_store_sectors; 'rest' is the part
	 * that wraps back to the start of the store */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short transfer */

	if (rest) {
		/* wrapped portion continues at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
2488 
2489 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2490  * arr into fake_store(lba,num) and return true. If comparison fails then
2491  * return false. */
2492 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2493 {
2494 	bool res;
2495 	u64 block, rest = 0;
2496 	u32 store_blks = sdebug_store_sectors;
2497 	u32 lb_size = sdebug_sector_size;
2498 
2499 	block = do_div(lba, store_blks);
2500 	if (block + num > store_blks)
2501 		rest = block + num - store_blks;
2502 
2503 	res = !memcmp(fake_storep + (block * lb_size), arr,
2504 		      (num - rest) * lb_size);
2505 	if (!res)
2506 		return res;
2507 	if (rest)
2508 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2509 			     rest * lb_size);
2510 	if (!res)
2511 		return res;
2512 	arr += num * lb_size;
2513 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2514 	if (rest)
2515 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2516 		       rest * lb_size);
2517 	return res;
2518 }
2519 
2520 static __be16 dif_compute_csum(const void *buf, int len)
2521 {
2522 	__be16 csum;
2523 
2524 	if (sdebug_guard)
2525 		csum = (__force __be16)ip_compute_csum(buf, len);
2526 	else
2527 		csum = cpu_to_be16(crc_t10dif(buf, len));
2528 
2529 	return csum;
2530 }
2531 
/* Verify one protection information tuple against a sector of data.
 * Returns 0 if the tuple checks out, 0x01 for a GUARD (checksum) check
 * failure, 0x03 for a REF (reference tag) check failure. */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: reference tag must equal the low 32 bits of the LBA */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: reference tag must match the expected initial LBA */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2558 
/* Copy protection information tuples for 'sectors' sectors starting at
 * 'sector' between dif_storep and the command's protection scatterlist:
 * store -> sgl when read is true, sgl -> store otherwise. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the tail that wraps past the end of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at the store's start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2601 
/* Verify the stored protection information for a read of 'sectors'
 * sectors starting at start_sec, then copy the tuples out to the
 * command's protection scatterlist. Returns 0 on success or a
 * dif_verify() error code (0x01 guard, 0x03 reference). */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct t10_pi_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		/* app tag 0xffff is the escape: tuple is not checked */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2630 
/*
 * Respond to READ(6)/(10)/(12)/(16)/(32) and the read half of
 * XDWRITEREAD(10). Decodes the cdb, applies optional DIF/DIX checks,
 * copies data from the fake store into the command's sgl and applies
 * any configured error injection. Returns 0, a check-condition result,
 * or a host byte error (DID_ERROR << 16).
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and block count; layout varies by opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT (cmd[1] bits 7:5) must be zero for type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optionally fake an unrecoverable medium error in a set range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injection, if requested via opts */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2771 
/*
 * Hex/ASCII dump of a buffer to the kernel log, 16 bytes per line.
 * Printable ASCII is shown as the character itself, everything else
 * as two hex digits.
 *
 * Fix: the inner loop previously always read 16 bytes per row, so a
 * @len that is not a multiple of 16 caused an out-of-bounds read of
 * buf[]; bound the index by @len as well.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2793 
/*
 * Verify the protection information in the command's protection sgl
 * against the data in the data sgl, walking both lists in lock-step
 * (one t10_pi_tuple per logical block). On full success the PI is
 * copied into dif_storep. Returns 0 on success, 0x01 if the data sgl
 * runs out early, or the dif_verify() failure code of the first bad
 * block (dif_errors is bumped on any failure).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		/* one PI tuple per logical block in this protection page */
		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* tell the data iterator how much of its page we used */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all blocks verified; persist the PI alongside the data */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2865 
2866 static unsigned long lba_to_map_index(sector_t lba)
2867 {
2868 	if (sdebug_unmap_alignment)
2869 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2870 	sector_div(lba, sdebug_unmap_granularity);
2871 	return lba;
2872 }
2873 
2874 static sector_t map_index_to_lba(unsigned long index)
2875 {
2876 	sector_t lba = index * sdebug_unmap_granularity;
2877 
2878 	if (sdebug_unmap_alignment)
2879 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2880 	return lba;
2881 }
2882 
2883 static unsigned int map_state(sector_t lba, unsigned int *num)
2884 {
2885 	sector_t end;
2886 	unsigned int mapped;
2887 	unsigned long index;
2888 	unsigned long next;
2889 
2890 	index = lba_to_map_index(lba);
2891 	mapped = test_bit(index, map_storep);
2892 
2893 	if (mapped)
2894 		next = find_next_zero_bit(map_storep, map_size, index);
2895 	else
2896 		next = find_next_bit(map_storep, map_size, index);
2897 
2898 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2899 	*num = end - lba;
2900 	return mapped;
2901 }
2902 
2903 static void map_region(sector_t lba, unsigned int len)
2904 {
2905 	sector_t end = lba + len;
2906 
2907 	while (lba < end) {
2908 		unsigned long index = lba_to_map_index(lba);
2909 
2910 		if (index < map_size)
2911 			set_bit(index, map_storep);
2912 
2913 		lba = map_index_to_lba(index + 1);
2914 	}
2915 }
2916 
/*
 * Clear the provisioning map for [lba, lba+len) and scrub the backing
 * store for each chunk that is both granularity-aligned and wholly
 * inside the range; partially covered chunks keep their map bit and
 * their data.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap chunks fully contained in the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* invalidate stored PI for the chunk */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2944 
/*
 * Respond to WRITE(6)/(10)/(12)/(16)/(32) and XDWRITEREAD(10)'s write
 * half. Decodes the cdb, applies optional DIF/DIX verification of the
 * incoming data, copies it into the fake store under the atomic_rw
 * write lock, updates the provisioning map, and applies configured
 * error injection. Returns 0, a check-condition result, or a host
 * byte error (DID_ERROR << 16).
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and block count; layout varies by opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* WRPROTECT (cmd[1] bits 7:5) must be zero for type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* post-transfer error injection, if requested via opts */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3065 
3066 /*
3067  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3068  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3069  */
3070 static int resp_write_scat(struct scsi_cmnd *scp,
3071 			   struct sdebug_dev_info *devip)
3072 {
3073 	u8 *cmd = scp->cmnd;
3074 	u8 *lrdp = NULL;
3075 	u8 *up;
3076 	u8 wrprotect;
3077 	u16 lbdof, num_lrd, k;
3078 	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3079 	u32 lb_size = sdebug_sector_size;
3080 	u32 ei_lba;
3081 	u64 lba;
3082 	unsigned long iflags;
3083 	int ret, res;
3084 	bool is_16;
3085 	static const u32 lrd_size = 32; /* + parameter list header size */
3086 
3087 	if (cmd[0] == VARIABLE_LENGTH_CMD) {
3088 		is_16 = false;
3089 		wrprotect = (cmd[10] >> 5) & 0x7;
3090 		lbdof = get_unaligned_be16(cmd + 12);
3091 		num_lrd = get_unaligned_be16(cmd + 16);
3092 		bt_len = get_unaligned_be32(cmd + 28);
3093 	} else {        /* that leaves WRITE SCATTERED(16) */
3094 		is_16 = true;
3095 		wrprotect = (cmd[2] >> 5) & 0x7;
3096 		lbdof = get_unaligned_be16(cmd + 4);
3097 		num_lrd = get_unaligned_be16(cmd + 8);
3098 		bt_len = get_unaligned_be32(cmd + 10);
3099 		if (unlikely(have_dif_prot)) {
3100 			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3101 			    wrprotect) {
3102 				mk_sense_invalid_opcode(scp);
3103 				return illegal_condition_result;
3104 			}
3105 			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3106 			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3107 			     wrprotect == 0)
3108 				sdev_printk(KERN_ERR, scp->device,
3109 					    "Unprotected WR to DIF device\n");
3110 		}
3111 	}
3112 	if ((num_lrd == 0) || (bt_len == 0))
3113 		return 0;       /* T10 says these do-nothings are not errors */
3114 	if (lbdof == 0) {
3115 		if (sdebug_verbose)
3116 			sdev_printk(KERN_INFO, scp->device,
3117 				"%s: %s: LB Data Offset field bad\n",
3118 				my_name, __func__);
3119 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3120 		return illegal_condition_result;
3121 	}
3122 	lbdof_blen = lbdof * lb_size;
3123 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3124 		if (sdebug_verbose)
3125 			sdev_printk(KERN_INFO, scp->device,
3126 				"%s: %s: LBA range descriptors don't fit\n",
3127 				my_name, __func__);
3128 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3129 		return illegal_condition_result;
3130 	}
3131 	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3132 	if (lrdp == NULL)
3133 		return SCSI_MLQUEUE_HOST_BUSY;
3134 	if (sdebug_verbose)
3135 		sdev_printk(KERN_INFO, scp->device,
3136 			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3137 			my_name, __func__, lbdof_blen);
3138 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3139 	if (res == -1) {
3140 		ret = DID_ERROR << 16;
3141 		goto err_out;
3142 	}
3143 
3144 	write_lock_irqsave(&atomic_rw, iflags);
3145 	sg_off = lbdof_blen;
3146 	/* Spec says Buffer xfer Length field in number of LBs in dout */
3147 	cum_lb = 0;
3148 	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3149 		lba = get_unaligned_be64(up + 0);
3150 		num = get_unaligned_be32(up + 8);
3151 		if (sdebug_verbose)
3152 			sdev_printk(KERN_INFO, scp->device,
3153 				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
3154 				my_name, __func__, k, lba, num, sg_off);
3155 		if (num == 0)
3156 			continue;
3157 		ret = check_device_access_params(scp, lba, num);
3158 		if (ret)
3159 			goto err_out_unlock;
3160 		num_by = num * lb_size;
3161 		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3162 
3163 		if ((cum_lb + num) > bt_len) {
3164 			if (sdebug_verbose)
3165 				sdev_printk(KERN_INFO, scp->device,
3166 				    "%s: %s: sum of blocks > data provided\n",
3167 				    my_name, __func__);
3168 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3169 					0);
3170 			ret = illegal_condition_result;
3171 			goto err_out_unlock;
3172 		}
3173 
3174 		/* DIX + T10 DIF */
3175 		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3176 			int prot_ret = prot_verify_write(scp, lba, num,
3177 							 ei_lba);
3178 
3179 			if (prot_ret) {
3180 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3181 						prot_ret);
3182 				ret = illegal_condition_result;
3183 				goto err_out_unlock;
3184 			}
3185 		}
3186 
3187 		ret = do_device_access(scp, sg_off, lba, num, true);
3188 		if (unlikely(scsi_debug_lbp()))
3189 			map_region(lba, num);
3190 		if (unlikely(-1 == ret)) {
3191 			ret = DID_ERROR << 16;
3192 			goto err_out_unlock;
3193 		} else if (unlikely(sdebug_verbose && (ret < num_by)))
3194 			sdev_printk(KERN_INFO, scp->device,
3195 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3196 			    my_name, num_by, ret);
3197 
3198 		if (unlikely(sdebug_any_injecting_opt)) {
3199 			struct sdebug_queued_cmd *sqcp =
3200 				(struct sdebug_queued_cmd *)scp->host_scribble;
3201 
3202 			if (sqcp) {
3203 				if (sqcp->inj_recovered) {
3204 					mk_sense_buffer(scp, RECOVERED_ERROR,
3205 							THRESHOLD_EXCEEDED, 0);
3206 					ret = illegal_condition_result;
3207 					goto err_out_unlock;
3208 				} else if (sqcp->inj_dif) {
3209 					/* Logical block guard check failed */
3210 					mk_sense_buffer(scp, ABORTED_COMMAND,
3211 							0x10, 1);
3212 					ret = illegal_condition_result;
3213 					goto err_out_unlock;
3214 				} else if (sqcp->inj_dix) {
3215 					mk_sense_buffer(scp, ILLEGAL_REQUEST,
3216 							0x10, 1);
3217 					ret = illegal_condition_result;
3218 					goto err_out_unlock;
3219 				}
3220 			}
3221 		}
3222 		sg_off += num_by;
3223 		cum_lb += num;
3224 	}
3225 	ret = 0;
3226 err_out_unlock:
3227 	write_unlock_irqrestore(&atomic_rw, iflags);
3228 err_out:
3229 	kfree(lrdp);
3230 	return ret;
3231 }
3232 
/*
 * Common worker for WRITE SAME(10/16). When @unmap is set (and logical
 * block provisioning is active) the range is deallocated; otherwise one
 * logical block is fetched from the data-out buffer (or zeroed when
 * @ndob) and replicated across the whole range. Holds the atomic_rw
 * write lock across the store update.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3283 
3284 static int resp_write_same_10(struct scsi_cmnd *scp,
3285 			      struct sdebug_dev_info *devip)
3286 {
3287 	u8 *cmd = scp->cmnd;
3288 	u32 lba;
3289 	u16 num;
3290 	u32 ei_lba = 0;
3291 	bool unmap = false;
3292 
3293 	if (cmd[1] & 0x8) {
3294 		if (sdebug_lbpws10 == 0) {
3295 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3296 			return check_condition_result;
3297 		} else
3298 			unmap = true;
3299 	}
3300 	lba = get_unaligned_be32(cmd + 2);
3301 	num = get_unaligned_be16(cmd + 7);
3302 	if (num > sdebug_write_same_length) {
3303 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3304 		return check_condition_result;
3305 	}
3306 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3307 }
3308 
3309 static int resp_write_same_16(struct scsi_cmnd *scp,
3310 			      struct sdebug_dev_info *devip)
3311 {
3312 	u8 *cmd = scp->cmnd;
3313 	u64 lba;
3314 	u32 num;
3315 	u32 ei_lba = 0;
3316 	bool unmap = false;
3317 	bool ndob = false;
3318 
3319 	if (cmd[1] & 0x8) {	/* UNMAP */
3320 		if (sdebug_lbpws == 0) {
3321 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3322 			return check_condition_result;
3323 		} else
3324 			unmap = true;
3325 	}
3326 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3327 		ndob = true;
3328 	lba = get_unaligned_be64(cmd + 2);
3329 	num = get_unaligned_be32(cmd + 10);
3330 	if (num > sdebug_write_same_length) {
3331 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3332 		return check_condition_result;
3333 	}
3334 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3335 }
3336 
3337 /* Note the mode field is in the same position as the (lower) service action
3338  * field. For the Report supported operation codes command, SPC-4 suggests
3339  * each mode of this command should be reported separately; for future. */
3340 static int resp_write_buffer(struct scsi_cmnd *scp,
3341 			     struct sdebug_dev_info *devip)
3342 {
3343 	u8 *cmd = scp->cmnd;
3344 	struct scsi_device *sdp = scp->device;
3345 	struct sdebug_dev_info *dp;
3346 	u8 mode;
3347 
3348 	mode = cmd[1] & 0x1f;
3349 	switch (mode) {
3350 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3351 		/* set UAs on this device only */
3352 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3353 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3354 		break;
3355 	case 0x5:	/* download MC, save and ACT */
3356 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3357 		break;
3358 	case 0x6:	/* download MC with offsets and ACT */
3359 		/* set UAs on most devices (LUs) in this target */
3360 		list_for_each_entry(dp,
3361 				    &devip->sdbg_host->dev_info_list,
3362 				    dev_list)
3363 			if (dp->target == sdp->id) {
3364 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3365 				if (devip != dp)
3366 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3367 						dp->uas_bm);
3368 			}
3369 		break;
3370 	case 0x7:	/* download MC with offsets, save, and ACT */
3371 		/* set UA on all devices (LUs) in this target */
3372 		list_for_each_entry(dp,
3373 				    &devip->sdbg_host->dev_info_list,
3374 				    dev_list)
3375 			if (dp->target == sdp->id)
3376 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3377 					dp->uas_bm);
3378 		break;
3379 	default:
3380 		/* do nothing for this command for other mode values */
3381 		break;
3382 	}
3383 	return 0;
3384 }
3385 
/*
 * COMPARE AND WRITE(16). Fetches 2*num blocks from the data-out buffer
 * (compare half then write half), compares the first half against the
 * store, and only writes the second half if the compare matches.
 * The fetch uses a temporary swap of the fake_storep global so that
 * do_device_access() lands the data in a local buffer; this is safe
 * only because the atomic_rw write lock is held across the swap.
 *
 * NOTE(review): unlike resp_read_dt0()/resp_write_dt0(), the DIF
 * protect-bit checks below are not guarded by have_dif_prot — presumably
 * equivalent because sdebug_dif is 0 when DIF is off; confirm.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;		/* compare buffer + write buffer */
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		/* compare half mismatched: report MISCOMPARE, no write */
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3461 
/* One UNMAP block descriptor as laid out in the parameter list (SBC) */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to unmap */
	__be32	blocks;		/* number of LBAs to unmap */
	__be32	__reserved;
};
3467 
3468 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3469 {
3470 	unsigned char *buf;
3471 	struct unmap_block_desc *desc;
3472 	unsigned int i, payload_len, descriptors;
3473 	int ret;
3474 	unsigned long iflags;
3475 
3476 
3477 	if (!scsi_debug_lbp())
3478 		return 0;	/* fib and say its done */
3479 	payload_len = get_unaligned_be16(scp->cmnd + 7);
3480 	BUG_ON(scsi_bufflen(scp) != payload_len);
3481 
3482 	descriptors = (payload_len - 8) / 16;
3483 	if (descriptors > sdebug_unmap_max_desc) {
3484 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3485 		return check_condition_result;
3486 	}
3487 
3488 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3489 	if (!buf) {
3490 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3491 				INSUFF_RES_ASCQ);
3492 		return check_condition_result;
3493 	}
3494 
3495 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3496 
3497 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3498 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3499 
3500 	desc = (void *)&buf[8];
3501 
3502 	write_lock_irqsave(&atomic_rw, iflags);
3503 
3504 	for (i = 0 ; i < descriptors ; i++) {
3505 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3506 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
3507 
3508 		ret = check_device_access_params(scp, lba, num);
3509 		if (ret)
3510 			goto out;
3511 
3512 		unmap_region(lba, num);
3513 	}
3514 
3515 	ret = 0;
3516 
3517 out:
3518 	write_unlock_irqrestore(&atomic_rw, iflags);
3519 	kfree(buf);
3520 
3521 	return ret;
3522 }
3523 
3524 #define SDEBUG_GET_LBA_STATUS_LEN 32
3525 
3526 static int resp_get_lba_status(struct scsi_cmnd *scp,
3527 			       struct sdebug_dev_info *devip)
3528 {
3529 	u8 *cmd = scp->cmnd;
3530 	u64 lba;
3531 	u32 alloc_len, mapped, num;
3532 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3533 	int ret;
3534 
3535 	lba = get_unaligned_be64(cmd + 2);
3536 	alloc_len = get_unaligned_be32(cmd + 10);
3537 
3538 	if (alloc_len < 24)
3539 		return 0;
3540 
3541 	ret = check_device_access_params(scp, lba, 1);
3542 	if (ret)
3543 		return ret;
3544 
3545 	if (scsi_debug_lbp())
3546 		mapped = map_state(lba, &num);
3547 	else {
3548 		mapped = 1;
3549 		/* following just in case virtual_gb changed */
3550 		sdebug_capacity = get_sdebug_capacity();
3551 		if (sdebug_capacity - lba <= 0xffffffff)
3552 			num = sdebug_capacity - lba;
3553 		else
3554 			num = 0xffffffff;
3555 	}
3556 
3557 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3558 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3559 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3560 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3561 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3562 
3563 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3564 }
3565 
3566 #define RL_BUCKET_ELEMS 8
3567 
3568 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3569  * (W-LUN), the normal Linux scanning logic does not associate it with a
3570  * device (e.g. /dev/sg7). The following magic will make that association:
3571  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3572  * where <n> is a host number. If there are multiple targets in a host then
3573  * the above will associate a W-LUN to each target. To only get a W-LUN
3574  * for target 2, then use "echo '- 2 49409' > scan" .
3575  */
/*
 * REPORT LUNS. Builds the response in fixed-size buckets of
 * RL_BUCKET_ELEMS 8-byte entries; the 8 byte response header occupies
 * slot 0 of the first bucket (the loops rely on both being 8 bytes).
 * Each full bucket is flushed to the data-in buffer with
 * p_fill_from_dev_buffer() at increasing offsets.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* decide which LUNs (normal and/or well-known) to report */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket: slot 0 holds the response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* bucket not full: last one, flush below */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN in the free slot */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3666 
3667 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3668 			    unsigned int num, struct sdebug_dev_info *devip)
3669 {
3670 	int j;
3671 	unsigned char *kaddr, *buf;
3672 	unsigned int offset;
3673 	struct scsi_data_buffer *sdb = scsi_in(scp);
3674 	struct sg_mapping_iter miter;
3675 
3676 	/* better not to use temporary buffer. */
3677 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3678 	if (!buf) {
3679 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3680 				INSUFF_RES_ASCQ);
3681 		return check_condition_result;
3682 	}
3683 
3684 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3685 
3686 	offset = 0;
3687 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3688 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3689 
3690 	while (sg_miter_next(&miter)) {
3691 		kaddr = miter.addr;
3692 		for (j = 0; j < miter.length; j++)
3693 			*(kaddr + j) ^= *(buf + offset + j);
3694 
3695 		offset += miter.length;
3696 	}
3697 	sg_miter_stop(&miter);
3698 	kfree(buf);
3699 
3700 	return 0;
3701 }
3702 
3703 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3704 			       struct sdebug_dev_info *devip)
3705 {
3706 	u8 *cmd = scp->cmnd;
3707 	u64 lba;
3708 	u32 num;
3709 	int errsts;
3710 
3711 	if (!scsi_bidi_cmnd(scp)) {
3712 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3713 				INSUFF_RES_ASCQ);
3714 		return check_condition_result;
3715 	}
3716 	errsts = resp_read_dt0(scp, devip);
3717 	if (errsts)
3718 		return errsts;
3719 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3720 		errsts = resp_write_dt0(scp, devip);
3721 		if (errsts)
3722 			return errsts;
3723 	}
3724 	lba = get_unaligned_be32(cmd + 2);
3725 	num = get_unaligned_be16(cmd + 7);
3726 	return resp_xdwriteread(scp, lba, num, devip);
3727 }
3728 
3729 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3730 {
3731 	u32 tag = blk_mq_unique_tag(cmnd->request);
3732 	u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3733 
3734 	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
3735 	if (WARN_ON_ONCE(hwq >= submit_queues))
3736 		hwq = 0;
3737 	return sdebug_q_arr + hwq;
3738 }
3739 
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	/* this deferral is consumed; a future command re-arms it */
	sd_dp->defer_t = SDEB_DEFER_NONE;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* completion ran on a different CPU than the submission */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* slot was already completed or aborted elsewhere */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means the user shrank max_queue */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink (or clear) retired_max_queue as the highest
		 * in-use slots drain */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3805 
/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	/* recover the deferral that embeds this timer */
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;	/* one-shot: never re-arm here */
}
3814 
/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	/* recover the deferral that embeds this work item */
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
3822 
/* for uuid_ctl==2: generate one LU name UUID and share it across all LUs */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3825 
3826 static struct sdebug_dev_info *sdebug_device_create(
3827 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3828 {
3829 	struct sdebug_dev_info *devip;
3830 
3831 	devip = kzalloc(sizeof(*devip), flags);
3832 	if (devip) {
3833 		if (sdebug_uuid_ctl == 1)
3834 			uuid_gen(&devip->lu_name);
3835 		else if (sdebug_uuid_ctl == 2) {
3836 			if (got_shared_uuid)
3837 				devip->lu_name = shared_uuid;
3838 			else {
3839 				uuid_gen(&shared_uuid);
3840 				got_shared_uuid = true;
3841 				devip->lu_name = shared_uuid;
3842 			}
3843 		}
3844 		devip->sdbg_host = sdbg_host;
3845 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3846 	}
3847 	return devip;
3848 }
3849 
3850 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3851 {
3852 	struct sdebug_host_info *sdbg_host;
3853 	struct sdebug_dev_info *open_devip = NULL;
3854 	struct sdebug_dev_info *devip;
3855 
3856 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3857 	if (!sdbg_host) {
3858 		pr_err("Host info NULL\n");
3859 		return NULL;
3860 	}
3861 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3862 		if ((devip->used) && (devip->channel == sdev->channel) &&
3863 		    (devip->target == sdev->id) &&
3864 		    (devip->lun == sdev->lun))
3865 			return devip;
3866 		else {
3867 			if ((!devip->used) && (!open_devip))
3868 				open_devip = devip;
3869 		}
3870 	}
3871 	if (!open_devip) { /* try and make a new one */
3872 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3873 		if (!open_devip) {
3874 			pr_err("out of memory at line %d\n", __LINE__);
3875 			return NULL;
3876 		}
3877 	}
3878 
3879 	open_devip->channel = sdev->channel;
3880 	open_devip->target = sdev->id;
3881 	open_devip->lun = sdev->lun;
3882 	open_devip->sdbg_host = sdbg_host;
3883 	atomic_set(&open_devip->num_in_q, 0);
3884 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3885 	open_devip->used = true;
3886 	return open_devip;
3887 }
3888 
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	/* allow bidirectional commands (used by XDWRITEREAD(10) above) */
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3897 
3898 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3899 {
3900 	struct sdebug_dev_info *devip =
3901 			(struct sdebug_dev_info *)sdp->hostdata;
3902 
3903 	if (sdebug_verbose)
3904 		pr_info("slave_configure <%u %u %u %llu>\n",
3905 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3906 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3907 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3908 	if (devip == NULL) {
3909 		devip = find_build_dev_info(sdp);
3910 		if (devip == NULL)
3911 			return 1;  /* no resources, will be marked offline */
3912 	}
3913 	sdp->hostdata = devip;
3914 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3915 	if (sdebug_no_uld)
3916 		sdp->no_uld_attach = 1;
3917 	config_cdb_len(sdp);
3918 	return 0;
3919 }
3920 
3921 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3922 {
3923 	struct sdebug_dev_info *devip =
3924 		(struct sdebug_dev_info *)sdp->hostdata;
3925 
3926 	if (sdebug_verbose)
3927 		pr_info("slave_destroy <%u %u %u %llu>\n",
3928 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3929 	if (devip) {
3930 		/* make this slot available for re-use */
3931 		devip->used = false;
3932 		sdp->hostdata = NULL;
3933 	}
3934 }
3935 
3936 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3937 			   enum sdeb_defer_type defer_t)
3938 {
3939 	if (!sd_dp)
3940 		return;
3941 	if (defer_t == SDEB_DEFER_HRT)
3942 		hrtimer_cancel(&sd_dp->hrt);
3943 	else if (defer_t == SDEB_DEFER_WQ)
3944 		cancel_work_sync(&sd_dp->ew.work);
3945 }
3946 
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the command */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)	/* include retired slots not yet drained */
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the defer type under the lock,
				 * then cancel with the lock dropped */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3992 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* snapshot the defer type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* drop the lock while cancelling, then
				 * re-take it to continue the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
4031 
4032 /* Free queued command memory on heap */
4033 static void free_all_queued(void)
4034 {
4035 	int j, k;
4036 	struct sdebug_queue *sqp;
4037 	struct sdebug_queued_cmd *sqcp;
4038 
4039 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4040 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4041 			sqcp = &sqp->qc_arr[k];
4042 			kfree(sqcp->sd_dp);
4043 			sqcp->sd_dp = NULL;
4044 		}
4045 	}
4046 }
4047 
4048 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4049 {
4050 	bool ok;
4051 
4052 	++num_aborts;
4053 	if (SCpnt) {
4054 		ok = stop_queued_cmnd(SCpnt);
4055 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4056 			sdev_printk(KERN_INFO, SCpnt->device,
4057 				    "%s: command%s found\n", __func__,
4058 				    ok ? "" : " not");
4059 	}
4060 	return SUCCESS;
4061 }
4062 
4063 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
4064 {
4065 	++num_dev_resets;
4066 	if (SCpnt && SCpnt->device) {
4067 		struct scsi_device *sdp = SCpnt->device;
4068 		struct sdebug_dev_info *devip =
4069 				(struct sdebug_dev_info *)sdp->hostdata;
4070 
4071 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4072 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4073 		if (devip)
4074 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
4075 	}
4076 	return SUCCESS;
4077 }
4078 
4079 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4080 {
4081 	struct sdebug_host_info *sdbg_host;
4082 	struct sdebug_dev_info *devip;
4083 	struct scsi_device *sdp;
4084 	struct Scsi_Host *hp;
4085 	int k = 0;
4086 
4087 	++num_target_resets;
4088 	if (!SCpnt)
4089 		goto lie;
4090 	sdp = SCpnt->device;
4091 	if (!sdp)
4092 		goto lie;
4093 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4094 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4095 	hp = sdp->host;
4096 	if (!hp)
4097 		goto lie;
4098 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4099 	if (sdbg_host) {
4100 		list_for_each_entry(devip,
4101 				    &sdbg_host->dev_info_list,
4102 				    dev_list)
4103 			if (devip->target == sdp->id) {
4104 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4105 				++k;
4106 			}
4107 	}
4108 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4109 		sdev_printk(KERN_INFO, sdp,
4110 			    "%s: %d device(s) found in target\n", __func__, k);
4111 lie:
4112 	return SUCCESS;
4113 }
4114 
4115 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
4116 {
4117 	struct sdebug_host_info *sdbg_host;
4118 	struct sdebug_dev_info *devip;
4119 	struct scsi_device *sdp;
4120 	struct Scsi_Host *hp;
4121 	int k = 0;
4122 
4123 	++num_bus_resets;
4124 	if (!(SCpnt && SCpnt->device))
4125 		goto lie;
4126 	sdp = SCpnt->device;
4127 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4128 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4129 	hp = sdp->host;
4130 	if (hp) {
4131 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4132 		if (sdbg_host) {
4133 			list_for_each_entry(devip,
4134 					    &sdbg_host->dev_info_list,
4135 					    dev_list) {
4136 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4137 				++k;
4138 			}
4139 		}
4140 	}
4141 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4142 		sdev_printk(KERN_INFO, sdp,
4143 			    "%s: %d device(s) found in host\n", __func__, k);
4144 lie:
4145 	return SUCCESS;
4146 }
4147 
4148 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
4149 {
4150 	struct sdebug_host_info * sdbg_host;
4151 	struct sdebug_dev_info *devip;
4152 	int k = 0;
4153 
4154 	++num_host_resets;
4155 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4156 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4157 	spin_lock(&sdebug_host_list_lock);
4158 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4159 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
4160 				    dev_list) {
4161 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4162 			++k;
4163 		}
4164 	}
4165 	spin_unlock(&sdebug_host_list_lock);
4166 	stop_all_queued();
4167 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4168 		sdev_printk(KERN_INFO, SCpnt->device,
4169 			    "%s: %d device(s) found\n", __func__, k);
4170 	return SUCCESS;
4171 }
4172 
/* Write a legacy MBR partition table into the first sector of the RAM
 * store at @ramp, dividing @store_size bytes into sdebug_num_parts
 * cylinder-aligned Linux partitions.
 */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;		/* need at least 1 MiB to bother */
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* the first sdebug_sectors_per sectors precede the first partition */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* translate linear sector numbers into legacy CHS fields */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4222 
4223 static void block_unblock_all_queues(bool block)
4224 {
4225 	int j;
4226 	struct sdebug_queue *sqp;
4227 
4228 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4229 		atomic_set(&sqp->blocked, (int)block);
4230 }
4231 
4232 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4233  * commands will be processed normally before triggers occur.
4234  */
4235 static void tweak_cmnd_count(void)
4236 {
4237 	int count, modulo;
4238 
4239 	modulo = abs(sdebug_every_nth);
4240 	if (modulo < 2)
4241 		return;
4242 	block_unblock_all_queues(true);
4243 	count = atomic_read(&sdebug_cmnd_count);
4244 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4245 	block_unblock_all_queues(false);
4246 }
4247 
/* Reset the driver-wide command/completion statistics counters. */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4255 
4256 static void setup_inject(struct sdebug_queue *sqp,
4257 			 struct sdebug_queued_cmd *sqcp)
4258 {
4259 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
4260 		return;
4261 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4262 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4263 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4264 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4265 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4266 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4267 }
4268 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result, int delta_jiff, int ndelay)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (unlikely(sdebug_verbose && scsi_result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, scsi_result);
	if (delta_jiff == 0)	/* delay of 0: complete in caller's context */
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		/* device queue depth exceeded */
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* RARE_TSF: inject TASK SET FULL when the queue is one short
		 * of full and the every_nth counter has rolled over */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		/* no free slot in this submission queue */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	cmnd->result = scsi_result;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (sd_dp == NULL) {
		/* lazily allocate this slot's deferral; reused afterwards */
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (sd_dp == NULL)
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (delta_jiff > 0 || ndelay > 0) {
		/* positive delay: defer completion via hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
		} else
			kt = ndelay;
		if (!sd_dp->init_hrt) {
			/* one-time hrtimer setup for this slot */
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			/* one-time work item setup for this slot */
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		schedule_work(&sd_dp->ew.work);
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4404 
4405 /* Note: The following macros create attribute files in the
4406    /sys/module/scsi_debug/parameters directory. Unfortunately this
4407    driver is unaware of a change and cannot trigger auxiliary actions
4408    as it can when the corresponding attribute in the
4409    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4410  */
/* permissions: S_IRUGO == 0444 (read-only), S_IRUGO|S_IWUSR == 0644 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* one-line help text per parameter, shown by modinfo */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4523 
4524 #define SDEBUG_INFO_LEN 256
4525 static char sdebug_info[SDEBUG_INFO_LEN];
4526 
4527 static const char * scsi_debug_info(struct Scsi_Host * shp)
4528 {
4529 	int k;
4530 
4531 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4532 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4533 	if (k >= (SDEBUG_INFO_LEN - 1))
4534 		return sdebug_info;
4535 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4536 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4537 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4538 		  "statistics", (int)sdebug_statistics);
4539 	return sdebug_info;
4540 }
4541 
4542 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4543 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4544 				 int length)
4545 {
4546 	char arr[16];
4547 	int opts;
4548 	int minLen = length > 15 ? 15 : length;
4549 
4550 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4551 		return -EACCES;
4552 	memcpy(arr, buffer, minLen);
4553 	arr[minLen] = '\0';
4554 	if (1 != sscanf(arr, "%d", &opts))
4555 		return -EINVAL;
4556 	sdebug_opts = opts;
4557 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4558 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4559 	if (sdebug_every_nth != 0)
4560 		tweak_cmnd_count();
4561 	return length;
4562 }
4563 
4564 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4565  * same for each scsi_debug host (if more than one). Some of the counters
4566  * output are not atomics so might be inaccurate in a busy system. */
4567 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4568 {
4569 	int f, j, l;
4570 	struct sdebug_queue *sqp;
4571 
4572 	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4573 		   SDEBUG_VERSION, sdebug_version_date);
4574 	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4575 		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4576 		   sdebug_opts, sdebug_every_nth);
4577 	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4578 		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4579 		   sdebug_sector_size, "bytes");
4580 	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4581 		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4582 		   num_aborts);
4583 	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4584 		   num_dev_resets, num_target_resets, num_bus_resets,
4585 		   num_host_resets);
4586 	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4587 		   dix_reads, dix_writes, dif_errors);
4588 	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
4589 		   sdebug_statistics);
4590 	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4591 		   atomic_read(&sdebug_cmnd_count),
4592 		   atomic_read(&sdebug_completions),
4593 		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
4594 		   atomic_read(&sdebug_a_tsf));
4595 
4596 	seq_printf(m, "submit_queues=%d\n", submit_queues);
4597 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4598 		seq_printf(m, "  queue %d:\n", j);
4599 		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4600 		if (f != sdebug_max_queue) {
4601 			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4602 			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
4603 				   "first,last bits", f, l);
4604 		}
4605 	}
4606 	return 0;
4607 }
4608 
/* Show current response delay in jiffies (sdebug_jdelay). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies. A successful store also clears sdebug_ndelay, making
 * the jiffies delay authoritative again.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Block queues while scanning so no command can be
			 * queued between the check and the update. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4648 
/* Show current response delay in nanoseconds (sdebug_ndelay). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accepted range: [0, 1 second) in nanoseconds */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Block queues; only change delay when no command
			 * is in flight on any submit queue. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4689 
4690 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4691 {
4692 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4693 }
4694 
4695 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4696 			  size_t count)
4697 {
4698 	int opts;
4699 	char work[20];
4700 
4701 	if (sscanf(buf, "%10s", work) == 1) {
4702 		if (strncasecmp(work, "0x", 2) == 0) {
4703 			if (kstrtoint(work + 2, 16, &opts) == 0)
4704 				goto opts_done;
4705 		} else {
4706 			if (kstrtoint(work, 10, &opts) == 0)
4707 				goto opts_done;
4708 		}
4709 	}
4710 	return -EINVAL;
4711 opts_done:
4712 	sdebug_opts = opts;
4713 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4714 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4715 	tweak_cmnd_count();
4716 	return count;
4717 }
4718 static DRIVER_ATTR_RW(opts);
4719 
4720 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4721 {
4722 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4723 }
4724 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4725 			   size_t count)
4726 {
4727 	int n;
4728 
4729 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4730 		sdebug_ptype = n;
4731 		return count;
4732 	}
4733 	return -EINVAL;
4734 }
4735 static DRIVER_ATTR_RW(ptype);
4736 
4737 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4738 {
4739 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4740 }
4741 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4742 			    size_t count)
4743 {
4744 	int n;
4745 
4746 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4747 		sdebug_dsense = n;
4748 		return count;
4749 	}
4750 	return -EINVAL;
4751 }
4752 static DRIVER_ATTR_RW(dsense);
4753 
4754 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4755 {
4756 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4757 }
4758 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4759 			     size_t count)
4760 {
4761 	int n;
4762 
4763 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4764 		n = (n > 0);
4765 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4766 		if (sdebug_fake_rw != n) {
4767 			if ((0 == n) && (NULL == fake_storep)) {
4768 				unsigned long sz =
4769 					(unsigned long)sdebug_dev_size_mb *
4770 					1048576;
4771 
4772 				fake_storep = vmalloc(sz);
4773 				if (NULL == fake_storep) {
4774 					pr_err("out of memory, 9\n");
4775 					return -ENOMEM;
4776 				}
4777 				memset(fake_storep, 0, sz);
4778 			}
4779 			sdebug_fake_rw = n;
4780 		}
4781 		return count;
4782 	}
4783 	return -EINVAL;
4784 }
4785 static DRIVER_ATTR_RW(fake_rw);
4786 
4787 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4788 {
4789 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4790 }
4791 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4792 			      size_t count)
4793 {
4794 	int n;
4795 
4796 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4797 		sdebug_no_lun_0 = n;
4798 		return count;
4799 	}
4800 	return -EINVAL;
4801 }
4802 static DRIVER_ATTR_RW(no_lun_0);
4803 
4804 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4805 {
4806 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4807 }
4808 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4809 			      size_t count)
4810 {
4811 	int n;
4812 
4813 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4814 		sdebug_num_tgts = n;
4815 		sdebug_max_tgts_luns();
4816 		return count;
4817 	}
4818 	return -EINVAL;
4819 }
4820 static DRIVER_ATTR_RW(num_tgts);
4821 
/* Read-only: size in MiB of the ram shared by all devices. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

/* Read-only: number of partitions built on the ramdisk at init time. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4833 
4834 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4835 {
4836 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4837 }
4838 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4839 			       size_t count)
4840 {
4841 	int nth;
4842 
4843 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4844 		sdebug_every_nth = nth;
4845 		if (nth && !sdebug_statistics) {
4846 			pr_info("every_nth needs statistics=1, set it\n");
4847 			sdebug_statistics = true;
4848 		}
4849 		tweak_cmnd_count();
4850 		return count;
4851 	}
4852 	return -EINVAL;
4853 }
4854 static DRIVER_ATTR_RW(every_nth);
4855 
/* Show the number of LUNs simulated per target. */
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Set LUNs per target (capped at 256). On an actual change, raise a
 * REPORTED LUNS DATA HAS CHANGED unit attention on every device when the
 * simulated SCSI level supports it.
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the UA on every device of every pseudo host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4894 
/* Show the per-host limit on queued commands. */
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k = highest busy slot index over all submit queues;
		 * find_last_bit() returns the size when no bit is set */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)	/* no command in flight */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)	/* in-flight commands above new limit */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4930 
/* Read-only: whether upper-level driver (e.g. sd) attachment is stopped. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Read-only: the simulated SCSI level. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
4942 
/* Show virtual capacity in GiB (0 -> capacity follows dev_size_mb). */
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Set virtual capacity. On an actual change, recompute the capacity and
 * raise a CAPACITY DATA HAS CHANGED unit attention on every device.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the UA on every device of every pseudo host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4977 
4978 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4979 {
4980 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4981 }
4982 
4983 static int sdebug_add_adapter(void);
4984 static void sdebug_remove_adapter(void);
4985 
4986 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4987 			      size_t count)
4988 {
4989 	int delta_hosts;
4990 
4991 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4992 		return -EINVAL;
4993 	if (delta_hosts > 0) {
4994 		do {
4995 			sdebug_add_adapter();
4996 		} while (--delta_hosts);
4997 	} else if (delta_hosts < 0) {
4998 		do {
4999 			sdebug_remove_adapter();
5000 		} while (++delta_hosts);
5001 	}
5002 	return count;
5003 }
5004 static DRIVER_ATTR_RW(add_host);
5005 
5006 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5007 {
5008 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5009 }
5010 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5011 				    size_t count)
5012 {
5013 	int n;
5014 
5015 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5016 		sdebug_vpd_use_hostno = n;
5017 		return count;
5018 	}
5019 	return -EINVAL;
5020 }
5021 static DRIVER_ATTR_RW(vpd_use_hostno);
5022 
5023 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5024 {
5025 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5026 }
5027 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5028 				size_t count)
5029 {
5030 	int n;
5031 
5032 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5033 		if (n > 0)
5034 			sdebug_statistics = true;
5035 		else {
5036 			clear_queue_stats();
5037 			sdebug_statistics = false;
5038 		}
5039 		return count;
5040 	}
5041 	return -EINVAL;
5042 }
5043 static DRIVER_ATTR_RW(statistics);
5044 
/* Read-only: logical block size in bytes. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

/* Read-only: number of block-mq submit queues. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

/* Read-only: data integrity extensions (DIX) mask. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* Read-only: data integrity field (DIF) protection type. */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* Read-only: protection checksum type (0=crc, 1=ip). */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* Read-only: current ato module parameter value. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
5080 
/* Read-only: provisioning map as a bit-list; with LBP off, report the
 * whole LBA range as one span.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* PAGE_SIZE - 1 keeps room for the '\n' and NUL appended below;
	 * scnprintf() returns at most size - 1, so both writes fit. */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
5097 
5098 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5099 {
5100 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5101 }
5102 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5103 			       size_t count)
5104 {
5105 	int n;
5106 
5107 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5108 		sdebug_removable = (n > 0);
5109 		return count;
5110 	}
5111 	return -EINVAL;
5112 }
5113 static DRIVER_ATTR_RW(removable);
5114 
5115 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5116 {
5117 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5118 }
5119 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5120 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5121 			       size_t count)
5122 {
5123 	int n;
5124 
5125 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5126 		sdebug_host_lock = (n > 0);
5127 		return count;
5128 	}
5129 	return -EINVAL;
5130 }
5131 static DRIVER_ATTR_RW(host_lock);
5132 
5133 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5134 {
5135 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5136 }
5137 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5138 			    size_t count)
5139 {
5140 	int n;
5141 
5142 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5143 		sdebug_strict = (n > 0);
5144 		return count;
5145 	}
5146 	return -EINVAL;
5147 }
5148 static DRIVER_ATTR_RW(strict);
5149 
/* Read-only: whether LU names are based on uuids (see uuid_ctl parm). */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
5155 
5156 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5157 {
5158 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5159 }
5160 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5161 			     size_t count)
5162 {
5163 	int ret, n;
5164 
5165 	ret = kstrtoint(buf, 0, &n);
5166 	if (ret)
5167 		return ret;
5168 	sdebug_cdb_len = n;
5169 	all_config_cdb_len();
5170 	return count;
5171 }
5172 static DRIVER_ATTR_RW(cdb_len);
5173 
5174 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */
5181 
/* Driver attributes exposed under /sys/bus/pseudo/drivers/scsi_debug */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,	/* terminator */
};
ATTRIBUTE_GROUPS(sdebug_drv);
5217 
5218 static struct device *pseudo_primary;
5219 
/* Module/built-in entry point: validate parameters, allocate the submit
 * queues and backing store, register the pseudo bus/driver and build the
 * initial set of pseudo adapters. On failure, unwinds in reverse order
 * via the goto labels at the bottom.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* a valid positive ndelay overrides the jiffies delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two block sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* with fake_rw there is no data movement, so no ramdisk needed;
	 * it can still be allocated later via the fake_rw attribute */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		/* NOTE(review): pr_err here looks informational -- confirm */
		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff pattern marks protection info as not yet written */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		/* one bit per provisioning block */
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host per success */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5428 
/* Tear down in reverse of scsi_debug_init(): quiesce queued commands,
 * remove the pseudo adapters, unregister driver/bus/root device and free
 * the backing stores.
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;

	stop_all_queued();
	free_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}

/* device_initcall so, when built in, init runs after the SCSI midlayer */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
5449 
/* ->release callback for a pseudo adapter's struct device; frees the
 * containing sdebug_host_info when its last reference is dropped.
 */
static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
5457 
5458 static int sdebug_add_adapter(void)
5459 {
5460 	int k, devs_per_host;
5461 	int error = 0;
5462 	struct sdebug_host_info *sdbg_host;
5463 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5464 
5465 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5466 	if (sdbg_host == NULL) {
5467 		pr_err("out of memory at line %d\n", __LINE__);
5468 		return -ENOMEM;
5469 	}
5470 
5471 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5472 
5473 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5474 	for (k = 0; k < devs_per_host; k++) {
5475 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5476 		if (!sdbg_devinfo) {
5477 			pr_err("out of memory at line %d\n", __LINE__);
5478 			error = -ENOMEM;
5479 			goto clean;
5480 		}
5481 	}
5482 
5483 	spin_lock(&sdebug_host_list_lock);
5484 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5485 	spin_unlock(&sdebug_host_list_lock);
5486 
5487 	sdbg_host->dev.bus = &pseudo_lld_bus;
5488 	sdbg_host->dev.parent = pseudo_primary;
5489 	sdbg_host->dev.release = &sdebug_release_adapter;
5490 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5491 
5492 	error = device_register(&sdbg_host->dev);
5493 
5494 	if (error)
5495 		goto clean;
5496 
5497 	++sdebug_add_host;
5498 	return error;
5499 
5500 clean:
5501 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5502 				 dev_list) {
5503 		list_del(&sdbg_devinfo->dev_list);
5504 		kfree(sdbg_devinfo);
5505 	}
5506 
5507 	kfree(sdbg_host);
5508 	return error;
5509 }
5510 
/* Remove the most recently added pseudo adapter, if any: unlink it from
 * sdebug_host_list under the lock, then unregister its device (freeing
 * happens via the ->release callback).
 */
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info *sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* last entry == most recently added */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_add_host;
}
5529 
5530 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5531 {
5532 	int num_in_q = 0;
5533 	struct sdebug_dev_info *devip;
5534 
5535 	block_unblock_all_queues(true);
5536 	devip = (struct sdebug_dev_info *)sdev->hostdata;
5537 	if (NULL == devip) {
5538 		block_unblock_all_queues(false);
5539 		return	-ENODEV;
5540 	}
5541 	num_in_q = atomic_read(&devip->num_in_q);
5542 
5543 	if (qdepth < 1)
5544 		qdepth = 1;
5545 	/* allow to exceed max host qc_arr elements for testing */
5546 	if (qdepth > SDEBUG_CANQUEUE + 10)
5547 		qdepth = SDEBUG_CANQUEUE + 10;
5548 	scsi_change_queue_depth(sdev, qdepth);
5549 
5550 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5551 		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5552 			    __func__, qdepth, num_in_q);
5553 	}
5554 	block_unblock_all_queues(false);
5555 	return sdev->queue_depth;
5556 }
5557 
5558 static bool fake_timeout(struct scsi_cmnd *scp)
5559 {
5560 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5561 		if (sdebug_every_nth < -1)
5562 			sdebug_every_nth = -1;
5563 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5564 			return true; /* ignore command causing timeout */
5565 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5566 			 scsi_medium_access_command(scp))
5567 			return true; /* time out reads and writes */
5568 	}
5569 	return false;
5570 }
5571 
5572 static bool fake_host_busy(struct scsi_cmnd *scp)
5573 {
5574 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5575 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5576 }
5577 
/*
 * queuecommand entry point for the simulated host. Looks the CDB opcode
 * (and, where applicable, service action) up in opcode_info_arr, performs
 * the generic checks (LUN range, CDB mask when sdebug_strict, pending unit
 * attentions, stopped state) and then dispatches to the matching resp_*
 * function. The response is delivered through schedule_resp() with the
 * configured delay. Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally dump the CDB bytes in hex for debugging */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	/* the REPORT LUNS well-known LUN is valid even beyond max_luns */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;	/* remember the root's handler as fallback */
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action is either in cmd[1] (low 5 bits)
			 * or big-endian in cmd[8..9] */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched: invalid field/opcode */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* only a few opcodes are legal on the REPORT LUNS well-known LUN */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest offending bit for sense data */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention before executing the command */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* skip data transfer, report success */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, 0, 0);
	else
		return schedule_resp(scp, devip, errsts, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
5724 
/* Host template for the simulated adapter. can_queue and use_clustering
 * are patched per module parameters in sdebug_driver_probe() before
 * scsi_host_alloc() is called. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no transfer size limit */
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5751 
5752 static int sdebug_driver_probe(struct device * dev)
5753 {
5754 	int error = 0;
5755 	struct sdebug_host_info *sdbg_host;
5756 	struct Scsi_Host *hpnt;
5757 	int hprot;
5758 
5759 	sdbg_host = to_sdebug_host(dev);
5760 
5761 	sdebug_driver_template.can_queue = sdebug_max_queue;
5762 	if (sdebug_clustering)
5763 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5764 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5765 	if (NULL == hpnt) {
5766 		pr_err("scsi_host_alloc failed\n");
5767 		error = -ENODEV;
5768 		return error;
5769 	}
5770 	if (submit_queues > nr_cpu_ids) {
5771 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5772 			my_name, submit_queues, nr_cpu_ids);
5773 		submit_queues = nr_cpu_ids;
5774 	}
5775 	/* Decide whether to tell scsi subsystem that we want mq */
5776 	/* Following should give the same answer for each host */
5777 	if (shost_use_blk_mq(hpnt))
5778 		hpnt->nr_hw_queues = submit_queues;
5779 
5780 	sdbg_host->shost = hpnt;
5781 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5782 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5783 		hpnt->max_id = sdebug_num_tgts + 1;
5784 	else
5785 		hpnt->max_id = sdebug_num_tgts;
5786 	/* = sdebug_max_luns; */
5787 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5788 
5789 	hprot = 0;
5790 
5791 	switch (sdebug_dif) {
5792 
5793 	case T10_PI_TYPE1_PROTECTION:
5794 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5795 		if (sdebug_dix)
5796 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5797 		break;
5798 
5799 	case T10_PI_TYPE2_PROTECTION:
5800 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5801 		if (sdebug_dix)
5802 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5803 		break;
5804 
5805 	case T10_PI_TYPE3_PROTECTION:
5806 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5807 		if (sdebug_dix)
5808 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5809 		break;
5810 
5811 	default:
5812 		if (sdebug_dix)
5813 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5814 		break;
5815 	}
5816 
5817 	scsi_host_set_prot(hpnt, hprot);
5818 
5819 	if (have_dif_prot || sdebug_dix)
5820 		pr_info("host protection%s%s%s%s%s%s%s\n",
5821 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5822 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5823 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5824 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5825 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5826 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5827 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5828 
5829 	if (sdebug_guard == 1)
5830 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5831 	else
5832 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5833 
5834 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5835 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5836 	if (sdebug_every_nth)	/* need stats counters for every_nth */
5837 		sdebug_statistics = true;
5838 	error = scsi_add_host(hpnt, &sdbg_host->dev);
5839 	if (error) {
5840 		pr_err("scsi_add_host failed\n");
5841 		error = -ENODEV;
5842 		scsi_host_put(hpnt);
5843 	} else
5844 		scsi_scan_host(hpnt);
5845 
5846 	return error;
5847 }
5848 
5849 static int sdebug_driver_remove(struct device * dev)
5850 {
5851 	struct sdebug_host_info *sdbg_host;
5852 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5853 
5854 	sdbg_host = to_sdebug_host(dev);
5855 
5856 	if (!sdbg_host) {
5857 		pr_err("Unable to locate host info\n");
5858 		return -ENODEV;
5859 	}
5860 
5861 	scsi_remove_host(sdbg_host->shost);
5862 
5863 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5864 				 dev_list) {
5865 		list_del(&sdbg_devinfo->dev_list);
5866 		kfree(sdbg_devinfo);
5867 	}
5868 
5869 	scsi_host_put(sdbg_host->shost);
5870 	return 0;
5871 }
5872 
/* Bus match callback: every driver on the pseudo bus matches every
 * device on it, so always report a match. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5878 
/* The "pseudo" bus that the simulated adapter devices hang off; probe
 * and remove create/destroy the corresponding Scsi_Host. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5886