xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision b7e24581)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2017 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0187"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20171202";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 
97 /* Additional Sense Code Qualifier (ASCQ) */
98 #define ACK_NAK_TO 0x3
99 
100 /* Default values for driver parameters */
101 #define DEF_NUM_HOST   1
102 #define DEF_NUM_TGTS   1
103 #define DEF_MAX_LUNS   1
104 /* With these defaults, this driver will make 1 host with 1 target
105  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
106  */
107 #define DEF_ATO 1
108 #define DEF_CDB_LEN 10
109 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
110 #define DEF_DEV_SIZE_MB   8
111 #define DEF_DIF 0
112 #define DEF_DIX 0
113 #define DEF_D_SENSE   0
114 #define DEF_EVERY_NTH   0
115 #define DEF_FAKE_RW	0
116 #define DEF_GUARD 0
117 #define DEF_HOST_LOCK 0
118 #define DEF_LBPU 0
119 #define DEF_LBPWS 0
120 #define DEF_LBPWS10 0
121 #define DEF_LBPRZ 1
122 #define DEF_LOWEST_ALIGNED 0
123 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
124 #define DEF_NO_LUN_0   0
125 #define DEF_NUM_PARTS   0
126 #define DEF_OPTS   0
127 #define DEF_OPT_BLKS 1024
128 #define DEF_PHYSBLK_EXP 0
129 #define DEF_OPT_XFERLEN_EXP 0
130 #define DEF_PTYPE   TYPE_DISK
131 #define DEF_REMOVABLE false
132 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
133 #define DEF_SECTOR_SIZE 512
134 #define DEF_UNMAP_ALIGNMENT 0
135 #define DEF_UNMAP_GRANULARITY 1
136 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
137 #define DEF_UNMAP_MAX_DESC 256
138 #define DEF_VIRTUAL_GB   0
139 #define DEF_VPD_USE_HOSTNO 1
140 #define DEF_WRITESAME_LENGTH 0xFFFF
141 #define DEF_STRICT 0
142 #define DEF_STATISTICS false
143 #define DEF_SUBMIT_QUEUES 1
144 #define DEF_UUID_CTL 0
145 #define JDELAY_OVERRIDDEN -9999
146 
147 #define SDEBUG_LUN_0_VAL 0
148 
149 /* bit mask values for sdebug_opts */
150 #define SDEBUG_OPT_NOISE		1
151 #define SDEBUG_OPT_MEDIUM_ERR		2
152 #define SDEBUG_OPT_TIMEOUT		4
153 #define SDEBUG_OPT_RECOVERED_ERR	8
154 #define SDEBUG_OPT_TRANSPORT_ERR	16
155 #define SDEBUG_OPT_DIF_ERR		32
156 #define SDEBUG_OPT_DIX_ERR		64
157 #define SDEBUG_OPT_MAC_TIMEOUT		128
158 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
159 #define SDEBUG_OPT_Q_NOISE		0x200
160 #define SDEBUG_OPT_ALL_TSF		0x400
161 #define SDEBUG_OPT_RARE_TSF		0x800
162 #define SDEBUG_OPT_N_WCE		0x1000
163 #define SDEBUG_OPT_RESET_NOISE		0x2000
164 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
165 #define SDEBUG_OPT_HOST_BUSY		0x8000
166 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
167 			      SDEBUG_OPT_RESET_NOISE)
168 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
169 				  SDEBUG_OPT_TRANSPORT_ERR | \
170 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
171 				  SDEBUG_OPT_SHORT_TRANSFER | \
172 				  SDEBUG_OPT_HOST_BUSY)
173 /* When "every_nth" > 0 then modulo "every_nth" commands:
174  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
175  *   - a RECOVERED_ERROR is simulated on successful read and write
176  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
177  *   - a TRANSPORT_ERROR is simulated on successful read and write
178  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
179  *
180  * When "every_nth" < 0 then after "- every_nth" commands:
181  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
182  *   - a RECOVERED_ERROR is simulated on successful read and write
183  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
184  *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 * every_nth via sysfs).
189  */
190 
191 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
192  * priority order. In the subset implemented here lower numbers have higher
193  * priority. The UA numbers should be a sequence starting from 0 with
194  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
195 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
196 #define SDEBUG_UA_BUS_RESET 1
197 #define SDEBUG_UA_MODE_CHANGED 2
198 #define SDEBUG_UA_CAPACITY_CHANGED 3
199 #define SDEBUG_UA_LUNS_CHANGED 4
200 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
201 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
202 #define SDEBUG_NUM_UAS 7
203 
204 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
205  * sector on read commands: */
206 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
207 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
208 
209 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
210  * or "peripheral device" addressing (value 0) */
211 #define SAM2_LUN_ADDRESS_METHOD 0
212 
213 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
214  * (for response) per submit queue at one time. Can be reduced by max_queue
215  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
216  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
217  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
218  * but cannot exceed SDEBUG_CANQUEUE .
219  */
220 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
221 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
222 #define DEF_CMD_PER_LUN  255
223 
224 #define F_D_IN			1
225 #define F_D_OUT			2
226 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
227 #define F_D_UNKN		8
228 #define F_RL_WLUN_OK		0x10
229 #define F_SKIP_UA		0x20
230 #define F_DELAY_OVERR		0x40
231 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
233 #define F_INV_OP		0x200
234 #define F_FAKE_RW		0x400
235 #define F_M_ACCESS		0x800	/* media access */
236 
237 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
238 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
239 #define FF_SA (F_SA_HIGH | F_SA_LOW)
240 
241 #define SDEBUG_MAX_PARTS 4
242 
243 #define SDEBUG_MAX_CMD_LEN 32
244 
245 
/*
 * Per logical-unit state for one simulated device; instances are linked
 * on the owning sdebug_host_info's dev_info_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* per-LU UUID; presumably reported when uuid_ctl is set — confirm */
	struct sdebug_host_info *sdbg_host;	/* back pointer to owning simulated host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers are SDEBUG_UA_* */
	atomic_t num_in_q;	/* NOTE(review): looks like count of commands queued on this LU — confirm */
	atomic_t stopped;	/* NOTE(review): looks like "unit stopped" state (START STOP UNIT) — confirm */
	bool used;		/* slot occupied — presumably allows entry reuse; verify */
};
258 
/* One simulated SCSI host adapter; linked on the global sdebug_host_list. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry in sdebug_host_list */
	struct Scsi_Host *shost;	/* the mid-layer host this wraps */
	struct device dev;		/* pseudo device; see to_sdebug_host() */
	struct list_head dev_info_list;	/* child sdebug_dev_info instances */
};
265 
266 #define to_sdebug_host(d)	\
267 	container_of(d, struct sdebug_host_info, dev)
268 
/*
 * Bookkeeping for a deferred command response; carries both an hrtimer
 * and a work item, one of which presumably fires the completion later.
 */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu at submission — presumably compared at completion for sdebug_miss_cpus; confirm */
};
276 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-response state, if response is delayed */
	struct scsi_cmnd *a_cmnd;	/* the command occupying this slot */
	/* inj_* bits record which error injection (cf. SDEBUG_OPT_* option
	 * bits) was selected for this particular command */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};
290 
/* One submit queue's worth of in-flight command slots. */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* occupancy bitmap for qc_arr */
	spinlock_t qc_lock;	/* presumably protects qc_arr/in_use_bm — confirm against users */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
297 
/* Driver-wide statistics counters (cf. the sdebug_statistics flag below) */
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
302 
/*
 * Describes one supported SCSI command (or, via arrp, a family of
 * related commands): response function, behaviour flags and a CDB
 * length + per-byte validity mask.
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags (defined above) */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
314 
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* Each SDEB_I_* value indexes opcode_info_arr[]; opcode_ind_arr[] below
 * performs the cdb[0] -> SDEB_I_* translation. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
349 
350 
/*
 * Maps a SCSI opcode (cdb[0], 0x0..0xff) to its SDEB_I_* index; 0 means
 * the opcode is not supported (SDEB_I_INVALID_OPCODE).
 */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
	     0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
393 
394 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
409 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
412 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
413 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
414 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
415 
/* MODE SENSE(6), opcode 0x1a; attached under the MODE SENSE(10) entry */
static const struct opcode_info_t msense_iarr[1] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
420 
/* MODE SELECT(6), opcode 0x15; attached under the MODE SELECT(10) entry */
static const struct opcode_info_t mselect_iarr[1] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
425 
/* READ(10), READ(6) and READ(12); attached under the READ(16) entry */
static const struct opcode_info_t read_iarr[3] = {
	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
436 
/* WRITE(10), WRITE(6) and WRITE(12); attached under the WRITE(16) entry */
static const struct opcode_info_t write_iarr[3] = {
	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
447 
/* GET LBA STATUS: SERVICE ACTION IN(16), sa 0x12; attached under the
 * READ CAPACITY(16) entry */
static const struct opcode_info_t sa_in_iarr[1] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },
};
453 
/* WRITE(32) variant of the VARIABLE LENGTH cdb (opcode 0x7f, sa 0xb) */
static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
};
459 
/* MAINTENANCE IN service actions: REPORT SUPPORTED OPERATION CODES (sa
 * 0xc) and REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (sa 0xd) */
static const struct opcode_info_t maint_in_iarr[2] = {
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },
};
468 
/* WRITE SAME(16), opcode 0x93; attached under the WRITE SAME(10) entry */
static const struct opcode_info_t write_same_iarr[1] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },
};
474 
/* RESERVE(6), opcode 0x16; attached under the RESERVE(10) entry */
static const struct opcode_info_t reserve_iarr[1] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
479 
/* RELEASE(6), opcode 0x17; attached under the RELEASE(10) entry */
static const struct opcode_info_t release_iarr[1] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
484 
485 
486 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
487  * plus the terminating elements for logic that scans this table such as
488  * REPORT SUPPORTED OPERATION CODES. */
489 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
490 /* 0 */
491 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
492 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
493 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
494 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
495 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
496 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
497 	     0, 0} },
498 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
499 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
500 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
501 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
502 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
503 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
504 	     0} },
505 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
506 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
507 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
508 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
509 	     0, 0, 0} },
510 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
511 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
512 	     0, 0} },
513 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
514 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 	     0xff, 0xff, 0xff, 0xff, 0xc7} },		/* READ(16) */
516 /* 10 */
517 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
518 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
519 	     0xff, 0xff, 0xff, 0xff, 0xc7} },		/* WRITE(16) */
520 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
521 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
522 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
523 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
524 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
525 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
526 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
527 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
528 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
529 	     0} },
530 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
531 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
532 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
533 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
534 	     0, 0, 0, 0, 0, 0} },
535 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
536 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0,
537 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
538 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
539 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
540 	     0} },
541 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
542 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
543 	     0} },
544 /* 20 */
545 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
546 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
548 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
549 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
550 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
551 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
552 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
553 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
554 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
555 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
556 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
557 		   0, 0, 0, 0, 0, 0} },
558 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
559 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
560 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
561 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
562 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff,
563 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
564 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
565 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
566 	     0, 0, 0, 0} },
567 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
568 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
569 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
570 
571 /* 30 */
572 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
573 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
574 };
575 
/*
 * Runtime copies of the driver's tunables; most are initialised from the
 * DEF_* values above and presumably adjustable as module/sysfs parameters
 * (module_param declarations not visible in this chunk).
 */
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* non-zero: descriptor sense format */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;	/* non-zero: skip ramdisk data transfer */
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;	/* OR-ed SDEBUG_OPT_* bits */
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;	/* presumably: any SDEBUG_OPT_ALL_INJECTING bit set — confirm */
static bool sdebug_verbose;	/* presumably derived from SDEBUG_OPT_NOISE — confirm */
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_mq_active;

static unsigned int sdebug_store_sectors;	/* ramdisk size, in sectors */
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */
633 
static LIST_HEAD(sdebug_host_list);		/* all simulated hosts */
static DEFINE_SPINLOCK(sdebug_host_list_lock);	/* guards sdebug_host_list */

static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;	/* size of provisioning map — units not visible here; confirm */
/* counters reported for error handler / DIX activity */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

/* NOTE(review): name suggests this rwlock guards the fake store — confirm */
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
658 
/* pseudo bus/driver pair the simulated hosts are registered under */
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* pre-built scsi_cmnd result values handed back to the mid layer */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
674 
675 
676 /* Only do the extra work involved in logical block provisioning if one or
677  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
678  * real reads and writes (i.e. not skipping them for speed).
679  */
680 static inline bool scsi_debug_lbp(void)
681 {
682 	return 0 == sdebug_fake_rw &&
683 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
684 }
685 
686 static void *fake_store(unsigned long long lba)
687 {
688 	lba = do_div(lba, sdebug_store_sectors);
689 
690 	return fake_storep + lba * sdebug_sector_size;
691 }
692 
693 static struct t10_pi_tuple *dif_store(sector_t sector)
694 {
695 	sector = sector_div(sector, sdebug_store_sectors);
696 
697 	return dif_storep + sector;
698 }
699 
700 static void sdebug_max_tgts_luns(void)
701 {
702 	struct sdebug_host_info *sdbg_host;
703 	struct Scsi_Host *hpnt;
704 
705 	spin_lock(&sdebug_host_list_lock);
706 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
707 		hpnt = sdbg_host->shost;
708 		if ((hpnt->this_id >= 0) &&
709 		    (sdebug_num_tgts > hpnt->this_id))
710 			hpnt->max_id = sdebug_num_tgts + 1;
711 		else
712 			hpnt->max_id = sdebug_num_tgts;
713 		/* sdebug_max_luns; */
714 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
715 	}
716 	spin_unlock(&sdebug_host_list_lock);
717 }
718 
/* Where an invalid field lives: data-out parameter list vs. the cdb */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
720 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build ILLEGAL REQUEST sense data carrying a SENSE KEY SPECIFIC "field
 * pointer" that designates the offending byte (and optionally bit) in
 * either the cdb (c_d == SDEB_IN_CDB) or the data-out parameter list
 * (SDEB_IN_DATA).  Emits descriptor or fixed sense format depending on
 * the dsense module parameter.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes; only first 3 are used */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense key specific field is valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: the error is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);	/* big-endian field pointer */
	if (sdebug_dsense) {
		/* append an SKS descriptor: type 0x2, additional length 6,
		 * SKS bytes start 4 bytes into the descriptor */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS occupies bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
761 
/* Fills scp's sense buffer with the given key/asc/ascq in the format
 * selected by sdebug_dsense (descriptor or fixed). */
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
781 
/* Convenience wrapper: ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
786 
787 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
788 {
789 	if (sdebug_verbose) {
790 		if (0x1261 == cmd)
791 			sdev_printk(KERN_INFO, dev,
792 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
793 		else if (0x5331 == cmd)
794 			sdev_printk(KERN_INFO, dev,
795 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
796 				    __func__);
797 		else
798 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
799 				    __func__, cmd);
800 	}
801 	return -EINVAL;
802 	/* return -ENOTTY; // correct return but upsets fdisk */
803 }
804 
805 static void config_cdb_len(struct scsi_device *sdev)
806 {
807 	switch (sdebug_cdb_len) {
808 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
809 		sdev->use_10_for_rw = false;
810 		sdev->use_16_for_rw = false;
811 		sdev->use_10_for_ms = false;
812 		break;
813 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
814 		sdev->use_10_for_rw = true;
815 		sdev->use_16_for_rw = false;
816 		sdev->use_10_for_ms = false;
817 		break;
818 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
819 		sdev->use_10_for_rw = true;
820 		sdev->use_16_for_rw = false;
821 		sdev->use_10_for_ms = true;
822 		break;
823 	case 16:
824 		sdev->use_10_for_rw = false;
825 		sdev->use_16_for_rw = true;
826 		sdev->use_10_for_ms = true;
827 		break;
828 	case 32: /* No knobs to suggest this so same as 16 for now */
829 		sdev->use_10_for_rw = false;
830 		sdev->use_16_for_rw = true;
831 		sdev->use_10_for_ms = true;
832 		break;
833 	default:
834 		pr_warn("unexpected cdb_len=%d, force to 10\n",
835 			sdebug_cdb_len);
836 		sdev->use_10_for_rw = true;
837 		sdev->use_16_for_rw = false;
838 		sdev->use_10_for_ms = false;
839 		sdebug_cdb_len = 10;
840 		break;
841 	}
842 }
843 
/* Re-applies the current sdebug_cdb_len policy to every device on every
 * simulated host (used when the cdb_len parameter changes at run time). */
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
859 
/* Clears the REPORTED LUNS DATA HAS CHANGED unit attention on every LUN
 * that shares this device's target; see the SPC-4 note in make_ua(). */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
875 
/* If a unit attention is pending on this device, builds the matching sense
 * data, clears that UA bit and returns check_condition_result; returns 0
 * when nothing is pending. The lowest numbered pending UA is reported. */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only for verbose log */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		/* UA is delivered once, then cleared */
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
955 
956 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;	/* no data-in buffer: nothing to transfer */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	/* resid: how much of the buffer was left unfilled */
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
974 
975 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
976  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
977  * calls, not required to write in ascending offset order. Assumes resid
978  * set to scsi_bufflen() prior to any calls.
979  */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;	/* destination offset within data-in buffer */

	if (sdb->length <= off_dst)
		return 0;	/* offset is at or beyond end of buffer */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;	/* command has no data-in phase */

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	/* keep the smallest residual seen across multiple partial fills */
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
1000 
1001 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1002  * 'arr' or -1 if error.
1003  */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;	/* no data-out buffer */
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;	/* command has no data-out phase */

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
1014 
1015 
/* Standard INQUIRY strings: 8-, 16- and 4-byte fields, space padded */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;	/* target port/device ids */
static const u64 naa3_comp_b = 0x3333333000000000ULL;	/* logical unit id */
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1023 
1024 /* Device identification VPD page. Returns number of bytes placed in arr */
/*
 * Emits the designation descriptors: T10 vendor id, logical unit id (UUID
 * or NAA-3 depending on sdebug_uuid_ctl), relative port, target port id,
 * target port group, target device id and a SCSI name string. A negative
 * dev_id_num suppresses the logical-unit and relative-port descriptors
 * (used for well known LUs; see the caller in resp_inquiry).
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* pad name string to 24 bytes */
	num += 4;
	return num;
}
1111 
/* Canned payload for VPD page 0x84 (three 6-byte identifiers) */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	const size_t dlen = sizeof(vpd84_data);

	memcpy(arr, vpd84_data, dlen);
	return (int)dlen;
}
1124 
1125 /* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const char *const net_addrs[] = {
		"https://www.kernel.org/config",
		"http://www.kernel.org/log",
	};
	/* association + service type for each descriptor:
	 * 0x1 = lu, storage configuration; 0x4 = lu, logging */
	static const unsigned char assoc_svc[] = { 0x1, 0x4 };
	int k, num = 0;

	for (k = 0; k < 2; ++k) {
		int olen = strlen(net_addrs[k]);
		/* NULL terminated URL, padded out to a multiple of 4 */
		int plen = ((olen + 1 + 3) / 4) * 4;

		arr[num++] = assoc_svc[k];
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* network address length */
		memcpy(arr + num, net_addrs[k], olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1159 
1160 /* SCSI ports VPD page */
/* Reports two relative ports: port A (real) and port B, the fake
 * unconnected port that resp_report_tgtpgs() marks unavailable. */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1201 
1202 
/* Canned ATA IDENTIFY-style payload returned verbatim by VPD page 0x89 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	/* static payload; caller writes the page length from this return */
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
1253 
1254 
/* Block limits VPD page template; variable fields are overwritten at run
 * time by inquiry_vpd_b0() below. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1261 
1262 /* Block limits VPD page (SBC-3) */
1263 static int inquiry_vpd_b0(unsigned char *arr)
1264 {
1265 	unsigned int gran;
1266 
1267 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1268 
1269 	/* Optimal transfer length granularity */
1270 	if (sdebug_opt_xferlen_exp != 0 &&
1271 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1272 		gran = 1 << sdebug_opt_xferlen_exp;
1273 	else
1274 		gran = 1 << sdebug_physblk_exp;
1275 	put_unaligned_be16(gran, arr + 2);
1276 
1277 	/* Maximum Transfer Length */
1278 	if (sdebug_store_sectors > 0x400)
1279 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1280 
1281 	/* Optimal Transfer Length */
1282 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1283 
1284 	if (sdebug_lbpu) {
1285 		/* Maximum Unmap LBA Count */
1286 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1287 
1288 		/* Maximum Unmap Block Descriptor Count */
1289 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1290 	}
1291 
1292 	/* Unmap Granularity Alignment */
1293 	if (sdebug_unmap_alignment) {
1294 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1295 		arr[28] |= 0x80; /* UGAVALID */
1296 	}
1297 
1298 	/* Optimal Unmap Granularity */
1299 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1300 
1301 	/* Maximum WRITE SAME Length */
1302 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1303 
1304 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1305 
1306 	return sizeof(vpdb0_data);
1307 }
1308 
1309 /* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
	/* zero the whole page, then set the two non-zero fields */
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* medium rotation rate low byte: non rotating (SSD) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1320 
1321 /* Logical block provisioning VPD page (SBC-4) */
/* Advertises which provisioning commands (UNMAP, WRITE SAME 16/10) are
 * enabled via the sdebug_lbp* module parameters, plus the LBPRZ field. */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;	/* LBPU: UNMAP supported */
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;	/* LBPWS: WRITE SAME(16) w/UNMAP */
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;	/* LBPWS10: WRITE SAME(10) w/UNMAP */
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
1339 
#define SDEBUG_LONG_INQ_SZ 96	/* bytes returned for standard INQUIRY */
#define SDEBUG_MAX_INQ_ARR_SZ 584	/* scratch buffer for INQUIRY responses */
1342 
/* Responds to standard INQUIRY and to every EVPD page advertised in the
 * "supported VPD pages" page (0x0). Returns 0, check_condition_result,
 * or DID_REQUEUE << 16 on allocation failure. */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		/* wlun gets no logical unit designator (lu_id_num < 0) */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			/* this page's length exceeds 255, needs 16 bits */
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1493 
/* Informational exceptions control mode page (0x1c). resp_requests() checks
 * byte 2 bit 2 (TEST) and the low nibble of byte 3 (MRIE). */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1496 
1497 static int resp_requests(struct scsi_cmnd * scp,
1498 			 struct sdebug_dev_info * devip)
1499 {
1500 	unsigned char * sbuff;
1501 	unsigned char *cmd = scp->cmnd;
1502 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1503 	bool dsense;
1504 	int len = 18;
1505 
1506 	memset(arr, 0, sizeof(arr));
1507 	dsense = !!(cmd[1] & 1);
1508 	sbuff = scp->sense_buffer;
1509 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1510 		if (dsense) {
1511 			arr[0] = 0x72;
1512 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1513 			arr[2] = THRESHOLD_EXCEEDED;
1514 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1515 			len = 8;
1516 		} else {
1517 			arr[0] = 0x70;
1518 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1519 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1520 			arr[12] = THRESHOLD_EXCEEDED;
1521 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1522 		}
1523 	} else {
1524 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1525 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1526 			;	/* have sense and formats match */
1527 		else if (arr[0] <= 0x70) {
1528 			if (dsense) {
1529 				memset(arr, 0, 8);
1530 				arr[0] = 0x72;
1531 				len = 8;
1532 			} else {
1533 				memset(arr, 0, 18);
1534 				arr[0] = 0x70;
1535 				arr[7] = 0xa;
1536 			}
1537 		} else if (dsense) {
1538 			memset(arr, 0, 8);
1539 			arr[0] = 0x72;
1540 			arr[1] = sbuff[2];     /* sense key */
1541 			arr[2] = sbuff[12];    /* asc */
1542 			arr[3] = sbuff[13];    /* ascq */
1543 			len = 8;
1544 		} else {
1545 			memset(arr, 0, 18);
1546 			arr[0] = 0x70;
1547 			arr[2] = sbuff[1];
1548 			arr[7] = 0xa;
1549 			arr[12] = sbuff[1];
1550 			arr[13] = sbuff[3];
1551 		}
1552 
1553 	}
1554 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1555 	return fill_from_dev_buffer(scp, arr, len);
1556 }
1557 
1558 static int resp_start_stop(struct scsi_cmnd * scp,
1559 			   struct sdebug_dev_info * devip)
1560 {
1561 	unsigned char *cmd = scp->cmnd;
1562 	int power_cond, stop;
1563 
1564 	power_cond = (cmd[4] & 0xf0) >> 4;
1565 	if (power_cond) {
1566 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1567 		return check_condition_result;
1568 	}
1569 	stop = !(cmd[4] & 1);
1570 	atomic_xchg(&devip->stopped, stop);
1571 	return 0;
1572 }
1573 
1574 static sector_t get_sdebug_capacity(void)
1575 {
1576 	static const unsigned int gibibyte = 1073741824;
1577 
1578 	if (sdebug_virtual_gb > 0)
1579 		return (sector_t)sdebug_virtual_gb *
1580 			(gibibyte / sdebug_sector_size);
1581 	else
1582 		return sdebug_store_sectors;
1583 }
1584 
#define SDEBUG_READCAP_ARR_SZ 8
/* READ CAPACITY(10): last LBA (capped at 0xffffffff) plus block size */
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0); /* use READ CAP(16) */
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
1603 
#define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY(16): last LBA, block size, protection and provisioning */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1641 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* REPORT TARGET PORT GROUPS (maintenance-in): reports the two single-port
 * groups implied by VPD page 0x88; group B is always unavailable. */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes its own 4-byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1719 
/* Response to the REPORT SUPPORTED OPERATION CODES command (MAINTENANCE IN,
 * service action 0xc per SPC-4). Walks the driver's opcode_info_arr table
 * to build either the "report all" list (reporting_opts 0) or a single
 * command descriptor (reporting_opts 1..3). When rctd is set a command
 * timeouts descriptor is appended to each entry.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts data */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	/* over-allocate so a descriptor may straddle the a_len boundary */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* per-command descriptor size */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* timeouts follow */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* sa field valid */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* descriptors for attached (same-opcode) variants */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* resume outer walk */
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported, standard format */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries by opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries by service action */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp to buffer size, then to the CDB allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1870 
1871 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1872 			  struct sdebug_dev_info *devip)
1873 {
1874 	bool repd;
1875 	u32 alloc_len, len;
1876 	u8 arr[16];
1877 	u8 *cmd = scp->cmnd;
1878 
1879 	memset(arr, 0, sizeof(arr));
1880 	repd = !!(cmd[2] & 0x80);
1881 	alloc_len = get_unaligned_be32(cmd + 6);
1882 	if (alloc_len < 4) {
1883 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1884 		return check_condition_result;
1885 	}
1886 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1887 	arr[1] = 0x1;		/* ITNRS */
1888 	if (repd) {
1889 		arr[3] = 0xc;
1890 		len = 16;
1891 	} else
1892 		len = 4;
1893 
1894 	len = (len < alloc_len) ? len : alloc_len;
1895 	return fill_from_dev_buffer(scp, arr, len);
1896 }
1897 
1898 /* <<Following mode page info copied from ST318451LW>> */
1899 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};
	const int plen = sizeof(err_recov_pg);

	memcpy(p, err_recov_pg, plen);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, plen - 2);
	return plen;
}
1910 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	const int plen = sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, plen);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, plen - 2);
	return plen;
}
1921 
1922 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1923 {       /* Format device page for mode_sense */
1924 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1925 				     0, 0, 0, 0, 0, 0, 0, 0,
1926 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1927 
1928 	memcpy(p, format_pg, sizeof(format_pg));
1929 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1930 	put_unaligned_be16(sdebug_sector_size, p + 12);
1931 	if (sdebug_removable)
1932 		p[20] |= 0x20; /* should agree with INQUIRY */
1933 	if (1 == pcontrol)
1934 		memset(p + 2, 0, sizeof(format_pg) - 2);
1935 	return sizeof(format_pg);
1936 }
1937 
/* Current values of the Caching mode page [0x8]. Bytes after the header
 * may be rewritten at runtime by MODE SELECT, and the WCE bit (byte 2,
 * 0x4) is cleared when the SDEBUG_OPT_N_WCE option is set. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1941 
1942 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1943 { 	/* Caching page for mode_sense */
1944 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1945 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1946 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1947 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1948 
1949 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1950 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1951 	memcpy(p, caching_pg, sizeof(caching_pg));
1952 	if (1 == pcontrol)
1953 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1954 	else if (2 == pcontrol)
1955 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1956 	return sizeof(caching_pg);
1957 }
1958 
/* Current values of the Control mode page [0xa]. The D_SENSE bit (byte 2,
 * 0x4) tracks sdebug_dsense, the ATO bit (byte 5, 0x80) tracks sdebug_ato,
 * and bytes after the header may be rewritten by MODE SELECT. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1961 
1962 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1963 { 	/* Control mode page for mode_sense */
1964 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1965 					0, 0, 0, 0};
1966 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1967 				     0, 0, 0x2, 0x4b};
1968 
1969 	if (sdebug_dsense)
1970 		ctrl_m_pg[2] |= 0x4;
1971 	else
1972 		ctrl_m_pg[2] &= ~0x4;
1973 
1974 	if (sdebug_ato)
1975 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1976 
1977 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1978 	if (1 == pcontrol)
1979 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1980 	else if (2 == pcontrol)
1981 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1982 	return sizeof(ctrl_m_pg);
1983 }
1984 
1985 
1986 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1987 {	/* Informational Exceptions control mode page for mode_sense */
1988 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1989 				       0, 0, 0x0, 0x0};
1990 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1991 				      0, 0, 0x0, 0x0};
1992 
1993 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1994 	if (1 == pcontrol)
1995 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1996 	else if (2 == pcontrol)
1997 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1998 	return sizeof(iec_m_pg);
1999 }
2000 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
	const int plen = sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, plen);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 2, 0, plen - 2);
	return plen;
}
2011 
2012 
/* SAS Phy Control And Discover mode subpage [0x19, 0x1] for MODE SENSE.
 * The template carries two 48-byte phy descriptors; SAS addresses are
 * patched in from the naa3 identifiers and per-phy values derived from
 * target_dev_id are written at offsets 20 and 68. */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in the SAS addresses for both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: report all-zero mask */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2045 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage [0x19, 0x2] */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0};
	const int plen = sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, plen);
	if (pcontrol == 1)	/* changeable values: report all-zero mask */
		memset(p + 4, 0, plen - 4);
	return plen;
}
2057 
2058 #define SDEBUG_MAX_MSENSE_SZ 256
2059 
/* Response to the MODE SENSE(6) and MODE SENSE(10) commands. Builds the
 * mode parameter header, an optional block descriptor (short or long)
 * and the requested mode page(s) in arr, then copies out at most
 * alloc_len bytes via fill_from_dev_buffer().
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;	/* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* page code */
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);	/* long LBA accepted */
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	/* write the mode parameter header; page data starts at offset */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {		/* short block descriptor */
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {	/* long block descriptor (LLBAA) */
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes itself: 1 byte for msense_6, 2 for 10 */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2217 
2218 #define SDEBUG_MAX_MSELECT_SZ 512
2219 
/* Response to the MODE SELECT(6) and MODE SELECT(10) commands. Accepts
 * changes to the Caching [0x8], Control [0xa] and Informational
 * Exceptions [0x1c] mode pages (stored in the file-global *_m_pg arrays)
 * and raises a MODE PARAMETERS CHANGED unit attention on success.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format: must be set */
	sp = cmd[1] & 0x1;	/* save pages: not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* NOTE(review): rejects mode data lengths > 2; the MODE DATA LENGTH
	 * field is reserved for MODE SELECT — confirm intended threshold */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);	/* start of first mode page */
	mpage = arr[off] & 0x3f;		/* page code */
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* page length byte must match before the stored page is updated */
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* keep sdebug_dsense in step with the D_SENSE bit */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2297 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page [0xd] parameter data */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0: 38 */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1: 65 */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2307 
2308 static int resp_ie_l_pg(unsigned char * arr)
2309 {
2310 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2311 		};
2312 
2313 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2314 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2315 		arr[4] = THRESHOLD_EXCEEDED;
2316 		arr[5] = 0xff;
2317 	}
2318 	return sizeof(ie_l_pg);
2319 }
2320 
2321 #define SDEBUG_MAX_LSENSE_SZ 512
2322 
2323 static int resp_log_sense(struct scsi_cmnd *scp,
2324 			  struct sdebug_dev_info *devip)
2325 {
2326 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2327 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2328 	unsigned char *cmd = scp->cmnd;
2329 
2330 	memset(arr, 0, sizeof(arr));
2331 	ppc = cmd[1] & 0x2;
2332 	sp = cmd[1] & 0x1;
2333 	if (ppc || sp) {
2334 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2335 		return check_condition_result;
2336 	}
2337 	pcode = cmd[2] & 0x3f;
2338 	subpcode = cmd[3] & 0xff;
2339 	alloc_len = get_unaligned_be16(cmd + 7);
2340 	arr[0] = pcode;
2341 	if (0 == subpcode) {
2342 		switch (pcode) {
2343 		case 0x0:	/* Supported log pages log page */
2344 			n = 4;
2345 			arr[n++] = 0x0;		/* this page */
2346 			arr[n++] = 0xd;		/* Temperature */
2347 			arr[n++] = 0x2f;	/* Informational exceptions */
2348 			arr[3] = n - 4;
2349 			break;
2350 		case 0xd:	/* Temperature log page */
2351 			arr[3] = resp_temp_l_pg(arr + 4);
2352 			break;
2353 		case 0x2f:	/* Informational exceptions log page */
2354 			arr[3] = resp_ie_l_pg(arr + 4);
2355 			break;
2356 		default:
2357 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2358 			return check_condition_result;
2359 		}
2360 	} else if (0xff == subpcode) {
2361 		arr[0] |= 0x40;
2362 		arr[1] = subpcode;
2363 		switch (pcode) {
2364 		case 0x0:	/* Supported log pages and subpages log page */
2365 			n = 4;
2366 			arr[n++] = 0x0;
2367 			arr[n++] = 0x0;		/* 0,0 page */
2368 			arr[n++] = 0x0;
2369 			arr[n++] = 0xff;	/* this page */
2370 			arr[n++] = 0xd;
2371 			arr[n++] = 0x0;		/* Temperature */
2372 			arr[n++] = 0x2f;
2373 			arr[n++] = 0x0;	/* Informational exceptions */
2374 			arr[3] = n - 4;
2375 			break;
2376 		case 0xd:	/* Temperature subpages */
2377 			n = 4;
2378 			arr[n++] = 0xd;
2379 			arr[n++] = 0x0;		/* Temperature */
2380 			arr[3] = n - 4;
2381 			break;
2382 		case 0x2f:	/* Informational exceptions subpages */
2383 			n = 4;
2384 			arr[n++] = 0x2f;
2385 			arr[n++] = 0x0;		/* Informational exceptions */
2386 			arr[3] = n - 4;
2387 			break;
2388 		default:
2389 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2390 			return check_condition_result;
2391 		}
2392 	} else {
2393 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2394 		return check_condition_result;
2395 	}
2396 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2397 	return fill_from_dev_buffer(scp, arr,
2398 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2399 }
2400 
2401 static int check_device_access_params(struct scsi_cmnd *scp,
2402 				      unsigned long long lba, unsigned int num)
2403 {
2404 	if (lba + num > sdebug_capacity) {
2405 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2406 		return check_condition_result;
2407 	}
2408 	/* transfer length excessive (tie in to block limits VPD page) */
2409 	if (num > sdebug_store_sectors) {
2410 		/* needs work to find which cdb byte 'num' comes from */
2411 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2412 		return check_condition_result;
2413 	}
2414 	return 0;
2415 }
2416 
/* Copy data for 'num' sectors starting at 'lba' between the command's
 * scatter-gather list and the fake store, in the direction given by
 * do_write. The lba is reduced modulo sdebug_store_sectors, and a range
 * that runs past the end of the store wraps around to its start.
 * Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* block = lba mod store size; 'rest' is the wrapped sector count */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrapped part continues at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2457 
2458 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2459  * arr into fake_store(lba,num) and return true. If comparison fails then
2460  * return false. */
2461 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2462 {
2463 	bool res;
2464 	u64 block, rest = 0;
2465 	u32 store_blks = sdebug_store_sectors;
2466 	u32 lb_size = sdebug_sector_size;
2467 
2468 	block = do_div(lba, store_blks);
2469 	if (block + num > store_blks)
2470 		rest = block + num - store_blks;
2471 
2472 	res = !memcmp(fake_storep + (block * lb_size), arr,
2473 		      (num - rest) * lb_size);
2474 	if (!res)
2475 		return res;
2476 	if (rest)
2477 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2478 			     rest * lb_size);
2479 	if (!res)
2480 		return res;
2481 	arr += num * lb_size;
2482 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2483 	if (rest)
2484 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2485 		       rest * lb_size);
2486 	return res;
2487 }
2488 
2489 static __be16 dif_compute_csum(const void *buf, int len)
2490 {
2491 	__be16 csum;
2492 
2493 	if (sdebug_guard)
2494 		csum = (__force __be16)ip_compute_csum(buf, len);
2495 	else
2496 		csum = cpu_to_be16(crc_t10dif(buf, len));
2497 
2498 	return csum;
2499 }
2500 
2501 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2502 		      sector_t sector, u32 ei_lba)
2503 {
2504 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
2505 
2506 	if (sdt->guard_tag != csum) {
2507 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2508 			(unsigned long)sector,
2509 			be16_to_cpu(sdt->guard_tag),
2510 			be16_to_cpu(csum));
2511 		return 0x01;
2512 	}
2513 	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2514 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2515 		pr_err("REF check failed on sector %lu\n",
2516 			(unsigned long)sector);
2517 		return 0x03;
2518 	}
2519 	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2520 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
2521 		pr_err("REF check failed on sector %lu\n",
2522 			(unsigned long)sector);
2523 		return 0x03;
2524 	}
2525 	return 0;
2526 }
2527 
/* Copy 'sectors' protection tuples between the command's protection
 * scatter-gather list and dif_storep, starting at 'sector'. When read is
 * true the direction is store -> sgl, otherwise sgl -> store. Ranges that
 * run past the end of dif_storep wrap around to its start. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the tail that wraps past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2570 
/* Verify the stored protection tuples for 'sectors' sectors starting at
 * start_sec, then copy them out to the command's protection sgl. Tuples
 * whose app_tag is 0xffff are skipped (not checked). Returns 0 on success
 * or the non-zero code from dif_verify() on the first failing sector. */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct t10_pi_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2599 
/*
 * Respond to READ(6/10/12/16/32) and the read phase of XDWRITEREAD(10).
 * Decodes lba/num from the cdb variant, performs protection (DIF/DIX)
 * sanity checks, honours configured error injections, then copies data
 * from the fake store into the command's data-in buffer.
 * Returns 0 on success, a check-condition/illegal-condition result, or
 * DID_ERROR << 16 on an internal transfer failure.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* only READ(32) carries this field; 0 otherwise */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* Decode lba and transfer length per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* READ(6): 21-bit lba spread over cdb bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length of 0 means 256 blocks for READ(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	/* Protection checks when the device is formatted with DIF */
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 requires the *_32 cdb variants (RDPROTECT must be 0) */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		/* warn (but allow) unprotected reads on type 1/3 devices */
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* optional injected medium error over a fixed lba window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* ret is the number of bytes actually transferred */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* per-command error injections, set when the command was queued */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2740 
/*
 * Pretty-print a buffer (normally one sector) to the kernel log,
 * 16 bytes per output line: printable ASCII is rendered as " c ",
 * everything else as a 2-digit hex value.
 *
 * Fixes vs previous version:
 *  - stop at 'len' inside the inner loop, so a 'len' that is not a
 *    multiple of 16 no longer reads past the end of 'buf';
 *  - treat 0x7e ('~') as printable (ASCII printables are 0x20..0x7e).
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16 && i + j < len; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c <= 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2762 
/*
 * Verify the protection information (PI) supplied with a protected WRITE.
 * Walks the protection scatterlist and the data scatterlist in lockstep
 * (one t10_pi_tuple per logical block), calling dif_verify() on each
 * block.  On success the PI is copied into dif_storep via dif_copy_prot().
 * Returns 0 on success, or the non-zero dif_verify()/iterator error code
 * (caller turns it into a sense code).
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* byte offset within current protection page */
	int dpage_offset;	/* byte offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	/* SG_MITER_ATOMIC: pages are kmapped atomically, so no sleeping
	 * between sg_miter_next() and sg_miter_stop() */
	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;	/* data sg exhausted before prot sg */
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				/* log offending block to aid debugging */
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* tell the iterator how much of the data page we consumed */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2834 
2835 static unsigned long lba_to_map_index(sector_t lba)
2836 {
2837 	if (sdebug_unmap_alignment)
2838 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2839 	sector_div(lba, sdebug_unmap_granularity);
2840 	return lba;
2841 }
2842 
2843 static sector_t map_index_to_lba(unsigned long index)
2844 {
2845 	sector_t lba = index * sdebug_unmap_granularity;
2846 
2847 	if (sdebug_unmap_alignment)
2848 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2849 	return lba;
2850 }
2851 
2852 static unsigned int map_state(sector_t lba, unsigned int *num)
2853 {
2854 	sector_t end;
2855 	unsigned int mapped;
2856 	unsigned long index;
2857 	unsigned long next;
2858 
2859 	index = lba_to_map_index(lba);
2860 	mapped = test_bit(index, map_storep);
2861 
2862 	if (mapped)
2863 		next = find_next_zero_bit(map_storep, map_size, index);
2864 	else
2865 		next = find_next_bit(map_storep, map_size, index);
2866 
2867 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2868 	*num = end - lba;
2869 	return mapped;
2870 }
2871 
2872 static void map_region(sector_t lba, unsigned int len)
2873 {
2874 	sector_t end = lba + len;
2875 
2876 	while (lba < end) {
2877 		unsigned long index = lba_to_map_index(lba);
2878 
2879 		if (index < map_size)
2880 			set_bit(index, map_storep);
2881 
2882 		lba = map_index_to_lba(index + 1);
2883 	}
2884 }
2885 
2886 static void unmap_region(sector_t lba, unsigned int len)
2887 {
2888 	sector_t end = lba + len;
2889 
2890 	while (lba < end) {
2891 		unsigned long index = lba_to_map_index(lba);
2892 
2893 		if (lba == map_index_to_lba(index) &&
2894 		    lba + sdebug_unmap_granularity <= end &&
2895 		    index < map_size) {
2896 			clear_bit(index, map_storep);
2897 			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
2898 				memset(fake_storep +
2899 				       lba * sdebug_sector_size,
2900 				       (sdebug_lbprz & 1) ? 0 : 0xff,
2901 				       sdebug_sector_size *
2902 				       sdebug_unmap_granularity);
2903 			}
2904 			if (dif_storep) {
2905 				memset(dif_storep + lba, 0xff,
2906 				       sizeof(*dif_storep) *
2907 				       sdebug_unmap_granularity);
2908 			}
2909 		}
2910 		lba = map_index_to_lba(index + 1);
2911 	}
2912 }
2913 
2914 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2915 {
2916 	u8 *cmd = scp->cmnd;
2917 	u64 lba;
2918 	u32 num;
2919 	u32 ei_lba;
2920 	unsigned long iflags;
2921 	int ret;
2922 	bool check_prot;
2923 
2924 	switch (cmd[0]) {
2925 	case WRITE_16:
2926 		ei_lba = 0;
2927 		lba = get_unaligned_be64(cmd + 2);
2928 		num = get_unaligned_be32(cmd + 10);
2929 		check_prot = true;
2930 		break;
2931 	case WRITE_10:
2932 		ei_lba = 0;
2933 		lba = get_unaligned_be32(cmd + 2);
2934 		num = get_unaligned_be16(cmd + 7);
2935 		check_prot = true;
2936 		break;
2937 	case WRITE_6:
2938 		ei_lba = 0;
2939 		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2940 		      (u32)(cmd[1] & 0x1f) << 16;
2941 		num = (0 == cmd[4]) ? 256 : cmd[4];
2942 		check_prot = true;
2943 		break;
2944 	case WRITE_12:
2945 		ei_lba = 0;
2946 		lba = get_unaligned_be32(cmd + 2);
2947 		num = get_unaligned_be32(cmd + 6);
2948 		check_prot = true;
2949 		break;
2950 	case 0x53:	/* XDWRITEREAD(10) */
2951 		ei_lba = 0;
2952 		lba = get_unaligned_be32(cmd + 2);
2953 		num = get_unaligned_be16(cmd + 7);
2954 		check_prot = false;
2955 		break;
2956 	default:	/* assume WRITE(32) */
2957 		lba = get_unaligned_be64(cmd + 12);
2958 		ei_lba = get_unaligned_be32(cmd + 20);
2959 		num = get_unaligned_be32(cmd + 28);
2960 		check_prot = false;
2961 		break;
2962 	}
2963 	if (unlikely(have_dif_prot && check_prot)) {
2964 		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2965 		    (cmd[1] & 0xe0)) {
2966 			mk_sense_invalid_opcode(scp);
2967 			return check_condition_result;
2968 		}
2969 		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2970 		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2971 		    (cmd[1] & 0xe0) == 0)
2972 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2973 				    "to DIF device\n");
2974 	}
2975 
2976 	/* inline check_device_access_params() */
2977 	if (unlikely(lba + num > sdebug_capacity)) {
2978 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2979 		return check_condition_result;
2980 	}
2981 	/* transfer length excessive (tie in to block limits VPD page) */
2982 	if (unlikely(num > sdebug_store_sectors)) {
2983 		/* needs work to find which cdb byte 'num' comes from */
2984 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2985 		return check_condition_result;
2986 	}
2987 
2988 	write_lock_irqsave(&atomic_rw, iflags);
2989 
2990 	/* DIX + T10 DIF */
2991 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2992 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2993 
2994 		if (prot_ret) {
2995 			write_unlock_irqrestore(&atomic_rw, iflags);
2996 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2997 			return illegal_condition_result;
2998 		}
2999 	}
3000 
3001 	ret = do_device_access(scp, lba, num, true);
3002 	if (unlikely(scsi_debug_lbp()))
3003 		map_region(lba, num);
3004 	write_unlock_irqrestore(&atomic_rw, iflags);
3005 	if (unlikely(-1 == ret))
3006 		return DID_ERROR << 16;
3007 	else if (unlikely(sdebug_verbose &&
3008 			  (ret < (num * sdebug_sector_size))))
3009 		sdev_printk(KERN_INFO, scp->device,
3010 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3011 			    my_name, num * sdebug_sector_size, ret);
3012 
3013 	if (unlikely(sdebug_any_injecting_opt)) {
3014 		struct sdebug_queued_cmd *sqcp =
3015 				(struct sdebug_queued_cmd *)scp->host_scribble;
3016 
3017 		if (sqcp) {
3018 			if (sqcp->inj_recovered) {
3019 				mk_sense_buffer(scp, RECOVERED_ERROR,
3020 						THRESHOLD_EXCEEDED, 0);
3021 				return check_condition_result;
3022 			} else if (sqcp->inj_dif) {
3023 				/* Logical block guard check failed */
3024 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3025 				return illegal_condition_result;
3026 			} else if (sqcp->inj_dix) {
3027 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3028 				return illegal_condition_result;
3029 			}
3030 		}
3031 	}
3032 	return 0;
3033 }
3034 
/*
 * Common worker for WRITE SAME(10/16).  If 'unmap' is set (and thin
 * provisioning enabled) the range is deallocated; otherwise one logical
 * block is obtained (zeroes when 'ndob', else fetched from the data-out
 * buffer) and replicated across [lba, lba + num).
 * Note: ei_lba is currently unused here — presumably reserved for a
 * protected WRITE SAME(32) path; TODO confirm.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	/* deallocate instead of writing when the UNMAP bit was set */
	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;	/* byte offset into fake store */
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);	/* mark blocks as provisioned */
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3085 
3086 static int resp_write_same_10(struct scsi_cmnd *scp,
3087 			      struct sdebug_dev_info *devip)
3088 {
3089 	u8 *cmd = scp->cmnd;
3090 	u32 lba;
3091 	u16 num;
3092 	u32 ei_lba = 0;
3093 	bool unmap = false;
3094 
3095 	if (cmd[1] & 0x8) {
3096 		if (sdebug_lbpws10 == 0) {
3097 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3098 			return check_condition_result;
3099 		} else
3100 			unmap = true;
3101 	}
3102 	lba = get_unaligned_be32(cmd + 2);
3103 	num = get_unaligned_be16(cmd + 7);
3104 	if (num > sdebug_write_same_length) {
3105 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3106 		return check_condition_result;
3107 	}
3108 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3109 }
3110 
3111 static int resp_write_same_16(struct scsi_cmnd *scp,
3112 			      struct sdebug_dev_info *devip)
3113 {
3114 	u8 *cmd = scp->cmnd;
3115 	u64 lba;
3116 	u32 num;
3117 	u32 ei_lba = 0;
3118 	bool unmap = false;
3119 	bool ndob = false;
3120 
3121 	if (cmd[1] & 0x8) {	/* UNMAP */
3122 		if (sdebug_lbpws == 0) {
3123 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3124 			return check_condition_result;
3125 		} else
3126 			unmap = true;
3127 	}
3128 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3129 		ndob = true;
3130 	lba = get_unaligned_be64(cmd + 2);
3131 	num = get_unaligned_be32(cmd + 10);
3132 	if (num > sdebug_write_same_length) {
3133 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3134 		return check_condition_result;
3135 	}
3136 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3137 }
3138 
3139 /* Note the mode field is in the same position as the (lower) service action
3140  * field. For the Report supported operation codes command, SPC-4 suggests
3141  * each mode of this command should be reported separately; for future. */
3142 static int resp_write_buffer(struct scsi_cmnd *scp,
3143 			     struct sdebug_dev_info *devip)
3144 {
3145 	u8 *cmd = scp->cmnd;
3146 	struct scsi_device *sdp = scp->device;
3147 	struct sdebug_dev_info *dp;
3148 	u8 mode;
3149 
3150 	mode = cmd[1] & 0x1f;
3151 	switch (mode) {
3152 	case 0x4:	/* download microcode (MC) and activate (ACT) */
3153 		/* set UAs on this device only */
3154 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3155 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3156 		break;
3157 	case 0x5:	/* download MC, save and ACT */
3158 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3159 		break;
3160 	case 0x6:	/* download MC with offsets and ACT */
3161 		/* set UAs on most devices (LUs) in this target */
3162 		list_for_each_entry(dp,
3163 				    &devip->sdbg_host->dev_info_list,
3164 				    dev_list)
3165 			if (dp->target == sdp->id) {
3166 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3167 				if (devip != dp)
3168 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3169 						dp->uas_bm);
3170 			}
3171 		break;
3172 	case 0x7:	/* download MC with offsets, save, and ACT */
3173 		/* set UA on all devices (LUs) in this target */
3174 		list_for_each_entry(dp,
3175 				    &devip->sdbg_host->dev_info_list,
3176 				    dev_list)
3177 			if (dp->target == sdp->id)
3178 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3179 					dp->uas_bm);
3180 		break;
3181 	default:
3182 		/* do nothing for this command for other mode values */
3183 		break;
3184 	}
3185 	return 0;
3186 }
3187 
/*
 * COMPARE AND WRITE (0x89): the data-out buffer holds 'num' blocks to
 * compare followed by 'num' blocks to write.  Both halves are fetched
 * into a temporary buffer by temporarily pointing the global fake_storep
 * at it; the compare-then-write itself is done by comp_write_worker().
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;	/* saves the real store pointer during swap */
	u64 lba;
	u32 dnum;		/* total blocks in data-out: compare + write */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	/* GFP_ATOMIC: may run from non-sleepable (e.g. timer) context */
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		/* miscompare: nothing written, report to initiator */
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);	/* mark blocks as provisioned */
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3263 
/* One UNMAP block descriptor as laid out in the command's parameter
 * list: 8-byte starting LBA, 4-byte block count, 4 reserved bytes. */
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};
3269 
/*
 * UNMAP: deallocate the block ranges listed in the parameter data.
 * Each descriptor is range-checked, then cleared from the provisioning
 * map under the store write lock.
 * NOTE(review): the BUG_ON()s below fire on malformed parameter data,
 * which can be influenced from user space (e.g. via SG_IO) — consider
 * whether a CHECK CONDITION would be preferable; the file's stated
 * policy is to PANIC on anything out of the ordinary.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;
	unsigned long iflags;


	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	/* GFP_ATOMIC: may run from non-sleepable (e.g. timer) context */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* header must agree with the cdb's parameter list length */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock_irqsave(&atomic_rw, iflags);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(buf);

	return ret;
}
3325 
3326 #define SDEBUG_GET_LBA_STATUS_LEN 32
3327 
3328 static int resp_get_lba_status(struct scsi_cmnd *scp,
3329 			       struct sdebug_dev_info *devip)
3330 {
3331 	u8 *cmd = scp->cmnd;
3332 	u64 lba;
3333 	u32 alloc_len, mapped, num;
3334 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3335 	int ret;
3336 
3337 	lba = get_unaligned_be64(cmd + 2);
3338 	alloc_len = get_unaligned_be32(cmd + 10);
3339 
3340 	if (alloc_len < 24)
3341 		return 0;
3342 
3343 	ret = check_device_access_params(scp, lba, 1);
3344 	if (ret)
3345 		return ret;
3346 
3347 	if (scsi_debug_lbp())
3348 		mapped = map_state(lba, &num);
3349 	else {
3350 		mapped = 1;
3351 		/* following just in case virtual_gb changed */
3352 		sdebug_capacity = get_sdebug_capacity();
3353 		if (sdebug_capacity - lba <= 0xffffffff)
3354 			num = sdebug_capacity - lba;
3355 		else
3356 			num = 0xffffffff;
3357 	}
3358 
3359 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3360 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3361 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3362 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3363 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3364 
3365 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3366 }
3367 
3368 #define RL_BUCKET_ELEMS 8
3369 
3370 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3371  * (W-LUN), the normal Linux scanning logic does not associate it with a
3372  * device (e.g. /dev/sg7). The following magic will make that association:
3373  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3374  * where <n> is a host number. If there are multiple targets in a host then
3375  * the above will associate a W-LUN to each target. To only get a W-LUN
3376  * for target 2, then use "echo '- 2 49409' > scan" .
3377  */
/*
 * REPORT LUNS: builds the response in RL_BUCKET_ELEMS-sized buckets so
 * an arbitrary number of LUNs can be reported from a fixed-size stack
 * array, copying each full bucket out via p_fill_from_dev_buffer().
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket starts with the 8-byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		/* partial bucket means all normal LUNs have been emitted */
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* lun_p still points at the next free slot in the last bucket */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3467 }
3468 
/*
 * XOR step of XDWRITEREAD(10): XORs the command's data-out buffer into
 * its data-in buffer in place.  The data-out side is first flattened
 * into a temporary contiguous buffer, then the data-in scatterlist is
 * walked with an atomic sg_miter and each byte is XORed.
 * Note: lba/num/devip are currently unused by this implementation.
 */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;	/* running offset into the flattened data-out */
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	/* XOR each data-in page against the matching data-out bytes */
	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3504 
3505 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3506 			       struct sdebug_dev_info *devip)
3507 {
3508 	u8 *cmd = scp->cmnd;
3509 	u64 lba;
3510 	u32 num;
3511 	int errsts;
3512 
3513 	if (!scsi_bidi_cmnd(scp)) {
3514 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3515 				INSUFF_RES_ASCQ);
3516 		return check_condition_result;
3517 	}
3518 	errsts = resp_read_dt0(scp, devip);
3519 	if (errsts)
3520 		return errsts;
3521 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3522 		errsts = resp_write_dt0(scp, devip);
3523 		if (errsts)
3524 			return errsts;
3525 	}
3526 	lba = get_unaligned_be32(cmd + 2);
3527 	num = get_unaligned_be16(cmd + 7);
3528 	return resp_xdwriteread(scp, lba, num, devip);
3529 }
3530 
3531 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3532 {
3533 	struct sdebug_queue *sqp = sdebug_q_arr;
3534 
3535 	if (sdebug_mq_active) {
3536 		u32 tag = blk_mq_unique_tag(cmnd->request);
3537 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3538 
3539 		if (unlikely(hwq >= submit_queues)) {
3540 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3541 			hwq %= submit_queues;
3542 		}
3543 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3544 		return sqp + hwq;
3545 	} else
3546 		return sqp;
3547 }
3548 
3549 /* Queued (deferred) command completions converge here. */
/* Queued (deferred) command completions converge here: locate the queued
 * command slot named by sd_dp, clear it under the queue lock, handle a
 * pending max_queue reduction, then invoke the mid-level done callback. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;	/* non-zero while a max_queue shrink is pending */
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions running on a different cpu than submit */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* slot already emptied (e.g. aborted); nothing to complete */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* retirement finishes once no in-use slot is above the
		 * new limit */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3613 
3614 /* When high resolution timer goes off this function is called. */
3615 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3616 {
3617 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3618 						  hrt);
3619 	sdebug_q_cmd_complete(sd_dp);
3620 	return HRTIMER_NORESTART;
3621 }
3622 
3623 /* When work queue schedules work, it calls this function. */
3624 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3625 {
3626 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3627 						  ew.work);
3628 	sdebug_q_cmd_complete(sd_dp);
3629 }
3630 
/* Shared LU name used when sdebug_uuid_ctl == 2: generated once on first
 * device creation, then copied into every subsequent device. */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3633 
3634 static struct sdebug_dev_info *sdebug_device_create(
3635 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3636 {
3637 	struct sdebug_dev_info *devip;
3638 
3639 	devip = kzalloc(sizeof(*devip), flags);
3640 	if (devip) {
3641 		if (sdebug_uuid_ctl == 1)
3642 			uuid_gen(&devip->lu_name);
3643 		else if (sdebug_uuid_ctl == 2) {
3644 			if (got_shared_uuid)
3645 				devip->lu_name = shared_uuid;
3646 			else {
3647 				uuid_gen(&shared_uuid);
3648 				got_shared_uuid = true;
3649 				devip->lu_name = shared_uuid;
3650 			}
3651 		}
3652 		devip->sdbg_host = sdbg_host;
3653 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3654 	}
3655 	return devip;
3656 }
3657 
3658 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3659 {
3660 	struct sdebug_host_info *sdbg_host;
3661 	struct sdebug_dev_info *open_devip = NULL;
3662 	struct sdebug_dev_info *devip;
3663 
3664 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3665 	if (!sdbg_host) {
3666 		pr_err("Host info NULL\n");
3667 		return NULL;
3668 	}
3669 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3670 		if ((devip->used) && (devip->channel == sdev->channel) &&
3671 		    (devip->target == sdev->id) &&
3672 		    (devip->lun == sdev->lun))
3673 			return devip;
3674 		else {
3675 			if ((!devip->used) && (!open_devip))
3676 				open_devip = devip;
3677 		}
3678 	}
3679 	if (!open_devip) { /* try and make a new one */
3680 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3681 		if (!open_devip) {
3682 			pr_err("out of memory at line %d\n", __LINE__);
3683 			return NULL;
3684 		}
3685 	}
3686 
3687 	open_devip->channel = sdev->channel;
3688 	open_devip->target = sdev->id;
3689 	open_devip->lun = sdev->lun;
3690 	open_devip->sdbg_host = sdbg_host;
3691 	atomic_set(&open_devip->num_in_q, 0);
3692 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3693 	open_devip->used = true;
3694 	return open_devip;
3695 }
3696 
/* Per-device allocation hook: optionally logs the device address and marks
 * the request queue as BIDI (bidirectional) capable.  Always succeeds. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3705 
3706 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3707 {
3708 	struct sdebug_dev_info *devip =
3709 			(struct sdebug_dev_info *)sdp->hostdata;
3710 
3711 	if (sdebug_verbose)
3712 		pr_info("slave_configure <%u %u %u %llu>\n",
3713 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3714 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3715 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3716 	if (devip == NULL) {
3717 		devip = find_build_dev_info(sdp);
3718 		if (devip == NULL)
3719 			return 1;  /* no resources, will be marked offline */
3720 	}
3721 	sdp->hostdata = devip;
3722 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3723 	if (sdebug_no_uld)
3724 		sdp->no_uld_attach = 1;
3725 	config_cdb_len(sdp);
3726 	return 0;
3727 }
3728 
3729 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3730 {
3731 	struct sdebug_dev_info *devip =
3732 		(struct sdebug_dev_info *)sdp->hostdata;
3733 
3734 	if (sdebug_verbose)
3735 		pr_info("slave_destroy <%u %u %u %llu>\n",
3736 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3737 	if (devip) {
3738 		/* make this slot available for re-use */
3739 		devip->used = false;
3740 		sdp->hostdata = NULL;
3741 	}
3742 }
3743 
3744 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3745 {
3746 	if (!sd_dp)
3747 		return;
3748 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3749 		hrtimer_cancel(&sd_dp->hrt);
3750 	else if (sdebug_jdelay < 0)
3751 		cancel_work_sync(&sd_dp->ew.work);
3752 }
3753 
/* If @cmnd is found on one of the submission queues, cancel its timer or
 * work item and return true; else return false.  The per-queue lock is
 * dropped before stop_qc_helper() because cancelling may block waiting
 * for a completion callback that itself takes qc_lock. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan up to the larger of the live and retired queue depths */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				/* NOTE(review): bit cleared outside qc_lock;
				 * presumably safe once a_cmnd is NULL — confirm */
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3793 
/* Cancels the timer or work item of every queued command on every
 * submission queue.  The per-queue lock is dropped around each
 * stop_qc_helper() call (which may block) and re-acquired afterwards,
 * so the bitmap scan can observe concurrent changes. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;	/* slot reserved but no command */
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3826 
3827 /* Free queued command memory on heap */
3828 static void free_all_queued(void)
3829 {
3830 	int j, k;
3831 	struct sdebug_queue *sqp;
3832 	struct sdebug_queued_cmd *sqcp;
3833 
3834 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3835 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3836 			sqcp = &sqp->qc_arr[k];
3837 			kfree(sqcp->sd_dp);
3838 			sqcp->sd_dp = NULL;
3839 		}
3840 	}
3841 }
3842 
3843 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3844 {
3845 	bool ok;
3846 
3847 	++num_aborts;
3848 	if (SCpnt) {
3849 		ok = stop_queued_cmnd(SCpnt);
3850 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3851 			sdev_printk(KERN_INFO, SCpnt->device,
3852 				    "%s: command%s found\n", __func__,
3853 				    ok ? "" : " not");
3854 	}
3855 	return SUCCESS;
3856 }
3857 
3858 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3859 {
3860 	++num_dev_resets;
3861 	if (SCpnt && SCpnt->device) {
3862 		struct scsi_device *sdp = SCpnt->device;
3863 		struct sdebug_dev_info *devip =
3864 				(struct sdebug_dev_info *)sdp->hostdata;
3865 
3866 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3867 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3868 		if (devip)
3869 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3870 	}
3871 	return SUCCESS;
3872 }
3873 
3874 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3875 {
3876 	struct sdebug_host_info *sdbg_host;
3877 	struct sdebug_dev_info *devip;
3878 	struct scsi_device *sdp;
3879 	struct Scsi_Host *hp;
3880 	int k = 0;
3881 
3882 	++num_target_resets;
3883 	if (!SCpnt)
3884 		goto lie;
3885 	sdp = SCpnt->device;
3886 	if (!sdp)
3887 		goto lie;
3888 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3889 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3890 	hp = sdp->host;
3891 	if (!hp)
3892 		goto lie;
3893 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3894 	if (sdbg_host) {
3895 		list_for_each_entry(devip,
3896 				    &sdbg_host->dev_info_list,
3897 				    dev_list)
3898 			if (devip->target == sdp->id) {
3899 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3900 				++k;
3901 			}
3902 	}
3903 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3904 		sdev_printk(KERN_INFO, sdp,
3905 			    "%s: %d device(s) found in target\n", __func__, k);
3906 lie:
3907 	return SUCCESS;
3908 }
3909 
3910 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3911 {
3912 	struct sdebug_host_info *sdbg_host;
3913 	struct sdebug_dev_info *devip;
3914 	struct scsi_device *sdp;
3915 	struct Scsi_Host *hp;
3916 	int k = 0;
3917 
3918 	++num_bus_resets;
3919 	if (!(SCpnt && SCpnt->device))
3920 		goto lie;
3921 	sdp = SCpnt->device;
3922 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3923 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3924 	hp = sdp->host;
3925 	if (hp) {
3926 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3927 		if (sdbg_host) {
3928 			list_for_each_entry(devip,
3929 					    &sdbg_host->dev_info_list,
3930 					    dev_list) {
3931 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3932 				++k;
3933 			}
3934 		}
3935 	}
3936 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3937 		sdev_printk(KERN_INFO, sdp,
3938 			    "%s: %d device(s) found in host\n", __func__, k);
3939 lie:
3940 	return SUCCESS;
3941 }
3942 
/* Error-handler host reset hook: raises a bus-reset unit attention on
 * every device of every simulated host, cancels all queued commands and
 * always reports SUCCESS. */
static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_host_resets;
	/* NOTE(review): unlike the other reset hooks, SCpnt itself is
	 * dereferenced without a NULL check here and below — confirm the
	 * mid level guarantees a non-NULL command for host resets */
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
3967 
/* Write a DOS/MBR partition table into the first sector of the RAM store
 * @ramp, splitting the remaining capacity into sdebug_num_parts equal,
 * cylinder-aligned partitions.  No-op for stores under 1 MiB or when no
 * partitions are requested. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	/* partition start sectors; [num_parts] = end, [num_parts+1] = 0 sentinel */
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track (sdebug_sectors_per) is reserved for the MBR */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;	/* round down to cylinder boundary */
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert linear start sector to C/H/S (sector is 1-based) */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4017 
4018 static void block_unblock_all_queues(bool block)
4019 {
4020 	int j;
4021 	struct sdebug_queue *sqp;
4022 
4023 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4024 		atomic_set(&sqp->blocked, (int)block);
4025 }
4026 
4027 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4028  * commands will be processed normally before triggers occur.
4029  */
4030 static void tweak_cmnd_count(void)
4031 {
4032 	int count, modulo;
4033 
4034 	modulo = abs(sdebug_every_nth);
4035 	if (modulo < 2)
4036 		return;
4037 	block_unblock_all_queues(true);
4038 	count = atomic_read(&sdebug_cmnd_count);
4039 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4040 	block_unblock_all_queues(false);
4041 }
4042 
/* Reset the global command, completion, missed-CPU and task-set-full
 * counters (used when statistics collection is (re)enabled). */
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
4050 
4051 static void setup_inject(struct sdebug_queue *sqp,
4052 			 struct sdebug_queued_cmd *sqcp)
4053 {
4054 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
4055 		return;
4056 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4057 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4058 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4059 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4060 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4061 	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
4062 }
4063 
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @scsi_result is the status to report; @delta_jiff selects the deferral
 * mechanism: 0 -> respond inline, >0 -> hrtimer delay in jiffies,
 * <0 -> work queue (with sdebug_ndelay > 0 also using the hrtimer).
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result, int delta_jiff)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (unlikely(sdebug_verbose && scsi_result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, scsi_result);
	if (delta_jiff == 0)
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	/* device queue full: report TASK SET FULL unless an error result
	 * was already requested (then respond inline with that result) */
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* rare TASK SET FULL injection when nearly at queue depth */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		/* no free slot in this submission queue */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* claim slot k and record the command */
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	cmnd->result = scsi_result;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (delta_jiff > 0 || sdebug_ndelay > 0) {
		/* hrtimer path; sdebug_defer allocated lazily and reused */
		ktime_t kt;

		if (delta_jiff > 0) {
			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
		} else
			kt = sdebug_ndelay;
		if (NULL == sd_dp) {
			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (NULL == sd_dp) {
			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		schedule_work(&sd_dp->ew.work);
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4196 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* S_IRUGO-only parameters are fixed at load time; S_IWUSR ones may be
 * changed at runtime (without the driver being notified, per above). */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
4255 
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* Keep these descriptions in sync with the module_param_named() list above. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4311 
/* Static buffer returned by scsi_debug_info(); shared by all hosts. */
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
4314 
4315 static const char * scsi_debug_info(struct Scsi_Host * shp)
4316 {
4317 	int k;
4318 
4319 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4320 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4321 	if (k >= (SDEBUG_INFO_LEN - 1))
4322 		return sdebug_info;
4323 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4324 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4325 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4326 		  "statistics", (int)sdebug_statistics);
4327 	return sdebug_info;
4328 }
4329 
4330 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4331 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4332 				 int length)
4333 {
4334 	char arr[16];
4335 	int opts;
4336 	int minLen = length > 15 ? 15 : length;
4337 
4338 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4339 		return -EACCES;
4340 	memcpy(arr, buffer, minLen);
4341 	arr[minLen] = '\0';
4342 	if (1 != sscanf(arr, "%d", &opts))
4343 		return -EINVAL;
4344 	sdebug_opts = opts;
4345 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4346 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4347 	if (sdebug_every_nth != 0)
4348 		tweak_cmnd_count();
4349 	return length;
4350 }
4351 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-queue occupancy: show the first/last busy bits, if any */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4397 
/* sysfs: show the current response delay in jiffies (sdebug_jdelay). */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
4402 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4403  * of delay is jiffies.
4404  */
4405 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4406 			   size_t count)
4407 {
4408 	int jdelay, res;
4409 
4410 	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4411 		res = count;
4412 		if (sdebug_jdelay != jdelay) {
4413 			int j, k;
4414 			struct sdebug_queue *sqp;
4415 
4416 			block_unblock_all_queues(true);
4417 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4418 			     ++j, ++sqp) {
4419 				k = find_first_bit(sqp->in_use_bm,
4420 						   sdebug_max_queue);
4421 				if (k != sdebug_max_queue) {
4422 					res = -EBUSY;   /* queued commands */
4423 					break;
4424 				}
4425 			}
4426 			if (res > 0) {
4427 				/* make sure sdebug_defer instances get
4428 				 * re-allocated for new delay variant */
4429 				free_all_queued();
4430 				sdebug_jdelay = jdelay;
4431 				sdebug_ndelay = 0;
4432 			}
4433 			block_unblock_all_queues(false);
4434 		}
4435 		return res;
4436 	}
4437 	return -EINVAL;
4438 }
4439 static DRIVER_ATTR_RW(delay);
4440 
/* sysfs: show the current response delay in nanoseconds (sdebug_ndelay). */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
4445 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4446 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4447 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4448 			    size_t count)
4449 {
4450 	int ndelay, res;
4451 
4452 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4453 	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4454 		res = count;
4455 		if (sdebug_ndelay != ndelay) {
4456 			int j, k;
4457 			struct sdebug_queue *sqp;
4458 
4459 			block_unblock_all_queues(true);
4460 			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4461 			     ++j, ++sqp) {
4462 				k = find_first_bit(sqp->in_use_bm,
4463 						   sdebug_max_queue);
4464 				if (k != sdebug_max_queue) {
4465 					res = -EBUSY;   /* queued commands */
4466 					break;
4467 				}
4468 			}
4469 			if (res > 0) {
4470 				/* make sure sdebug_defer instances get
4471 				 * re-allocated for new delay variant */
4472 				free_all_queued();
4473 				sdebug_ndelay = ndelay;
4474 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4475 							: DEF_JDELAY;
4476 			}
4477 			block_unblock_all_queues(false);
4478 		}
4479 		return res;
4480 	}
4481 	return -EINVAL;
4482 }
4483 static DRIVER_ATTR_RW(ndelay);
4484 
/* sysfs: show the option flags (sdebug_opts) in hex. */
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}
4489 
4490 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4491 			  size_t count)
4492 {
4493 	int opts;
4494 	char work[20];
4495 
4496 	if (sscanf(buf, "%10s", work) == 1) {
4497 		if (strncasecmp(work, "0x", 2) == 0) {
4498 			if (kstrtoint(work + 2, 16, &opts) == 0)
4499 				goto opts_done;
4500 		} else {
4501 			if (kstrtoint(work, 10, &opts) == 0)
4502 				goto opts_done;
4503 		}
4504 	}
4505 	return -EINVAL;
4506 opts_done:
4507 	sdebug_opts = opts;
4508 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4509 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4510 	tweak_cmnd_count();
4511 	return count;
4512 }
4513 static DRIVER_ATTR_RW(opts);
4514 
4515 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4516 {
4517 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4518 }
4519 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4520 			   size_t count)
4521 {
4522 	int n;
4523 
4524 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4525 		sdebug_ptype = n;
4526 		return count;
4527 	}
4528 	return -EINVAL;
4529 }
4530 static DRIVER_ATTR_RW(ptype);
4531 
4532 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4533 {
4534 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4535 }
4536 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4537 			    size_t count)
4538 {
4539 	int n;
4540 
4541 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4542 		sdebug_dsense = n;
4543 		return count;
4544 	}
4545 	return -EINVAL;
4546 }
4547 static DRIVER_ATTR_RW(dsense);
4548 
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
/* Toggle fake (no-op) READs/WRITEs. Turning fake_rw off requires backing
 * storage, so the ramdisk is allocated lazily here if it does not exist
 * yet (e.g. when the module was loaded with fake_rw=1). */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		/* normalize both sides to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw != n) {
			if ((0 == n) && (NULL == fake_storep)) {
				unsigned long sz =
					(unsigned long)sdebug_dev_size_mb *
					1048576;

				fake_storep = vmalloc(sz);
				if (NULL == fake_storep) {
					pr_err("out of memory, 9\n");
					return -ENOMEM;
				}
				memset(fake_storep, 0, sz);
			}
			sdebug_fake_rw = n;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
4581 
4582 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4583 {
4584 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4585 }
4586 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4587 			      size_t count)
4588 {
4589 	int n;
4590 
4591 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4592 		sdebug_no_lun_0 = n;
4593 		return count;
4594 	}
4595 	return -EINVAL;
4596 }
4597 static DRIVER_ATTR_RW(no_lun_0);
4598 
4599 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4600 {
4601 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4602 }
4603 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4604 			      size_t count)
4605 {
4606 	int n;
4607 
4608 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4609 		sdebug_num_tgts = n;
4610 		sdebug_max_tgts_luns();
4611 		return count;
4612 	}
4613 	return -EINVAL;
4614 }
4615 static DRIVER_ATTR_RW(num_tgts);
4616 
/* Ramdisk size in MiB; read-only through sysfs. */
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

/* Number of partitions built on the ramdisk; read-only through sysfs. */
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
4628 
4629 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4630 {
4631 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4632 }
4633 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4634 			       size_t count)
4635 {
4636 	int nth;
4637 
4638 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4639 		sdebug_every_nth = nth;
4640 		if (nth && !sdebug_statistics) {
4641 			pr_info("every_nth needs statistics=1, set it\n");
4642 			sdebug_statistics = true;
4643 		}
4644 		tweak_cmnd_count();
4645 		return count;
4646 	}
4647 	return -EINVAL;
4648 }
4649 static DRIVER_ATTR_RW(every_nth);
4650 
4651 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4652 {
4653 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4654 }
4655 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4656 			      size_t count)
4657 {
4658 	int n;
4659 	bool changed;
4660 
4661 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4662 		if (n > 256) {
4663 			pr_warn("max_luns can be no more than 256\n");
4664 			return -EINVAL;
4665 		}
4666 		changed = (sdebug_max_luns != n);
4667 		sdebug_max_luns = n;
4668 		sdebug_max_tgts_luns();
4669 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4670 			struct sdebug_host_info *sdhp;
4671 			struct sdebug_dev_info *dp;
4672 
4673 			spin_lock(&sdebug_host_list_lock);
4674 			list_for_each_entry(sdhp, &sdebug_host_list,
4675 					    host_list) {
4676 				list_for_each_entry(dp, &sdhp->dev_info_list,
4677 						    dev_list) {
4678 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4679 						dp->uas_bm);
4680 				}
4681 			}
4682 			spin_unlock(&sdebug_host_list_lock);
4683 		}
4684 		return count;
4685 	}
4686 	return -EINVAL;
4687 }
4688 static DRIVER_ATTR_RW(max_luns);
4689 
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k := highest in-use slot index across all submit queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)	/* no slots in use */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* commands occupy slots at/above the new limit;
			 * remember the old high-water mark so they can
			 * still complete ("retired" slots drain out) */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4725 
/* no_uld flag (read-only); presumably suppresses upper-level driver
 * attachment — TODO confirm against slave_configure elsewhere in file. */
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

/* Emulated SCSI (ANSI/SPC) level; read-only through sysfs. */
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
4737 
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the virtual (reported) capacity. On an actual change, raise a
 * CAPACITY CHANGED unit attention on every device of every pseudo host. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4772 
4773 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4774 {
4775 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4776 }
4777 
4778 static int sdebug_add_adapter(void);
4779 static void sdebug_remove_adapter(void);
4780 
4781 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4782 			      size_t count)
4783 {
4784 	int delta_hosts;
4785 
4786 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4787 		return -EINVAL;
4788 	if (delta_hosts > 0) {
4789 		do {
4790 			sdebug_add_adapter();
4791 		} while (--delta_hosts);
4792 	} else if (delta_hosts < 0) {
4793 		do {
4794 			sdebug_remove_adapter();
4795 		} while (++delta_hosts);
4796 	}
4797 	return count;
4798 }
4799 static DRIVER_ATTR_RW(add_host);
4800 
4801 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4802 {
4803 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4804 }
4805 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4806 				    size_t count)
4807 {
4808 	int n;
4809 
4810 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4811 		sdebug_vpd_use_hostno = n;
4812 		return count;
4813 	}
4814 	return -EINVAL;
4815 }
4816 static DRIVER_ATTR_RW(vpd_use_hostno);
4817 
4818 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4819 {
4820 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4821 }
4822 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4823 				size_t count)
4824 {
4825 	int n;
4826 
4827 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4828 		if (n > 0)
4829 			sdebug_statistics = true;
4830 		else {
4831 			clear_queue_stats();
4832 			sdebug_statistics = false;
4833 		}
4834 		return count;
4835 	}
4836 	return -EINVAL;
4837 }
4838 static DRIVER_ATTR_RW(statistics);
4839 
/* Logical block size in bytes; read-only through sysfs. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

/* Number of submission queues; read-only through sysfs. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

/* DIX (host-side protection information) setting; read-only. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

/* DIF (T10 protection type 0..3) setting; read-only. */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

/* Guard tag type; presumably 0=CRC, 1=IP checksum — TODO confirm. */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

/* ato setting (0 or 1, see init-time validation); read-only. */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
4875 
/* Show the provisioning (mapped-block) bitmap as a bit-list, or the full
 * LBA range when logical block provisioning is not enabled. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* PAGE_SIZE - 1 leaves room for the '\n' appended below; the
	 * terminating NUL then still fits within PAGE_SIZE */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4892 
4893 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4894 {
4895 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4896 }
4897 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4898 			       size_t count)
4899 {
4900 	int n;
4901 
4902 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4903 		sdebug_removable = (n > 0);
4904 		return count;
4905 	}
4906 	return -EINVAL;
4907 }
4908 static DRIVER_ATTR_RW(removable);
4909 
4910 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4911 {
4912 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4913 }
4914 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4915 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4916 			       size_t count)
4917 {
4918 	int n;
4919 
4920 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4921 		sdebug_host_lock = (n > 0);
4922 		return count;
4923 	}
4924 	return -EINVAL;
4925 }
4926 static DRIVER_ATTR_RW(host_lock);
4927 
4928 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4929 {
4930 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4931 }
4932 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4933 			    size_t count)
4934 {
4935 	int n;
4936 
4937 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4938 		sdebug_strict = (n > 0);
4939 		return count;
4940 	}
4941 	return -EINVAL;
4942 }
4943 static DRIVER_ATTR_RW(strict);
4944 
4945 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4946 {
4947 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4948 }
4949 static DRIVER_ATTR_RO(uuid_ctl);
4950 
4951 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
4952 {
4953 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
4954 }
4955 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
4956 			     size_t count)
4957 {
4958 	int ret, n;
4959 
4960 	ret = kstrtoint(buf, 0, &n);
4961 	if (ret)
4962 		return ret;
4963 	sdebug_cdb_len = n;
4964 	all_config_cdb_len();
4965 	return count;
4966 }
4967 static DRIVER_ATTR_RW(cdb_len);
4968 
4969 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);

/* root pseudo device that all sdebug adapters hang off */
static struct device *pseudo_primary;
5014 
/*
 * Module init: validate parameters, allocate the submit queues and
 * (optionally) the ramdisk/DIF/provisioning stores, register the pseudo
 * bus/driver and build the initially requested number of hosts.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ndelay must stay below one second; a positive value overrides
	 * the jiffies-based delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* backing store only needed when READs/WRITEs are not faked */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff == unwritten/escape pattern for protection info */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() increments sdebug_add_host itself, so zero
	 * it first and replay the requested count */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5223 
5224 static void __exit scsi_debug_exit(void)
5225 {
5226 	int k = sdebug_add_host;
5227 
5228 	stop_all_queued();
5229 	free_all_queued();
5230 	for (; k; k--)
5231 		sdebug_remove_adapter();
5232 	driver_unregister(&sdebug_driverfs_driver);
5233 	bus_unregister(&pseudo_lld_bus);
5234 	root_device_unregister(pseudo_primary);
5235 
5236 	vfree(map_storep);
5237 	vfree(dif_storep);
5238 	vfree(fake_storep);
5239 	kfree(sdebug_q_arr);
5240 }
5241 
5242 device_initcall(scsi_debug_init);
5243 module_exit(scsi_debug_exit);
5244 
/* Driver-model release callback: the host struct embeds the device. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5252 
/*
 * Allocate one pseudo host plus its num_tgts * max_luns device infos,
 * link it onto sdebug_host_list and register it with the driver model
 * (which triggers sdebug_driver_probe via the pseudo bus).
 *
 * NOTE(review): on device_register() failure the host has already been
 * added to sdebug_host_list but is not removed before kfree() — looks
 * like a dangling list entry; also the driver-model convention after a
 * failed device_register() is put_device(), not kfree(). Confirm against
 * upstream before changing.
 */
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (sdbg_host == NULL) {
		pr_err("out of memory at line %d\n", __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	/* pre-create a dev_info for every (target, lun) pair */
	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			pr_err("out of memory at line %d\n", __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++sdebug_add_host;
	return error;

clean:
	/* free any device infos created above, then the host itself */
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}
5305 
5306 static void sdebug_remove_adapter(void)
5307 {
5308 	struct sdebug_host_info *sdbg_host = NULL;
5309 
5310 	spin_lock(&sdebug_host_list_lock);
5311 	if (!list_empty(&sdebug_host_list)) {
5312 		sdbg_host = list_entry(sdebug_host_list.prev,
5313 				       struct sdebug_host_info, host_list);
5314 		list_del(&sdbg_host->host_list);
5315 	}
5316 	spin_unlock(&sdebug_host_list_lock);
5317 
5318 	if (!sdbg_host)
5319 		return;
5320 
5321 	device_unregister(&sdbg_host->dev);
5322 	--sdebug_add_host;
5323 }
5324 
/* scsi_host_template.change_queue_depth callback: clamp the requested
 * depth and apply it while all queues are blocked. */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5352 
5353 static bool fake_timeout(struct scsi_cmnd *scp)
5354 {
5355 	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5356 		if (sdebug_every_nth < -1)
5357 			sdebug_every_nth = -1;
5358 		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5359 			return true; /* ignore command causing timeout */
5360 		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5361 			 scsi_medium_access_command(scp))
5362 			return true; /* time out reads and writes */
5363 	}
5364 	return false;
5365 }
5366 
5367 static bool fake_host_busy(struct scsi_cmnd *scp)
5368 {
5369 	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5370 		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5371 }
5372 
/*
 * Main queuecommand entry point: look the CDB opcode (and service
 * action, if any) up in opcode_info_arr, run the generic checks (LUN
 * range, unit attentions, strict CDB mask, stopped state, injected
 * errors), then dispatch to the matching resp_* handler and schedule
 * the response with the configured delay.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* optionally trace the incoming CDB as hex bytes */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest disallowed bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention first */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5521 
/* Host template shared by every pseudo adapter; can_queue and
 * use_clustering are patched at probe time from module parameters. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5548 
5549 static int sdebug_driver_probe(struct device * dev)
5550 {
5551 	int error = 0;
5552 	struct sdebug_host_info *sdbg_host;
5553 	struct Scsi_Host *hpnt;
5554 	int hprot;
5555 
5556 	sdbg_host = to_sdebug_host(dev);
5557 
5558 	sdebug_driver_template.can_queue = sdebug_max_queue;
5559 	if (sdebug_clustering)
5560 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5561 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5562 	if (NULL == hpnt) {
5563 		pr_err("scsi_host_alloc failed\n");
5564 		error = -ENODEV;
5565 		return error;
5566 	}
5567 	if (submit_queues > nr_cpu_ids) {
5568 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5569 			my_name, submit_queues, nr_cpu_ids);
5570 		submit_queues = nr_cpu_ids;
5571 	}
5572 	/* Decide whether to tell scsi subsystem that we want mq */
5573 	/* Following should give the same answer for each host */
5574 	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5575 	if (sdebug_mq_active)
5576 		hpnt->nr_hw_queues = submit_queues;
5577 
5578 	sdbg_host->shost = hpnt;
5579 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5580 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5581 		hpnt->max_id = sdebug_num_tgts + 1;
5582 	else
5583 		hpnt->max_id = sdebug_num_tgts;
5584 	/* = sdebug_max_luns; */
5585 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5586 
5587 	hprot = 0;
5588 
5589 	switch (sdebug_dif) {
5590 
5591 	case T10_PI_TYPE1_PROTECTION:
5592 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5593 		if (sdebug_dix)
5594 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5595 		break;
5596 
5597 	case T10_PI_TYPE2_PROTECTION:
5598 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5599 		if (sdebug_dix)
5600 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5601 		break;
5602 
5603 	case T10_PI_TYPE3_PROTECTION:
5604 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5605 		if (sdebug_dix)
5606 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5607 		break;
5608 
5609 	default:
5610 		if (sdebug_dix)
5611 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5612 		break;
5613 	}
5614 
5615 	scsi_host_set_prot(hpnt, hprot);
5616 
5617 	if (have_dif_prot || sdebug_dix)
5618 		pr_info("host protection%s%s%s%s%s%s%s\n",
5619 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5620 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5621 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5622 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5623 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5624 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5625 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5626 
5627 	if (sdebug_guard == 1)
5628 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5629 	else
5630 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5631 
5632 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5633 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5634 	if (sdebug_every_nth)	/* need stats counters for every_nth */
5635 		sdebug_statistics = true;
5636 	error = scsi_add_host(hpnt, &sdbg_host->dev);
5637 	if (error) {
5638 		pr_err("scsi_add_host failed\n");
5639 		error = -ENODEV;
5640 		scsi_host_put(hpnt);
5641 	} else
5642 		scsi_scan_host(hpnt);
5643 
5644 	return error;
5645 }
5646 
5647 static int sdebug_driver_remove(struct device * dev)
5648 {
5649 	struct sdebug_host_info *sdbg_host;
5650 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5651 
5652 	sdbg_host = to_sdebug_host(dev);
5653 
5654 	if (!sdbg_host) {
5655 		pr_err("Unable to locate host info\n");
5656 		return -ENODEV;
5657 	}
5658 
5659 	scsi_remove_host(sdbg_host->shost);
5660 
5661 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5662 				 dev_list) {
5663 		list_del(&sdbg_devinfo->dev_list);
5664 		kfree(sdbg_devinfo);
5665 	}
5666 
5667 	scsi_host_put(sdbg_host->shost);
5668 	return 0;
5669 }
5670 
/*
 * Bus match callback: unconditionally returns 1 so every device on the
 * pseudo bus matches every driver and probe is always attempted.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5676 
/*
 * Virtual "pseudo" bus carrying the simulated adapters; device
 * probe/remove route to sdebug_driver_probe()/sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5684