xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 9b760fd8)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2017 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46 
47 #include <net/checksum.h>
48 
49 #include <asm/unaligned.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59 
60 #include "sd.h"
61 #include "scsi_logging.h"
62 
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "0187"	/* format to fit INQUIRY revision field */
65 static const char *sdebug_version_date = "20171202";
66 
67 #define MY_NAME "scsi_debug"
68 
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 
97 /* Additional Sense Code Qualifier (ASCQ) */
98 #define ACK_NAK_TO 0x3
99 
100 /* Default values for driver parameters */
101 #define DEF_NUM_HOST   1
102 #define DEF_NUM_TGTS   1
103 #define DEF_MAX_LUNS   1
104 /* With these defaults, this driver will make 1 host with 1 target
105  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
106  */
107 #define DEF_ATO 1
108 #define DEF_CDB_LEN 10
109 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
110 #define DEF_DEV_SIZE_MB   8
111 #define DEF_DIF 0
112 #define DEF_DIX 0
113 #define DEF_D_SENSE   0
114 #define DEF_EVERY_NTH   0
115 #define DEF_FAKE_RW	0
116 #define DEF_GUARD 0
117 #define DEF_HOST_LOCK 0
118 #define DEF_LBPU 0
119 #define DEF_LBPWS 0
120 #define DEF_LBPWS10 0
121 #define DEF_LBPRZ 1
122 #define DEF_LOWEST_ALIGNED 0
123 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
124 #define DEF_NO_LUN_0   0
125 #define DEF_NUM_PARTS   0
126 #define DEF_OPTS   0
127 #define DEF_OPT_BLKS 1024
128 #define DEF_PHYSBLK_EXP 0
129 #define DEF_OPT_XFERLEN_EXP 0
130 #define DEF_PTYPE   TYPE_DISK
131 #define DEF_REMOVABLE false
132 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
133 #define DEF_SECTOR_SIZE 512
134 #define DEF_UNMAP_ALIGNMENT 0
135 #define DEF_UNMAP_GRANULARITY 1
136 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
137 #define DEF_UNMAP_MAX_DESC 256
138 #define DEF_VIRTUAL_GB   0
139 #define DEF_VPD_USE_HOSTNO 1
140 #define DEF_WRITESAME_LENGTH 0xFFFF
141 #define DEF_STRICT 0
142 #define DEF_STATISTICS false
143 #define DEF_SUBMIT_QUEUES 1
144 #define DEF_UUID_CTL 0
145 #define JDELAY_OVERRIDDEN -9999
146 
147 #define SDEBUG_LUN_0_VAL 0
148 
149 /* bit mask values for sdebug_opts */
150 #define SDEBUG_OPT_NOISE		1
151 #define SDEBUG_OPT_MEDIUM_ERR		2
152 #define SDEBUG_OPT_TIMEOUT		4
153 #define SDEBUG_OPT_RECOVERED_ERR	8
154 #define SDEBUG_OPT_TRANSPORT_ERR	16
155 #define SDEBUG_OPT_DIF_ERR		32
156 #define SDEBUG_OPT_DIX_ERR		64
157 #define SDEBUG_OPT_MAC_TIMEOUT		128
158 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
159 #define SDEBUG_OPT_Q_NOISE		0x200
160 #define SDEBUG_OPT_ALL_TSF		0x400
161 #define SDEBUG_OPT_RARE_TSF		0x800
162 #define SDEBUG_OPT_N_WCE		0x1000
163 #define SDEBUG_OPT_RESET_NOISE		0x2000
164 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
165 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
166 			      SDEBUG_OPT_RESET_NOISE)
167 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
168 				  SDEBUG_OPT_TRANSPORT_ERR | \
169 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
170 				  SDEBUG_OPT_SHORT_TRANSFER)
171 /* When "every_nth" > 0 then modulo "every_nth" commands:
172  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
173  *   - a RECOVERED_ERROR is simulated on successful read and write
174  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
175  *   - a TRANSPORT_ERROR is simulated on successful read and write
176  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
177  *
178  * When "every_nth" < 0 then after "- every_nth" commands:
179  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
180  *   - a RECOVERED_ERROR is simulated on successful read and write
181  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
182  *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
186  * every_nth via sysfs).
187  */
188 
189 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
190  * priority order. In the subset implemented here lower numbers have higher
191  * priority. The UA numbers should be a sequence starting from 0 with
192  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
193 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
194 #define SDEBUG_UA_BUS_RESET 1
195 #define SDEBUG_UA_MODE_CHANGED 2
196 #define SDEBUG_UA_CAPACITY_CHANGED 3
197 #define SDEBUG_UA_LUNS_CHANGED 4
198 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
199 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
200 #define SDEBUG_NUM_UAS 7
201 
202 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
203  * sector on read commands: */
204 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
205 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
206 
207 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
208  * or "peripheral device" addressing (value 0) */
209 #define SAM2_LUN_ADDRESS_METHOD 0
210 
211 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
212  * (for response) per submit queue at one time. Can be reduced by max_queue
213  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
214  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
215  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
216  * but cannot exceed SDEBUG_CANQUEUE .
217  */
218 #define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
219 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
220 #define DEF_CMD_PER_LUN  255
221 
222 #define F_D_IN			1
223 #define F_D_OUT			2
224 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
225 #define F_D_UNKN		8
226 #define F_RL_WLUN_OK		0x10
227 #define F_SKIP_UA		0x20
228 #define F_DELAY_OVERR		0x40
229 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
230 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
231 #define F_INV_OP		0x200
232 #define F_FAKE_RW		0x400
233 #define F_M_ACCESS		0x800	/* media access */
234 
235 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
236 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
237 #define FF_SA (F_SA_HIGH | F_SA_LOW)
238 
239 #define SDEBUG_MAX_PARTS 4
240 
241 #define SDEBUG_MAX_CMD_LEN 32
242 
243 
/* Per logical-unit state: one instance for each simulated SCSI device
 * (one lun on one target on one simulated host). */
struct sdebug_dev_info {
	struct list_head dev_list;	/* entry on owning host's dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;		/* logical unit name; presumably reported when
				 * uuid_ctl is set — TODO confirm */
	struct sdebug_host_info *sdbg_host;	/* back-pointer to owning host */
	unsigned long uas_bm[1];	/* pending unit attentions; bit numbers
					 * are the SDEBUG_UA_* values above */
	atomic_t num_in_q;	/* commands currently queued on this device */
	atomic_t stopped;
	bool used;
};
256 
/* Per simulated-host (HBA) state; hosts are chained on sdebug_host_list. */
struct sdebug_host_info {
	struct list_head host_list;	/* entry on global sdebug_host_list */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;	/* of struct sdebug_dev_info */
};
263 
264 #define to_sdebug_host(d)	\
265 	container_of(d, struct sdebug_host_info, dev)
266 
/* Bookkeeping for one deferred command response: the completion is driven
 * later from either the hrtimer (ndelay/jdelay) or the workqueue item. */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;	/* cpu the command was submitted on; compared
				 * with the completion cpu for statistics */
};
274 
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;	/* deferred-response state, if any */
	struct scsi_cmnd *a_cmnd;	/* mid-layer command being serviced */
	/* per-command error-injection markers; compare the SDEBUG_OPT_*
	 * injection bit masks above */
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
};
287 
/* One submit queue's worth of in-flight command state (see submit_queues). */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* occupancy bitmap
							 * for qc_arr */
	spinlock_t qc_lock;	/* guards this queue's state */
	atomic_t blocked;	/* to temporarily stop more being queued */
};
294 
295 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
296 static atomic_t sdebug_completions;  /* count of deferred completions */
297 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
298 static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
299 
/* Describes one supported SCSI opcode (or opcode + service action): how to
 * validate its cdb and which resp_*() handler services it. */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of F_* flags above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
				/* response handler; NULL -> generic handling */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
				/* ignore cdb bytes after position 15 */
};
311 
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* (opcode_ind_arr[] below performs the cdb[0] -> index mapping; each index
 * selects an entry of opcode_info_arr[]). */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
};
346 
347 
/* Maps cdb[0] (the SCSI opcode) to an sdeb_opcode_index value; entries of
 * 0 mean SDEB_I_INVALID_OPCODE, i.e. not supported by this driver. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
	     0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
390 
391 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
392 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
393 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
394 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
409 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
411 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
412 
/* MODE SENSE(6): alternate to the preferred MODE SENSE(10) entry below */
static const struct opcode_info_t msense_iarr[1] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
417 
/* MODE SELECT(6): alternate to the preferred MODE SELECT(10) entry below */
static const struct opcode_info_t mselect_iarr[1] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
422 
/* READ(10)/(6)/(12): alternates to the preferred READ(16) entry below */
static const struct opcode_info_t read_iarr[3] = {
	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
433 
/* WRITE(10)/(6)/(12): alternates to the preferred WRITE(16) entry below */
static const struct opcode_info_t write_iarr[3] = {
	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
	     0xc7, 0, 0, 0, 0} },
};
444 
/* SERVICE ACTION IN(16): GET LBA STATUS (service action 0x12) */
static const struct opcode_info_t sa_in_iarr[1] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },
};
450 
static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
};
456 
/* MAINTENANCE IN: REPORT SUPPORTED OPERATION CODES (sa 0xc) and REPORT
 * SUPPORTED TASK MANAGEMENT FUNCTIONS (sa 0xd) */
static const struct opcode_info_t maint_in_iarr[2] = {
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },
};
465 
/* WRITE SAME(16): alternate to the preferred WRITE SAME(10) entry below */
static const struct opcode_info_t write_same_iarr[1] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
471 
static const struct opcode_info_t reserve_iarr[1] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
476 
static const struct opcode_info_t release_iarr[1] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
481 
482 
483 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
484  * plus the terminating elements for logic that scans this table such as
485  * REPORT SUPPORTED OPERATION CODES. */
486 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
487 /* 0 */
488 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
489 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
490 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
491 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
492 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
493 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
494 	     0, 0} },
495 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
496 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
497 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
498 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
499 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
500 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
501 	     0} },
502 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
503 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
504 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
505 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
506 	     0, 0, 0} },
507 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
508 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
509 	     0, 0} },
510 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
511 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
512 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
513 /* 10 */
514 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
515 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
516 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
517 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
518 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
520 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
521 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
522 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
523 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
524 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
525 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
526 	     0} },
527 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
528 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
529 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
530 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
531 	     0, 0, 0, 0, 0, 0} },
532 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
533 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
534 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
535 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
536 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
537 	     0} },
538 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
539 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
540 	     0} },
541 /* 20 */
542 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
543 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
544 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
545 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
546 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
547 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
548 	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
549 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
550 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
551 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
552 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
553 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
554 		   0, 0, 0, 0, 0, 0} },
555 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
556 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
557 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
558 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
559 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
560 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
561 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
562 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
563 	     0, 0, 0, 0} },
564 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
565 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
566 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
567 
568 /* 30 */
569 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
570 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
571 };
572 
/* Module-parameter backing variables; the DEF_* defaults are defined above
 * and most values can also be changed at run time via sysfs. */
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;	/* SDEBUG_OPT_* bit mask */
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;	/* presumably set when an
					 * SDEBUG_OPT_ALL_INJECTING bit is
					 * given -- TODO confirm */
static bool sdebug_verbose;
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_mq_active;
621 
/* Derived size of the simulated medium. */
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);	/* guards sdebug_host_list */

static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
/* event counters (aborts, resets, DIF/DIX activity) */
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);	/* NOTE(review): looks like it
					 * serialises fake-store access --
					 * confirm against users */
652 
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-composed scsi_cmnd result values: driver byte (<<24), host byte
 * (<<16), message byte (<<8) and SCSI status byte. */
static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
671 
672 
673 /* Only do the extra work involved in logical block provisioning if one or
674  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
675  * real reads and writes (i.e. not skipping them for speed).
676  */
677 static inline bool scsi_debug_lbp(void)
678 {
679 	return 0 == sdebug_fake_rw &&
680 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
681 }
682 
/* Map an LBA to its byte address in the ramdisk. The lba is reduced modulo
 * sdebug_store_sectors (do_div() returns the remainder), presumably so a
 * virtual capacity larger than the backing store wraps around it --
 * TODO confirm against sdebug_virtual_gb handling. */
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);	/* lba %= store size */

	return fake_storep + lba * sdebug_sector_size;
}
689 
/* Map a sector to its T10 protection-information tuple in dif_storep,
 * wrapping modulo sdebug_store_sectors (sector_div() yields the
 * remainder), mirroring fake_store() above. */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);	/* remainder */

	return dif_storep + sector;
}
696 
697 static void sdebug_max_tgts_luns(void)
698 {
699 	struct sdebug_host_info *sdbg_host;
700 	struct Scsi_Host *hpnt;
701 
702 	spin_lock(&sdebug_host_list_lock);
703 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
704 		hpnt = sdbg_host->shost;
705 		if ((hpnt->this_id >= 0) &&
706 		    (sdebug_num_tgts > hpnt->this_id))
707 			hpnt->max_id = sdebug_num_tgts + 1;
708 		else
709 			hpnt->max_id = sdebug_num_tgts;
710 		/* sdebug_max_luns; */
711 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
712 	}
713 	spin_unlock(&sdebug_host_list_lock);
714 }
715 
/* Tells mk_sense_invalid_fld() whether the offending field was in the cdb
 * or in the data-out (parameter list) buffer. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
717 
/* Set in_bit to -1 to indicate no bit position of invalid field */
/* Builds an ILLEGAL REQUEST sense with a sense-key-specific "field pointer"
 * identifying the bad byte/bit, in either fixed or descriptor format
 * depending on the dsense module parameter. */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key specific bytes (SPC field pointer) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	/* c_d != 0 (SDEB_IN_CDB) -> error in cdb, else in parameter list */
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: field pointer valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the command descriptor */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append an SKS descriptor (type 0x2,
		 * additional length 0x6) and bump the additional-length
		 * byte at offset 7 */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);	/* fixed format: bytes 15-17 */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
758 
759 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
760 {
761 	unsigned char *sbuff;
762 
763 	sbuff = scp->sense_buffer;
764 	if (!sbuff) {
765 		sdev_printk(KERN_ERR, scp->device,
766 			    "%s: sense_buffer is NULL\n", __func__);
767 		return;
768 	}
769 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
770 
771 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
772 
773 	if (sdebug_verbose)
774 		sdev_printk(KERN_INFO, scp->device,
775 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
776 			    my_name, key, asc, asq);
777 }
778 
779 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
780 {
781 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
782 }
783 
784 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
785 {
786 	if (sdebug_verbose) {
787 		if (0x1261 == cmd)
788 			sdev_printk(KERN_INFO, dev,
789 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
790 		else if (0x5331 == cmd)
791 			sdev_printk(KERN_INFO, dev,
792 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
793 				    __func__);
794 		else
795 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
796 				    __func__, cmd);
797 	}
798 	return -EINVAL;
799 	/* return -ENOTTY; // correct return but upsets fdisk */
800 }
801 
802 static void config_cdb_len(struct scsi_device *sdev)
803 {
804 	switch (sdebug_cdb_len) {
805 	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
806 		sdev->use_10_for_rw = false;
807 		sdev->use_16_for_rw = false;
808 		sdev->use_10_for_ms = false;
809 		break;
810 	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
811 		sdev->use_10_for_rw = true;
812 		sdev->use_16_for_rw = false;
813 		sdev->use_10_for_ms = false;
814 		break;
815 	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
816 		sdev->use_10_for_rw = true;
817 		sdev->use_16_for_rw = false;
818 		sdev->use_10_for_ms = true;
819 		break;
820 	case 16:
821 		sdev->use_10_for_rw = false;
822 		sdev->use_16_for_rw = true;
823 		sdev->use_10_for_ms = true;
824 		break;
825 	case 32: /* No knobs to suggest this so same as 16 for now */
826 		sdev->use_10_for_rw = false;
827 		sdev->use_16_for_rw = true;
828 		sdev->use_10_for_ms = true;
829 		break;
830 	default:
831 		pr_warn("unexpected cdb_len=%d, force to 10\n",
832 			sdebug_cdb_len);
833 		sdev->use_10_for_rw = true;
834 		sdev->use_16_for_rw = false;
835 		sdev->use_10_for_ms = false;
836 		sdebug_cdb_len = 10;
837 		break;
838 	}
839 }
840 
841 static void all_config_cdb_len(void)
842 {
843 	struct sdebug_host_info *sdbg_host;
844 	struct Scsi_Host *shost;
845 	struct scsi_device *sdev;
846 
847 	spin_lock(&sdebug_host_list_lock);
848 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
849 		shost = sdbg_host->shost;
850 		shost_for_each_device(sdev, shost) {
851 			config_cdb_len(sdev);
852 		}
853 	}
854 	spin_unlock(&sdebug_host_list_lock);
855 }
856 
857 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
858 {
859 	struct sdebug_host_info *sdhp;
860 	struct sdebug_dev_info *dp;
861 
862 	spin_lock(&sdebug_host_list_lock);
863 	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
864 		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
865 			if ((devip->sdbg_host == dp->sdbg_host) &&
866 			    (devip->target == dp->target))
867 				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
868 		}
869 	}
870 	spin_unlock(&sdebug_host_list_lock);
871 }
872 
/* Report the lowest-numbered pending unit attention (UA) for this device,
 * if any: build the matching sense data in scp, clear that single UA bit
 * and return check_condition_result. Returns 0 when no UA is pending.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, only set when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
952 
953 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
954 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
955 				int arr_len)
956 {
957 	int act_len;
958 	struct scsi_data_buffer *sdb = scsi_in(scp);
959 
960 	if (!sdb->length)
961 		return 0;
962 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
963 		return DID_ERROR << 16;
964 
965 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
966 				      arr, arr_len);
967 	sdb->resid = scsi_bufflen(scp) - act_len;
968 
969 	return 0;
970 }
971 
972 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
973  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
974  * calls, not required to write in ascending offset order. Assumes resid
975  * set to scsi_bufflen() prior to any calls.
976  */
977 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
978 				  int arr_len, unsigned int off_dst)
979 {
980 	int act_len, n;
981 	struct scsi_data_buffer *sdb = scsi_in(scp);
982 	off_t skip = off_dst;
983 
984 	if (sdb->length <= off_dst)
985 		return 0;
986 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
987 		return DID_ERROR << 16;
988 
989 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
990 				       arr, arr_len, skip);
991 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
992 		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
993 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
994 	sdb->resid = min(sdb->resid, n);
995 	return 0;
996 }
997 
998 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
999  * 'arr' or -1 if error.
1000  */
1001 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1002 			       int arr_len)
1003 {
1004 	if (!scsi_bufflen(scp))
1005 		return 0;
1006 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
1007 		return -1;
1008 
1009 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1010 }
1011 
1012 
/* INQUIRY strings: space padded to the 8/16/4 byte standard-inquiry
 * field widths, plus a trailing NUL for the C initializer.
 */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1020 
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits, in order: a T10 vendor-id designator; for real LUs
 * (dev_id_num >= 0) a logical unit designator (locally assigned UUID or
 * NAA-3, chosen by sdebug_uuid_ctl) plus a relative target port
 * designator; then target port, target port group and target device
 * NAA-3 designators and a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length: 12 + 8 + 4 pad bytes */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1108 
/* Payload (from the 4th byte onward) for the Software interface
 * identification VPD page: three 6-byte identifiers.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1114 
1115 /*  Software interface identification VPD page */
1116 static int inquiry_vpd_84(unsigned char *arr)
1117 {
1118 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
1119 	return sizeof(vpd84_data);
1120 }
1121 
/* Management network addresses VPD page. Each descriptor is a 4 byte
 * header (association/service type, reserved, reserved, length) followed
 * by the URL, null terminated and zero padded to a multiple of 4 bytes.
 * Returns the number of bytes placed in arr.
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	static const struct {
		unsigned char assoc_service;
		const char *url;
	} ent[] = {
		{ 0x1, "https://www.kernel.org/config" }, /* lu, storage config */
		{ 0x4, "http://www.kernel.org/log" },	  /* lu, logging */
	};
	int k, num = 0;

	for (k = 0; k < 2; ++k) {
		int olen = strlen(ent[k].url);
		int plen = olen + 1;	/* room for the null terminator */

		if (plen % 4)		/* pad up to a 4 byte multiple */
			plen = ((plen / 4) + 1) * 4;
		arr[num++] = ent[k].assoc_service;
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = plen;	/* length, null terminated, padded */
		memcpy(arr + num, ent[k].url, olen);
		memset(arr + num + olen, 0, plen - olen);
		num += plen;
	}

	return num;
}
1156 
1157 /* SCSI ports VPD page */
1158 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1159 {
1160 	int num = 0;
1161 	int port_a, port_b;
1162 
1163 	port_a = target_dev_id + 1;
1164 	port_b = port_a + 1;
1165 	arr[num++] = 0x0;	/* reserved */
1166 	arr[num++] = 0x0;	/* reserved */
1167 	arr[num++] = 0x0;
1168 	arr[num++] = 0x1;	/* relative port 1 (primary) */
1169 	memset(arr + num, 0, 6);
1170 	num += 6;
1171 	arr[num++] = 0x0;
1172 	arr[num++] = 12;	/* length tp descriptor */
1173 	/* naa-5 target port identifier (A) */
1174 	arr[num++] = 0x61;	/* proto=sas, binary */
1175 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1176 	arr[num++] = 0x0;	/* reserved */
1177 	arr[num++] = 0x8;	/* length */
1178 	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1179 	num += 8;
1180 	arr[num++] = 0x0;	/* reserved */
1181 	arr[num++] = 0x0;	/* reserved */
1182 	arr[num++] = 0x0;
1183 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
1184 	memset(arr + num, 0, 6);
1185 	num += 6;
1186 	arr[num++] = 0x0;
1187 	arr[num++] = 12;	/* length tp descriptor */
1188 	/* naa-5 target port identifier (B) */
1189 	arr[num++] = 0x61;	/* proto=sas, binary */
1190 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
1191 	arr[num++] = 0x0;	/* reserved */
1192 	arr[num++] = 0x8;	/* length */
1193 	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1194 	num += 8;
1195 
1196 	return num;
1197 }
1198 
1199 
1200 static unsigned char vpd89_data[] = {
1201 /* from 4th byte */ 0,0,0,0,
1202 'l','i','n','u','x',' ',' ',' ',
1203 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1204 '1','2','3','4',
1205 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1206 0xec,0,0,0,
1207 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1208 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1209 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1210 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1211 0x53,0x41,
1212 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1213 0x20,0x20,
1214 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1215 0x10,0x80,
1216 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1217 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1218 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1219 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1220 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1221 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1222 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1223 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1224 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1225 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1226 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1227 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1228 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1229 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1230 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1231 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1232 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1233 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1234 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1235 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1236 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1237 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1238 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1239 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1240 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1241 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1242 };
1243 
1244 /* ATA Information VPD page */
1245 static int inquiry_vpd_89(unsigned char *arr)
1246 {
1247 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1248 	return sizeof(vpd89_data);
1249 }
1250 
1251 
/* Template payload (from the 4th byte onward) for the Block limits VPD
 * page; most fields are overwritten by inquiry_vpd_b0() from the
 * sdebug_* module parameters.
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1258 
1259 /* Block limits VPD page (SBC-3) */
1260 static int inquiry_vpd_b0(unsigned char *arr)
1261 {
1262 	unsigned int gran;
1263 
1264 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1265 
1266 	/* Optimal transfer length granularity */
1267 	if (sdebug_opt_xferlen_exp != 0 &&
1268 	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1269 		gran = 1 << sdebug_opt_xferlen_exp;
1270 	else
1271 		gran = 1 << sdebug_physblk_exp;
1272 	put_unaligned_be16(gran, arr + 2);
1273 
1274 	/* Maximum Transfer Length */
1275 	if (sdebug_store_sectors > 0x400)
1276 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1277 
1278 	/* Optimal Transfer Length */
1279 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1280 
1281 	if (sdebug_lbpu) {
1282 		/* Maximum Unmap LBA Count */
1283 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1284 
1285 		/* Maximum Unmap Block Descriptor Count */
1286 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1287 	}
1288 
1289 	/* Unmap Granularity Alignment */
1290 	if (sdebug_unmap_alignment) {
1291 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1292 		arr[28] |= 0x80; /* UGAVALID */
1293 	}
1294 
1295 	/* Optimal Unmap Granularity */
1296 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1297 
1298 	/* Maximum WRITE SAME Length */
1299 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1300 
1301 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1302 
1303 	return sizeof(vpdb0_data);
1304 }
1305 
/* Block device characteristics VPD page (SBC-3); returns the 0x3c byte
 * page length.
 */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* medium rotation rate low byte: non rotating (SSD) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1317 
1318 /* Logical block provisioning VPD page (SBC-4) */
1319 static int inquiry_vpd_b2(unsigned char *arr)
1320 {
1321 	memset(arr, 0, 0x4);
1322 	arr[0] = 0;			/* threshold exponent */
1323 	if (sdebug_lbpu)
1324 		arr[1] = 1 << 7;
1325 	if (sdebug_lbpws)
1326 		arr[1] |= 1 << 6;
1327 	if (sdebug_lbpws10)
1328 		arr[1] |= 1 << 5;
1329 	if (sdebug_lbprz && scsi_debug_lbp())
1330 		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1331 	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
1332 	/* minimum_percentage=0; provisioning_type=0 (unknown) */
1333 	/* threshold_percentage=0 */
1334 	return 0x4;
1335 }
1336 
1337 #define SDEBUG_LONG_INQ_SZ 96
1338 #define SDEBUG_MAX_INQ_ARR_SZ 584
1339 
/* Respond to the INQUIRY command: standard inquiry data or, when the
 * EVPD bit (cmd[1] & 1) is set, one of the supported Vital Product Data
 * pages selected by cmd[2]. Returns 0 on success, check_condition_result
 * for an unsupported page or the CMDDT bit, or DID_REQUEUE << 16 if the
 * response buffer cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1490 
/* Informational exceptions control mode page [0x1c]; resp_requests()
 * checks byte 2 bit 2 (TEST) and byte 3 low nibble (MRIE) of this page.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1493 
1494 static int resp_requests(struct scsi_cmnd * scp,
1495 			 struct sdebug_dev_info * devip)
1496 {
1497 	unsigned char * sbuff;
1498 	unsigned char *cmd = scp->cmnd;
1499 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1500 	bool dsense;
1501 	int len = 18;
1502 
1503 	memset(arr, 0, sizeof(arr));
1504 	dsense = !!(cmd[1] & 1);
1505 	sbuff = scp->sense_buffer;
1506 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1507 		if (dsense) {
1508 			arr[0] = 0x72;
1509 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1510 			arr[2] = THRESHOLD_EXCEEDED;
1511 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1512 			len = 8;
1513 		} else {
1514 			arr[0] = 0x70;
1515 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1516 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1517 			arr[12] = THRESHOLD_EXCEEDED;
1518 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1519 		}
1520 	} else {
1521 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1522 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1523 			;	/* have sense and formats match */
1524 		else if (arr[0] <= 0x70) {
1525 			if (dsense) {
1526 				memset(arr, 0, 8);
1527 				arr[0] = 0x72;
1528 				len = 8;
1529 			} else {
1530 				memset(arr, 0, 18);
1531 				arr[0] = 0x70;
1532 				arr[7] = 0xa;
1533 			}
1534 		} else if (dsense) {
1535 			memset(arr, 0, 8);
1536 			arr[0] = 0x72;
1537 			arr[1] = sbuff[2];     /* sense key */
1538 			arr[2] = sbuff[12];    /* asc */
1539 			arr[3] = sbuff[13];    /* ascq */
1540 			len = 8;
1541 		} else {
1542 			memset(arr, 0, 18);
1543 			arr[0] = 0x70;
1544 			arr[2] = sbuff[1];
1545 			arr[7] = 0xa;
1546 			arr[12] = sbuff[1];
1547 			arr[13] = sbuff[3];
1548 		}
1549 
1550 	}
1551 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1552 	return fill_from_dev_buffer(scp, arr, len);
1553 }
1554 
1555 static int resp_start_stop(struct scsi_cmnd * scp,
1556 			   struct sdebug_dev_info * devip)
1557 {
1558 	unsigned char *cmd = scp->cmnd;
1559 	int power_cond, stop;
1560 
1561 	power_cond = (cmd[4] & 0xf0) >> 4;
1562 	if (power_cond) {
1563 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1564 		return check_condition_result;
1565 	}
1566 	stop = !(cmd[4] & 1);
1567 	atomic_xchg(&devip->stopped, stop);
1568 	return 0;
1569 }
1570 
1571 static sector_t get_sdebug_capacity(void)
1572 {
1573 	static const unsigned int gibibyte = 1073741824;
1574 
1575 	if (sdebug_virtual_gb > 0)
1576 		return (sector_t)sdebug_virtual_gb *
1577 			(gibibyte / sdebug_sector_size);
1578 	else
1579 		return sdebug_store_sectors;
1580 }
1581 
1582 #define SDEBUG_READCAP_ARR_SZ 8
1583 static int resp_readcap(struct scsi_cmnd * scp,
1584 			struct sdebug_dev_info * devip)
1585 {
1586 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1587 	unsigned int capac;
1588 
1589 	/* following just in case virtual_gb changed */
1590 	sdebug_capacity = get_sdebug_capacity();
1591 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1592 	if (sdebug_capacity < 0xffffffff) {
1593 		capac = (unsigned int)sdebug_capacity - 1;
1594 		put_unaligned_be32(capac, arr + 0);
1595 	} else
1596 		put_unaligned_be32(0xffffffff, arr + 0);
1597 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1598 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1599 }
1600 
1601 #define SDEBUG_READCAP16_ARR_SZ 32
1602 static int resp_readcap16(struct scsi_cmnd * scp,
1603 			  struct sdebug_dev_info * devip)
1604 {
1605 	unsigned char *cmd = scp->cmnd;
1606 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1607 	int alloc_len;
1608 
1609 	alloc_len = get_unaligned_be32(cmd + 10);
1610 	/* following just in case virtual_gb changed */
1611 	sdebug_capacity = get_sdebug_capacity();
1612 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1613 	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1614 	put_unaligned_be32(sdebug_sector_size, arr + 8);
1615 	arr[13] = sdebug_physblk_exp & 0xf;
1616 	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1617 
1618 	if (scsi_debug_lbp()) {
1619 		arr[14] |= 0x80; /* LBPME */
1620 		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1621 		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1622 		 * in the wider field maps to 0 in this field.
1623 		 */
1624 		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
1625 			arr[14] |= 0x40;
1626 	}
1627 
1628 	arr[15] = sdebug_lowest_aligned & 0xff;
1629 
1630 	if (have_dif_prot) {
1631 		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1632 		arr[12] |= 1; /* PROT_EN */
1633 	}
1634 
1635 	return fill_from_dev_buffer(scp, arr,
1636 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1637 }
1638 
1639 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1640 
/* REPORT TARGET PORT GROUPS. Returns two groups of one port each; group
 * B is reported unavailable. Access state for group A depends on
 * sdebug_vpd_use_hostno. Returns 0 or DID_REQUEUE << 16 on low memory.
 */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes its own 4 byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1716 
/*
 * Response to the REPORT SUPPORTED OPERATION CODES command (MAINTENANCE IN,
 * service action 0xc).  Walks opcode_info_arr to build either the "all
 * commands" parameter list (reporting option 0) or a one-command descriptor
 * (reporting options 1-3).  When the RCTD bit is set, a command timeouts
 * descriptor is appended to each entry (bump 20 instead of 8 bytes).
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);	/* return command timeouts descriptors */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	/* cap the working buffer at 8 KiB regardless of allocation length */
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per descriptor, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;		/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;		/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa,	/* timeouts desc len */
						   arr + offset + 8);
			/* descriptors for commands attached to this opcode */
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode needs a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* field pointer: requested service action */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached opcodes */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search the attached service actions */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage bitmap */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	/* clamp response to buffer size then to allocation length */
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1867 
1868 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1869 			  struct sdebug_dev_info *devip)
1870 {
1871 	bool repd;
1872 	u32 alloc_len, len;
1873 	u8 arr[16];
1874 	u8 *cmd = scp->cmnd;
1875 
1876 	memset(arr, 0, sizeof(arr));
1877 	repd = !!(cmd[2] & 0x80);
1878 	alloc_len = get_unaligned_be32(cmd + 6);
1879 	if (alloc_len < 4) {
1880 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1881 		return check_condition_result;
1882 	}
1883 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1884 	arr[1] = 0x1;		/* ITNRS */
1885 	if (repd) {
1886 		arr[3] = 0xc;
1887 		len = 16;
1888 	} else
1889 		len = 4;
1890 
1891 	len = (len < alloc_len) ? len : alloc_len;
1892 	return fill_from_dev_buffer(scp, arr, len);
1893 }
1894 
1895 /* <<Following mode page info copied from ST318451LW>> */
1896 
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page [0x1] for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
		5, 0, 0xff, 0xff,
	};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
1907 
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page [0x2] for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
1918 
1919 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1920 {       /* Format device page for mode_sense */
1921 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1922 				     0, 0, 0, 0, 0, 0, 0, 0,
1923 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1924 
1925 	memcpy(p, format_pg, sizeof(format_pg));
1926 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1927 	put_unaligned_be16(sdebug_sector_size, p + 12);
1928 	if (sdebug_removable)
1929 		p[20] |= 0x20; /* should agree with INQUIRY */
1930 	if (1 == pcontrol)
1931 		memset(p + 2, 0, sizeof(format_pg) - 2);
1932 	return sizeof(format_pg);
1933 }
1934 
/* Caching mode page [0x8], "current values".  Bit 0x4 of byte 2 is WCE
 * (write cache enable); resp_caching_pg() clears it when SDEBUG_OPT_N_WCE
 * is set, and MODE SELECT (resp_mode_select) may overwrite bytes 2.. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1938 
1939 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1940 { 	/* Caching page for mode_sense */
1941 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1942 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1943 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1944 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1945 
1946 	if (SDEBUG_OPT_N_WCE & sdebug_opts)
1947 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
1948 	memcpy(p, caching_pg, sizeof(caching_pg));
1949 	if (1 == pcontrol)
1950 		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1951 	else if (2 == pcontrol)
1952 		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1953 	return sizeof(caching_pg);
1954 }
1955 
/* Control mode page [0xa], "current values".  D_SENSE (byte 2 bit 0x4)
 * and ATO (byte 5 bit 0x80) are refreshed from module state by
 * resp_ctrl_m_pg(); MODE SELECT (resp_mode_select) may rewrite bytes 2.. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1958 
1959 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1960 { 	/* Control mode page for mode_sense */
1961 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1962 				        0, 0, 0, 0};
1963 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1964 				     0, 0, 0x2, 0x4b};
1965 
1966 	if (sdebug_dsense)
1967 		ctrl_m_pg[2] |= 0x4;
1968 	else
1969 		ctrl_m_pg[2] &= ~0x4;
1970 
1971 	if (sdebug_ato)
1972 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1973 
1974 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1975 	if (1 == pcontrol)
1976 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1977 	else if (2 == pcontrol)
1978 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1979 	return sizeof(ctrl_m_pg);
1980 }
1981 
1982 
1983 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1984 {	/* Informational Exceptions control mode page for mode_sense */
1985 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1986 				       0, 0, 0x0, 0x0};
1987 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1988 				      0, 0, 0x0, 0x0};
1989 
1990 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1991 	if (1 == pcontrol)
1992 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1993 	else if (2 == pcontrol)
1994 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1995 	return sizeof(iec_m_pg);
1996 }
1997 
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page [0x19], short format, for MODE SENSE */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0,
	};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
2008 
2009 
/*
 * SAS phy control and discover mode subpage [0x59,0x1] for MODE SENSE.
 * Reports two phys; NAA-3 SAS addresses are written into the template at
 * offsets 16/24 (phy 0) and 64/72 (phy 1) before it is copied out, then
 * the attached port identifiers are patched into the caller's buffer.
 */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* fill in SAS addresses for both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* attached port identifiers, one per 48 byte phy descriptor */
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2042 
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage [0x59,0x2] */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (pcontrol == 1)	/* changeable values: none are changeable */
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
2054 
#define SDEBUG_MAX_MSENSE_SZ 256

/*
 * Response to MODE SENSE(6) and MODE SENSE(10).  Builds a mode parameter
 * header, an optional block descriptor (8 bytes, or 16 when LLBAA is set)
 * and the requested mode page(s) via the resp_*_pg() helpers above.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char * ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	/* LLBAA only exists in the 10 byte CDB */
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	/* mode parameter header: 4 bytes (6 byte CDB) or 8 bytes (10 byte) */
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages only implemented for the SAS (0x19) page */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
        case 0x3:       /* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
                break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
	        }
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
                }
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* MODE DATA LENGTH excludes its own field (1 or 2 bytes) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
2214 
#define SDEBUG_MAX_MSELECT_SZ 512

/*
 * Response to MODE SELECT(6) and MODE SELECT(10).  Fetches the parameter
 * list from the data-out buffer and, for the caching (0x8), control (0xa)
 * and informational exceptions (0x1c) mode pages, copies the new values
 * over the corresponding page image above and raises a MODE PARAMETERS
 * CHANGED unit attention.  Any other page is rejected.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* page format; must be set */
	sp = cmd[1] & 0x1;	/* save pages; not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
        res = fetch_to_dev_buffer(scp, arr, param_len);
        if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* md_len folds the field size in, so this rejects parameter lists
	 * whose (reserved) mode data length field is effectively non-zero
	 * — NOTE(review): confirm the intended tolerance against SPC */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	/* skip header and block descriptors to the first page header */
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);	/* PS bit must be zero in data-out */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* subpage format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* mirror the new D_SENSE setting in module state */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2294 
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page [0xd] for LOG SENSE */
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* parameter 0 */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* parameter 1 */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2304 
2305 static int resp_ie_l_pg(unsigned char * arr)
2306 {
2307 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2308 		};
2309 
2310         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2311 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2312 		arr[4] = THRESHOLD_EXCEEDED;
2313 		arr[5] = 0xff;
2314 	}
2315         return sizeof(ie_l_pg);
2316 }
2317 
2318 #define SDEBUG_MAX_LSENSE_SZ 512
2319 
2320 static int resp_log_sense(struct scsi_cmnd * scp,
2321                           struct sdebug_dev_info * devip)
2322 {
2323 	int ppc, sp, pcode, subpcode, alloc_len, len, n;
2324 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2325 	unsigned char *cmd = scp->cmnd;
2326 
2327 	memset(arr, 0, sizeof(arr));
2328 	ppc = cmd[1] & 0x2;
2329 	sp = cmd[1] & 0x1;
2330 	if (ppc || sp) {
2331 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2332 		return check_condition_result;
2333 	}
2334 	pcode = cmd[2] & 0x3f;
2335 	subpcode = cmd[3] & 0xff;
2336 	alloc_len = get_unaligned_be16(cmd + 7);
2337 	arr[0] = pcode;
2338 	if (0 == subpcode) {
2339 		switch (pcode) {
2340 		case 0x0:	/* Supported log pages log page */
2341 			n = 4;
2342 			arr[n++] = 0x0;		/* this page */
2343 			arr[n++] = 0xd;		/* Temperature */
2344 			arr[n++] = 0x2f;	/* Informational exceptions */
2345 			arr[3] = n - 4;
2346 			break;
2347 		case 0xd:	/* Temperature log page */
2348 			arr[3] = resp_temp_l_pg(arr + 4);
2349 			break;
2350 		case 0x2f:	/* Informational exceptions log page */
2351 			arr[3] = resp_ie_l_pg(arr + 4);
2352 			break;
2353 		default:
2354 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2355 			return check_condition_result;
2356 		}
2357 	} else if (0xff == subpcode) {
2358 		arr[0] |= 0x40;
2359 		arr[1] = subpcode;
2360 		switch (pcode) {
2361 		case 0x0:	/* Supported log pages and subpages log page */
2362 			n = 4;
2363 			arr[n++] = 0x0;
2364 			arr[n++] = 0x0;		/* 0,0 page */
2365 			arr[n++] = 0x0;
2366 			arr[n++] = 0xff;	/* this page */
2367 			arr[n++] = 0xd;
2368 			arr[n++] = 0x0;		/* Temperature */
2369 			arr[n++] = 0x2f;
2370 			arr[n++] = 0x0;	/* Informational exceptions */
2371 			arr[3] = n - 4;
2372 			break;
2373 		case 0xd:	/* Temperature subpages */
2374 			n = 4;
2375 			arr[n++] = 0xd;
2376 			arr[n++] = 0x0;		/* Temperature */
2377 			arr[3] = n - 4;
2378 			break;
2379 		case 0x2f:	/* Informational exceptions subpages */
2380 			n = 4;
2381 			arr[n++] = 0x2f;
2382 			arr[n++] = 0x0;		/* Informational exceptions */
2383 			arr[3] = n - 4;
2384 			break;
2385 		default:
2386 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2387 			return check_condition_result;
2388 		}
2389 	} else {
2390 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2391 		return check_condition_result;
2392 	}
2393 	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2394 	return fill_from_dev_buffer(scp, arr,
2395 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
2396 }
2397 
2398 static int check_device_access_params(struct scsi_cmnd *scp,
2399 				      unsigned long long lba, unsigned int num)
2400 {
2401 	if (lba + num > sdebug_capacity) {
2402 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2403 		return check_condition_result;
2404 	}
2405 	/* transfer length excessive (tie in to block limits VPD page) */
2406 	if (num > sdebug_store_sectors) {
2407 		/* needs work to find which cdb byte 'num' comes from */
2408 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2409 		return check_condition_result;
2410 	}
2411 	return 0;
2412 }
2413 
/*
 * Copy 'num' sectors starting at 'lba' between the command's scatter-gather
 * list and the fake_storep ramdisk, direction chosen by 'do_write'.
 * The store is only sdebug_store_sectors big, so accesses wrap: lba is
 * reduced modulo the store size and any overrun ('rest') continues at
 * sector 0.  Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* data direction must match the command unless it is bidirectional */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	block = do_div(lba, sdebug_store_sectors);	/* lba mod store size */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrap-around: remainder copied from/to the start of store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2454 
2455 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2456  * arr into fake_store(lba,num) and return true. If comparison fails then
2457  * return false. */
2458 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2459 {
2460 	bool res;
2461 	u64 block, rest = 0;
2462 	u32 store_blks = sdebug_store_sectors;
2463 	u32 lb_size = sdebug_sector_size;
2464 
2465 	block = do_div(lba, store_blks);
2466 	if (block + num > store_blks)
2467 		rest = block + num - store_blks;
2468 
2469 	res = !memcmp(fake_storep + (block * lb_size), arr,
2470 		      (num - rest) * lb_size);
2471 	if (!res)
2472 		return res;
2473 	if (rest)
2474 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2475 			     rest * lb_size);
2476 	if (!res)
2477 		return res;
2478 	arr += num * lb_size;
2479 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2480 	if (rest)
2481 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2482 		       rest * lb_size);
2483 	return res;
2484 }
2485 
2486 static __be16 dif_compute_csum(const void *buf, int len)
2487 {
2488 	__be16 csum;
2489 
2490 	if (sdebug_guard)
2491 		csum = (__force __be16)ip_compute_csum(buf, len);
2492 	else
2493 		csum = cpu_to_be16(crc_t10dif(buf, len));
2494 
2495 	return csum;
2496 }
2497 
/*
 * Verify one protection information tuple against a sector's data.
 * Returns 0 on success, 0x01 on a guard (checksum) mismatch, or 0x03 on
 * a reference tag mismatch (type 1 protection checks against the low 32
 * bits of the sector number, type 2 against the expected initial LBA).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2524 
/*
 * Copy protection information for 'sectors' sectors starting at 'sector'
 * between the command's protection scatter-gather list and dif_storep.
 * read=true copies store->sgl, read=false copies sgl->store.  Like the
 * data store, dif_storep wraps at sdebug_store_sectors.
 */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* portion of this chunk running past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrap-around: continue at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2567 
2568 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2569 			    unsigned int sectors, u32 ei_lba)
2570 {
2571 	unsigned int i;
2572 	struct t10_pi_tuple *sdt;
2573 	sector_t sector;
2574 
2575 	for (i = 0; i < sectors; i++, ei_lba++) {
2576 		int ret;
2577 
2578 		sector = start_sec + i;
2579 		sdt = dif_store(sector);
2580 
2581 		if (sdt->app_tag == cpu_to_be16(0xffff))
2582 			continue;
2583 
2584 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2585 		if (ret) {
2586 			dif_errors++;
2587 			return ret;
2588 		}
2589 	}
2590 
2591 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2592 	dix_reads++;
2593 
2594 	return 0;
2595 }
2596 
/* Service READ(6/10/12/16/32) and the read leg of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, performs protection-information and
 * medium-error checks, then copies data from the fake store into the
 * command's data-in buffer. Returns 0 or a SCSI result/sense value.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* Decode starting LBA and transfer length per cdb variant. */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* tl of 0 means 256 */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* RDPROTECT (cmd[1] bits 7:5) must be 0 with DIF type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;	/* inject a short read */
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* fake a medium error if the request overlaps the configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report how many bytes were NOT transferred */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* deferred error injections flagged on the queued command */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2737 
/* Log a hex/ASCII dump of a sector to the kernel log, 16 bytes per row.
 * Printable ASCII bytes are shown as characters, everything else as hex.
 * NOTE(review): assumes len is a multiple of 16 (callers pass
 * sdebug_sector_size); a ragged final row would read past buf.
 */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			/* printable ASCII is 0x20..0x7e inclusive; the
			 * previous test (c < 0x7e) wrongly excluded '~' */
			if (c >= 0x20 && c < 0x7f)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", c);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", c);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2759 
/* Verify T10 PI tuples supplied with a protected WRITE against the data
 * payload, walking the protection and data scatterlists in lock step.
 * On success the tuples are copied into dif_storep via dif_copy_prot().
 * Returns 0 on success or a non-zero sense-key qualifier (also used as
 * ASCQ by the caller) on the first mismatch.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* byte offset within current protection page */
	int dpage_offset;	/* byte offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;	/* ran out of data pages: mismatch */
			goto out;
		}

		/* one PI tuple per logical block in this protection page */
		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* hand partially-consumed data page back to the iterator */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2831 
2832 static unsigned long lba_to_map_index(sector_t lba)
2833 {
2834 	if (sdebug_unmap_alignment)
2835 		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2836 	sector_div(lba, sdebug_unmap_granularity);
2837 	return lba;
2838 }
2839 
2840 static sector_t map_index_to_lba(unsigned long index)
2841 {
2842 	sector_t lba = index * sdebug_unmap_granularity;
2843 
2844 	if (sdebug_unmap_alignment)
2845 		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2846 	return lba;
2847 }
2848 
2849 static unsigned int map_state(sector_t lba, unsigned int *num)
2850 {
2851 	sector_t end;
2852 	unsigned int mapped;
2853 	unsigned long index;
2854 	unsigned long next;
2855 
2856 	index = lba_to_map_index(lba);
2857 	mapped = test_bit(index, map_storep);
2858 
2859 	if (mapped)
2860 		next = find_next_zero_bit(map_storep, map_size, index);
2861 	else
2862 		next = find_next_bit(map_storep, map_size, index);
2863 
2864 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2865 	*num = end - lba;
2866 	return mapped;
2867 }
2868 
2869 static void map_region(sector_t lba, unsigned int len)
2870 {
2871 	sector_t end = lba + len;
2872 
2873 	while (lba < end) {
2874 		unsigned long index = lba_to_map_index(lba);
2875 
2876 		if (index < map_size)
2877 			set_bit(index, map_storep);
2878 
2879 		lba = map_index_to_lba(index + 1);
2880 	}
2881 }
2882 
/* Deallocate the provisioning granules wholly contained in [lba, lba+len).
 * Granules only partially covered by the range are left mapped. When
 * sdebug_lbprz is set, deallocated data reads back as zeroes (lbprz & 1)
 * or 0xff bytes; any protection info for the granule is invalidated with
 * 0xff tag bytes.
 */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only clear a granule if lba sits on its first block and
		 * the whole granule fits inside the requested range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* 0xff app tag marks PI as unused */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2910 
/* Service WRITE(6/10/12/16/32) and the write leg of XDWRITEREAD(10).
 * Decodes lba/num from the cdb, performs protection-information checks,
 * copies the data-out buffer into the fake store and updates the
 * provisioning map. Returns 0 or a SCSI result/sense value.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* Decode starting LBA and transfer length per cdb variant. */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* tl of 0 means 256 */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* WRPROTECT (cmd[1] bits 7:5) must be 0 with DIF type 2 */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);	/* written blocks are now mapped */
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* deferred error injections flagged on the queued command */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
3031 
/* Common worker for WRITE SAME(10/16). If unmap is requested and logical
 * block provisioning is enabled, the range is simply deallocated.
 * Otherwise one logical block is fetched from the data-out buffer (or
 * zeroed when ndob is set) and replicated across the whole range.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of lba within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3082 
3083 static int resp_write_same_10(struct scsi_cmnd *scp,
3084 			      struct sdebug_dev_info *devip)
3085 {
3086 	u8 *cmd = scp->cmnd;
3087 	u32 lba;
3088 	u16 num;
3089 	u32 ei_lba = 0;
3090 	bool unmap = false;
3091 
3092 	if (cmd[1] & 0x8) {
3093 		if (sdebug_lbpws10 == 0) {
3094 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3095 			return check_condition_result;
3096 		} else
3097 			unmap = true;
3098 	}
3099 	lba = get_unaligned_be32(cmd + 2);
3100 	num = get_unaligned_be16(cmd + 7);
3101 	if (num > sdebug_write_same_length) {
3102 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3103 		return check_condition_result;
3104 	}
3105 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3106 }
3107 
3108 static int resp_write_same_16(struct scsi_cmnd *scp,
3109 			      struct sdebug_dev_info *devip)
3110 {
3111 	u8 *cmd = scp->cmnd;
3112 	u64 lba;
3113 	u32 num;
3114 	u32 ei_lba = 0;
3115 	bool unmap = false;
3116 	bool ndob = false;
3117 
3118 	if (cmd[1] & 0x8) {	/* UNMAP */
3119 		if (sdebug_lbpws == 0) {
3120 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3121 			return check_condition_result;
3122 		} else
3123 			unmap = true;
3124 	}
3125 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3126 		ndob = true;
3127 	lba = get_unaligned_be64(cmd + 2);
3128 	num = get_unaligned_be32(cmd + 10);
3129 	if (num > sdebug_write_same_length) {
3130 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3131 		return check_condition_result;
3132 	}
3133 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3134 }
3135 
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
/* WRITE BUFFER: this driver only simulates the microcode-download modes
 * by raising appropriate unit attentions; no data is actually stored.
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				/* issuing LU already got BUS_RESET above */
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3184 
/* COMPARE AND WRITE (0x89): the data-out buffer carries num blocks to
 * compare followed by num blocks to write. Both halves are staged into a
 * temporary buffer by temporarily pointing fake_storep at it, then
 * comp_write_worker() does the compare-and-conditional-write against the
 * real store. MISCOMPARE sense is returned when the compare fails.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;	/* blocks to fetch: compare half + write half */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);	/* the write half mapped the range */
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3260 
/* UNMAP parameter list block descriptor (SBC): 16 bytes, big-endian. */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to deallocate */
	__be32	blocks;		/* number of logical blocks */
	__be32	__reserved;
};
3266 
/* UNMAP (0x42): parse the parameter list of block descriptors and
 * deallocate each described range under the store's write lock.
 * NOTE(review): the BUG_ONs below fire on malformed, host-supplied
 * parameter data; graceful CHECK CONDITION handling would be safer.
 */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;
	unsigned long iflags;


	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter list header, then 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* header lengths must agree with the cdb's parameter list length */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock_irqsave(&atomic_rw, iflags);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(buf);

	return ret;
}
3322 
3323 #define SDEBUG_GET_LBA_STATUS_LEN 32
3324 
3325 static int resp_get_lba_status(struct scsi_cmnd *scp,
3326 			       struct sdebug_dev_info *devip)
3327 {
3328 	u8 *cmd = scp->cmnd;
3329 	u64 lba;
3330 	u32 alloc_len, mapped, num;
3331 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3332 	int ret;
3333 
3334 	lba = get_unaligned_be64(cmd + 2);
3335 	alloc_len = get_unaligned_be32(cmd + 10);
3336 
3337 	if (alloc_len < 24)
3338 		return 0;
3339 
3340 	ret = check_device_access_params(scp, lba, 1);
3341 	if (ret)
3342 		return ret;
3343 
3344 	if (scsi_debug_lbp())
3345 		mapped = map_state(lba, &num);
3346 	else {
3347 		mapped = 1;
3348 		/* following just in case virtual_gb changed */
3349 		sdebug_capacity = get_sdebug_capacity();
3350 		if (sdebug_capacity - lba <= 0xffffffff)
3351 			num = sdebug_capacity - lba;
3352 		else
3353 			num = 0xffffffff;
3354 	}
3355 
3356 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3357 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3358 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3359 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3360 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3361 
3362 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3363 }
3364 
3365 #define RL_BUCKET_ELEMS 8
3366 
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into the response */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	/* SELECT REPORT field decides which LUN classes are reported */
	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	/* emit the response in buckets of RL_BUCKET_ELEMS 8-byte entries;
	 * bucket 0 starts with the 8-byte header in place of a LUN entry */
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* last (partial) bucket: flushed below */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN entry to the final bucket if requested */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3465 
3466 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3467 			    unsigned int num, struct sdebug_dev_info *devip)
3468 {
3469 	int j;
3470 	unsigned char *kaddr, *buf;
3471 	unsigned int offset;
3472 	struct scsi_data_buffer *sdb = scsi_in(scp);
3473 	struct sg_mapping_iter miter;
3474 
3475 	/* better not to use temporary buffer. */
3476 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3477 	if (!buf) {
3478 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3479 				INSUFF_RES_ASCQ);
3480 		return check_condition_result;
3481 	}
3482 
3483 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3484 
3485 	offset = 0;
3486 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3487 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3488 
3489 	while (sg_miter_next(&miter)) {
3490 		kaddr = miter.addr;
3491 		for (j = 0; j < miter.length; j++)
3492 			*(kaddr + j) ^= *(buf + offset + j);
3493 
3494 		offset += miter.length;
3495 	}
3496 	sg_miter_stop(&miter);
3497 	kfree(buf);
3498 
3499 	return 0;
3500 }
3501 
3502 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3503 			       struct sdebug_dev_info *devip)
3504 {
3505 	u8 *cmd = scp->cmnd;
3506 	u64 lba;
3507 	u32 num;
3508 	int errsts;
3509 
3510 	if (!scsi_bidi_cmnd(scp)) {
3511 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3512 				INSUFF_RES_ASCQ);
3513 		return check_condition_result;
3514 	}
3515 	errsts = resp_read_dt0(scp, devip);
3516 	if (errsts)
3517 		return errsts;
3518 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3519 		errsts = resp_write_dt0(scp, devip);
3520 		if (errsts)
3521 			return errsts;
3522 	}
3523 	lba = get_unaligned_be32(cmd + 2);
3524 	num = get_unaligned_be16(cmd + 7);
3525 	return resp_xdwriteread(scp, lba, num, devip);
3526 }
3527 
3528 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3529 {
3530 	struct sdebug_queue *sqp = sdebug_q_arr;
3531 
3532 	if (sdebug_mq_active) {
3533 		u32 tag = blk_mq_unique_tag(cmnd->request);
3534 		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3535 
3536 		if (unlikely(hwq >= submit_queues)) {
3537 			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3538 			hwq %= submit_queues;
3539 		}
3540 		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3541 		return sqp + hwq;
3542 	} else
3543 		return sqp;
3544 }
3545 
/* Queued (deferred) command completions converge here. */
/* Looks up the queued command from the deferred-work descriptor, clears
 * its in-use slot under the queue lock, handles a pending max_queue
 * reduction, then calls the mid level's done callback.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions migrating off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* command already gone (e.g. aborted) */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means user shrank max_queue */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no slot above the new limit is busy, stop retiring */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3610 
3611 /* When high resolution timer goes off this function is called. */
3612 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3613 {
3614 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3615 						  hrt);
3616 	sdebug_q_cmd_complete(sd_dp);
3617 	return HRTIMER_NORESTART;
3618 }
3619 
3620 /* When work queue schedules work, it calls this function. */
3621 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3622 {
3623 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3624 						  ew.work);
3625 	sdebug_q_cmd_complete(sd_dp);
3626 }
3627 
3628 static bool got_shared_uuid;
3629 static uuid_t shared_uuid;
3630 
3631 static struct sdebug_dev_info *sdebug_device_create(
3632 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3633 {
3634 	struct sdebug_dev_info *devip;
3635 
3636 	devip = kzalloc(sizeof(*devip), flags);
3637 	if (devip) {
3638 		if (sdebug_uuid_ctl == 1)
3639 			uuid_gen(&devip->lu_name);
3640 		else if (sdebug_uuid_ctl == 2) {
3641 			if (got_shared_uuid)
3642 				devip->lu_name = shared_uuid;
3643 			else {
3644 				uuid_gen(&shared_uuid);
3645 				got_shared_uuid = true;
3646 				devip->lu_name = shared_uuid;
3647 			}
3648 		}
3649 		devip->sdbg_host = sdbg_host;
3650 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3651 	}
3652 	return devip;
3653 }
3654 
3655 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3656 {
3657 	struct sdebug_host_info *sdbg_host;
3658 	struct sdebug_dev_info *open_devip = NULL;
3659 	struct sdebug_dev_info *devip;
3660 
3661 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3662 	if (!sdbg_host) {
3663 		pr_err("Host info NULL\n");
3664 		return NULL;
3665         }
3666 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3667 		if ((devip->used) && (devip->channel == sdev->channel) &&
3668                     (devip->target == sdev->id) &&
3669                     (devip->lun == sdev->lun))
3670                         return devip;
3671 		else {
3672 			if ((!devip->used) && (!open_devip))
3673 				open_devip = devip;
3674 		}
3675 	}
3676 	if (!open_devip) { /* try and make a new one */
3677 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3678 		if (!open_devip) {
3679 			pr_err("out of memory at line %d\n", __LINE__);
3680 			return NULL;
3681 		}
3682 	}
3683 
3684 	open_devip->channel = sdev->channel;
3685 	open_devip->target = sdev->id;
3686 	open_devip->lun = sdev->lun;
3687 	open_devip->sdbg_host = sdbg_host;
3688 	atomic_set(&open_devip->num_in_q, 0);
3689 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3690 	open_devip->used = true;
3691 	return open_devip;
3692 }
3693 
/* scsi_host_template ->slave_alloc: called when the mid-layer creates a
 * scsi_device for this host. Marks the request queue as supporting
 * bidirectional (BIDI) commands. Always succeeds. */
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}
3702 
/* scsi_host_template ->slave_configure: bind (or create) the per-device
 * sdebug_dev_info and tune queue limits. Returns 0 on success, 1 when no
 * device slot could be obtained (mid-layer then marks the device offline). */
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	/* -1U: effectively no per-segment size limit on this fake device */
	blk_queue_max_segment_size(sdp->request_queue, -1U);
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;	/* keep upper-level drivers (sd) away */
	config_cdb_len(sdp);
	return 0;
}
3725 
3726 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3727 {
3728 	struct sdebug_dev_info *devip =
3729 		(struct sdebug_dev_info *)sdp->hostdata;
3730 
3731 	if (sdebug_verbose)
3732 		pr_info("slave_destroy <%u %u %u %llu>\n",
3733 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3734 	if (devip) {
3735 		/* make this slot available for re-use */
3736 		devip->used = false;
3737 		sdp->hostdata = NULL;
3738 	}
3739 }
3740 
3741 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3742 {
3743 	if (!sd_dp)
3744 		return;
3745 	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3746 		hrtimer_cancel(&sd_dp->hrt);
3747 	else if (sdebug_jdelay < 0)
3748 		cancel_work_sync(&sd_dp->ew.work);
3749 }
3750 
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	/* scan every submission queue for the slot holding @cmnd */
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* scan up to the larger of the live and retired limits */
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock before stop_qc_helper():
				 * cancel_work_sync() may sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				/* NOTE(review): bit cleared outside qc_lock;
				 * presumably safe because a_cmnd was NULLed
				 * under the lock - confirm */
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3790 
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		/* full-width scan: every possible slot, not just max_queue */
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock around stop_qc_helper():
				 * cancel_work_sync() may sleep; re-take it
				 * afterwards to continue the scan */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3823 
3824 /* Free queued command memory on heap */
3825 static void free_all_queued(void)
3826 {
3827 	int j, k;
3828 	struct sdebug_queue *sqp;
3829 	struct sdebug_queued_cmd *sqcp;
3830 
3831 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3832 		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3833 			sqcp = &sqp->qc_arr[k];
3834 			kfree(sqcp->sd_dp);
3835 			sqcp->sd_dp = NULL;
3836 		}
3837 	}
3838 }
3839 
3840 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3841 {
3842 	bool ok;
3843 
3844 	++num_aborts;
3845 	if (SCpnt) {
3846 		ok = stop_queued_cmnd(SCpnt);
3847 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3848 			sdev_printk(KERN_INFO, SCpnt->device,
3849 				    "%s: command%s found\n", __func__,
3850 				    ok ? "" : " not");
3851 	}
3852 	return SUCCESS;
3853 }
3854 
3855 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3856 {
3857 	++num_dev_resets;
3858 	if (SCpnt && SCpnt->device) {
3859 		struct scsi_device *sdp = SCpnt->device;
3860 		struct sdebug_dev_info *devip =
3861 				(struct sdebug_dev_info *)sdp->hostdata;
3862 
3863 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3864 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3865 		if (devip)
3866 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3867 	}
3868 	return SUCCESS;
3869 }
3870 
3871 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3872 {
3873 	struct sdebug_host_info *sdbg_host;
3874 	struct sdebug_dev_info *devip;
3875 	struct scsi_device *sdp;
3876 	struct Scsi_Host *hp;
3877 	int k = 0;
3878 
3879 	++num_target_resets;
3880 	if (!SCpnt)
3881 		goto lie;
3882 	sdp = SCpnt->device;
3883 	if (!sdp)
3884 		goto lie;
3885 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3886 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3887 	hp = sdp->host;
3888 	if (!hp)
3889 		goto lie;
3890 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3891 	if (sdbg_host) {
3892 		list_for_each_entry(devip,
3893 				    &sdbg_host->dev_info_list,
3894 				    dev_list)
3895 			if (devip->target == sdp->id) {
3896 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3897 				++k;
3898 			}
3899 	}
3900 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3901 		sdev_printk(KERN_INFO, sdp,
3902 			    "%s: %d device(s) found in target\n", __func__, k);
3903 lie:
3904 	return SUCCESS;
3905 }
3906 
3907 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3908 {
3909 	struct sdebug_host_info *sdbg_host;
3910 	struct sdebug_dev_info *devip;
3911         struct scsi_device * sdp;
3912         struct Scsi_Host * hp;
3913 	int k = 0;
3914 
3915 	++num_bus_resets;
3916 	if (!(SCpnt && SCpnt->device))
3917 		goto lie;
3918 	sdp = SCpnt->device;
3919 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3920 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3921 	hp = sdp->host;
3922 	if (hp) {
3923 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3924 		if (sdbg_host) {
3925 			list_for_each_entry(devip,
3926                                             &sdbg_host->dev_info_list,
3927 					    dev_list) {
3928 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3929 				++k;
3930 			}
3931 		}
3932 	}
3933 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3934 		sdev_printk(KERN_INFO, sdp,
3935 			    "%s: %d device(s) found in host\n", __func__, k);
3936 lie:
3937 	return SUCCESS;
3938 }
3939 
3940 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3941 {
3942 	struct sdebug_host_info * sdbg_host;
3943 	struct sdebug_dev_info *devip;
3944 	int k = 0;
3945 
3946 	++num_host_resets;
3947 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3948 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3949         spin_lock(&sdebug_host_list_lock);
3950         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3951 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3952 				    dev_list) {
3953 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3954 			++k;
3955 		}
3956         }
3957         spin_unlock(&sdebug_host_list_lock);
3958 	stop_all_queued();
3959 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3960 		sdev_printk(KERN_INFO, SCpnt->device,
3961 			    "%s: %d device(s) found\n", __func__, k);
3962 	return SUCCESS;
3963 }
3964 
/* Write a classic DOS (MBR) partition table into the first sector of the
 * ram store @ramp, dividing the device into sdebug_num_parts equal Linux
 * (0x83) partitions aligned to cylinder boundaries. No-op for stores
 * smaller than 1 MiB or when no partitions were requested. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* convert linear sector numbers to CHS; sector is 1-based */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* on-disk fields are little-endian */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
4014 
4015 static void block_unblock_all_queues(bool block)
4016 {
4017 	int j;
4018 	struct sdebug_queue *sqp;
4019 
4020 	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4021 		atomic_set(&sqp->blocked, (int)block);
4022 }
4023 
4024 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4025  * commands will be processed normally before triggers occur.
4026  */
4027 static void tweak_cmnd_count(void)
4028 {
4029 	int count, modulo;
4030 
4031 	modulo = abs(sdebug_every_nth);
4032 	if (modulo < 2)
4033 		return;
4034 	block_unblock_all_queues(true);
4035 	count = atomic_read(&sdebug_cmnd_count);
4036 	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4037 	block_unblock_all_queues(false);
4038 }
4039 
4040 static void clear_queue_stats(void)
4041 {
4042 	atomic_set(&sdebug_cmnd_count, 0);
4043 	atomic_set(&sdebug_completions, 0);
4044 	atomic_set(&sdebug_miss_cpus, 0);
4045 	atomic_set(&sdebug_a_tsf, 0);
4046 }
4047 
4048 static void setup_inject(struct sdebug_queue *sqp,
4049 			 struct sdebug_queued_cmd *sqcp)
4050 {
4051 	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
4052 		return;
4053 	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4054 	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4055 	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4056 	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4057 	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4058 }
4059 
4060 /* Complete the processing of the thread that queued a SCSI command to this
4061  * driver. It either completes the command by calling cmnd_done() or
4062  * schedules a hr timer or work queue then returns 0. Returns
4063  * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4064  */
4065 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4066 			 int scsi_result, int delta_jiff)
4067 {
4068 	unsigned long iflags;
4069 	int k, num_in_q, qdepth, inject;
4070 	struct sdebug_queue *sqp;
4071 	struct sdebug_queued_cmd *sqcp;
4072 	struct scsi_device *sdp;
4073 	struct sdebug_defer *sd_dp;
4074 
4075 	if (unlikely(devip == NULL)) {
4076 		if (scsi_result == 0)
4077 			scsi_result = DID_NO_CONNECT << 16;
4078 		goto respond_in_thread;
4079 	}
4080 	sdp = cmnd->device;
4081 
4082 	if (unlikely(sdebug_verbose && scsi_result))
4083 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4084 			    __func__, scsi_result);
4085 	if (delta_jiff == 0)
4086 		goto respond_in_thread;
4087 
4088 	/* schedule the response at a later time if resources permit */
4089 	sqp = get_queue(cmnd);
4090 	spin_lock_irqsave(&sqp->qc_lock, iflags);
4091 	if (unlikely(atomic_read(&sqp->blocked))) {
4092 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4093 		return SCSI_MLQUEUE_HOST_BUSY;
4094 	}
4095 	num_in_q = atomic_read(&devip->num_in_q);
4096 	qdepth = cmnd->device->queue_depth;
4097 	inject = 0;
4098 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4099 		if (scsi_result) {
4100 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4101 			goto respond_in_thread;
4102 		} else
4103 			scsi_result = device_qfull_result;
4104 	} else if (unlikely(sdebug_every_nth &&
4105 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4106 			    (scsi_result == 0))) {
4107 		if ((num_in_q == (qdepth - 1)) &&
4108 		    (atomic_inc_return(&sdebug_a_tsf) >=
4109 		     abs(sdebug_every_nth))) {
4110 			atomic_set(&sdebug_a_tsf, 0);
4111 			inject = 1;
4112 			scsi_result = device_qfull_result;
4113 		}
4114 	}
4115 
4116 	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4117 	if (unlikely(k >= sdebug_max_queue)) {
4118 		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4119 		if (scsi_result)
4120 			goto respond_in_thread;
4121 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4122 			scsi_result = device_qfull_result;
4123 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4124 			sdev_printk(KERN_INFO, sdp,
4125 				    "%s: max_queue=%d exceeded, %s\n",
4126 				    __func__, sdebug_max_queue,
4127 				    (scsi_result ?  "status: TASK SET FULL" :
4128 						    "report: host busy"));
4129 		if (scsi_result)
4130 			goto respond_in_thread;
4131 		else
4132 			return SCSI_MLQUEUE_HOST_BUSY;
4133 	}
4134 	__set_bit(k, sqp->in_use_bm);
4135 	atomic_inc(&devip->num_in_q);
4136 	sqcp = &sqp->qc_arr[k];
4137 	sqcp->a_cmnd = cmnd;
4138 	cmnd->host_scribble = (unsigned char *)sqcp;
4139 	cmnd->result = scsi_result;
4140 	sd_dp = sqcp->sd_dp;
4141 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4142 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4143 		setup_inject(sqp, sqcp);
4144 	if (delta_jiff > 0 || sdebug_ndelay > 0) {
4145 		ktime_t kt;
4146 
4147 		if (delta_jiff > 0) {
4148 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4149 		} else
4150 			kt = sdebug_ndelay;
4151 		if (NULL == sd_dp) {
4152 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4153 			if (NULL == sd_dp)
4154 				return SCSI_MLQUEUE_HOST_BUSY;
4155 			sqcp->sd_dp = sd_dp;
4156 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4157 				     HRTIMER_MODE_REL_PINNED);
4158 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4159 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4160 			sd_dp->qc_idx = k;
4161 		}
4162 		if (sdebug_statistics)
4163 			sd_dp->issuing_cpu = raw_smp_processor_id();
4164 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4165 	} else {	/* jdelay < 0, use work queue */
4166 		if (NULL == sd_dp) {
4167 			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
4168 			if (NULL == sd_dp)
4169 				return SCSI_MLQUEUE_HOST_BUSY;
4170 			sqcp->sd_dp = sd_dp;
4171 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
4172 			sd_dp->qc_idx = k;
4173 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4174 		}
4175 		if (sdebug_statistics)
4176 			sd_dp->issuing_cpu = raw_smp_processor_id();
4177 		schedule_work(&sd_dp->ew.work);
4178 	}
4179 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4180 		     (scsi_result == device_qfull_result)))
4181 		sdev_printk(KERN_INFO, sdp,
4182 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
4183 			    num_in_q, (inject ? "<inject> " : ""),
4184 			    "status: TASK SET FULL");
4185 	return 0;
4186 
4187 respond_in_thread:	/* call back to mid-layer using invocation thread */
4188 	cmnd->result = scsi_result;
4189 	cmnd->scsi_done(cmnd);
4190 	return 0;
4191 }
4192 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* S_IRUGO-only parameters are fixed at load time; S_IWUSR ones may be
 * rewritten (by root) at runtime via sysfs. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
4251 
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line help strings shown by `modinfo scsi_debug`, in the same order
 * as the module_param declarations above. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4307 
4308 #define SDEBUG_INFO_LEN 256
4309 static char sdebug_info[SDEBUG_INFO_LEN];
4310 
4311 static const char * scsi_debug_info(struct Scsi_Host * shp)
4312 {
4313 	int k;
4314 
4315 	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4316 		      my_name, SDEBUG_VERSION, sdebug_version_date);
4317 	if (k >= (SDEBUG_INFO_LEN - 1))
4318 		return sdebug_info;
4319 	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4320 		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4321 		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
4322 		  "statistics", (int)sdebug_statistics);
4323 	return sdebug_info;
4324 }
4325 
4326 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4327 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4328 				 int length)
4329 {
4330 	char arr[16];
4331 	int opts;
4332 	int minLen = length > 15 ? 15 : length;
4333 
4334 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4335 		return -EACCES;
4336 	memcpy(arr, buffer, minLen);
4337 	arr[minLen] = '\0';
4338 	if (1 != sscanf(arr, "%d", &opts))
4339 		return -EINVAL;
4340 	sdebug_opts = opts;
4341 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4342 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4343 	if (sdebug_every_nth != 0)
4344 		tweak_cmnd_count();
4345 	return length;
4346 }
4347 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* per-queue snapshot: report the occupied range of each in_use_bm */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4393 
/* sysfs 'delay' attribute read: current jiffy delay. */
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* block submissions while we scan for busy queues */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4436 
/* sysfs 'ndelay' attribute read: current nanosecond delay. */
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < 1 second (in nanoseconds) */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* block submissions while we scan for busy queues */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4480 
4481 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4482 {
4483 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4484 }
4485 
4486 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4487 			  size_t count)
4488 {
4489         int opts;
4490 	char work[20];
4491 
4492         if (1 == sscanf(buf, "%10s", work)) {
4493 		if (0 == strncasecmp(work,"0x", 2)) {
4494 			if (1 == sscanf(&work[2], "%x", &opts))
4495 				goto opts_done;
4496 		} else {
4497 			if (1 == sscanf(work, "%d", &opts))
4498 				goto opts_done;
4499 		}
4500 	}
4501 	return -EINVAL;
4502 opts_done:
4503 	sdebug_opts = opts;
4504 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4505 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4506 	tweak_cmnd_count();
4507 	return count;
4508 }
4509 static DRIVER_ATTR_RW(opts);
4510 
4511 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4512 {
4513 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4514 }
4515 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4516 			   size_t count)
4517 {
4518         int n;
4519 
4520 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4521 		sdebug_ptype = n;
4522 		return count;
4523 	}
4524 	return -EINVAL;
4525 }
4526 static DRIVER_ATTR_RW(ptype);
4527 
4528 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4529 {
4530 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4531 }
4532 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4533 			    size_t count)
4534 {
4535         int n;
4536 
4537 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4538 		sdebug_dsense = n;
4539 		return count;
4540 	}
4541 	return -EINVAL;
4542 }
4543 static DRIVER_ATTR_RW(dsense);
4544 
4545 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4546 {
4547 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4548 }
4549 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4550 			     size_t count)
4551 {
4552         int n;
4553 
4554 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4555 		n = (n > 0);
4556 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4557 		if (sdebug_fake_rw != n) {
4558 			if ((0 == n) && (NULL == fake_storep)) {
4559 				unsigned long sz =
4560 					(unsigned long)sdebug_dev_size_mb *
4561 					1048576;
4562 
4563 				fake_storep = vmalloc(sz);
4564 				if (NULL == fake_storep) {
4565 					pr_err("out of memory, 9\n");
4566 					return -ENOMEM;
4567 				}
4568 				memset(fake_storep, 0, sz);
4569 			}
4570 			sdebug_fake_rw = n;
4571 		}
4572 		return count;
4573 	}
4574 	return -EINVAL;
4575 }
4576 static DRIVER_ATTR_RW(fake_rw);
4577 
4578 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4579 {
4580 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4581 }
4582 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4583 			      size_t count)
4584 {
4585         int n;
4586 
4587 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4588 		sdebug_no_lun_0 = n;
4589 		return count;
4590 	}
4591 	return -EINVAL;
4592 }
4593 static DRIVER_ATTR_RW(no_lun_0);
4594 
4595 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4596 {
4597 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4598 }
4599 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4600 			      size_t count)
4601 {
4602         int n;
4603 
4604 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4605 		sdebug_num_tgts = n;
4606 		sdebug_max_tgts_luns();
4607 		return count;
4608 	}
4609 	return -EINVAL;
4610 }
4611 static DRIVER_ATTR_RW(num_tgts);
4612 
4613 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4614 {
4615 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4616 }
4617 static DRIVER_ATTR_RO(dev_size_mb);
4618 
4619 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4620 {
4621 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4622 }
4623 static DRIVER_ATTR_RO(num_parts);
4624 
4625 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4626 {
4627 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4628 }
4629 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4630 			       size_t count)
4631 {
4632         int nth;
4633 
4634 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4635 		sdebug_every_nth = nth;
4636 		if (nth && !sdebug_statistics) {
4637 			pr_info("every_nth needs statistics=1, set it\n");
4638 			sdebug_statistics = true;
4639 		}
4640 		tweak_cmnd_count();
4641 		return count;
4642 	}
4643 	return -EINVAL;
4644 }
4645 static DRIVER_ATTR_RW(every_nth);
4646 
4647 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4648 {
4649 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4650 }
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
        int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag a LUNS_CHANGED unit attention on every
			 * simulated device of every simulated host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4685 
4686 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4687 {
4688 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4689 }
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k becomes the highest in-use slot index across all
		 * submit queues (find_last_bit returns the bitmap size
		 * when no bit is set) */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* slots above the new limit are still busy; record
			 * the boundary so they can drain */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4721 
4722 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4723 {
4724 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4725 }
4726 static DRIVER_ATTR_RO(no_uld);
4727 
4728 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4729 {
4730 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4731 }
4732 static DRIVER_ATTR_RO(scsi_level);
4733 
4734 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4735 {
4736 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4737 }
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
        int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag a CAPACITY_CHANGED unit attention on every
			 * simulated device of every simulated host */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4768 
4769 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4770 {
4771 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4772 }
4773 
4774 static int sdebug_add_adapter(void);
4775 static void sdebug_remove_adapter(void);
4776 
4777 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4778 			      size_t count)
4779 {
4780 	int delta_hosts;
4781 
4782 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4783 		return -EINVAL;
4784 	if (delta_hosts > 0) {
4785 		do {
4786 			sdebug_add_adapter();
4787 		} while (--delta_hosts);
4788 	} else if (delta_hosts < 0) {
4789 		do {
4790 			sdebug_remove_adapter();
4791 		} while (++delta_hosts);
4792 	}
4793 	return count;
4794 }
4795 static DRIVER_ATTR_RW(add_host);
4796 
4797 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4798 {
4799 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4800 }
4801 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4802 				    size_t count)
4803 {
4804 	int n;
4805 
4806 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4807 		sdebug_vpd_use_hostno = n;
4808 		return count;
4809 	}
4810 	return -EINVAL;
4811 }
4812 static DRIVER_ATTR_RW(vpd_use_hostno);
4813 
4814 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4815 {
4816 	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4817 }
4818 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4819 				size_t count)
4820 {
4821 	int n;
4822 
4823 	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4824 		if (n > 0)
4825 			sdebug_statistics = true;
4826 		else {
4827 			clear_queue_stats();
4828 			sdebug_statistics = false;
4829 		}
4830 		return count;
4831 	}
4832 	return -EINVAL;
4833 }
4834 static DRIVER_ATTR_RW(statistics);
4835 
4836 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4837 {
4838 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4839 }
4840 static DRIVER_ATTR_RO(sector_size);
4841 
4842 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4843 {
4844 	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4845 }
4846 static DRIVER_ATTR_RO(submit_queues);
4847 
4848 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4849 {
4850 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4851 }
4852 static DRIVER_ATTR_RO(dix);
4853 
4854 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4855 {
4856 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4857 }
4858 static DRIVER_ATTR_RO(dif);
4859 
4860 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4861 {
4862 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4863 }
4864 static DRIVER_ATTR_RO(guard);
4865 
4866 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4867 {
4868 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4869 }
4870 static DRIVER_ATTR_RO(ato);
4871 
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* without logical block provisioning the whole store is mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* render the provisioning bitmap as a range list; bounding the
	 * print at PAGE_SIZE - 1 guarantees the "\n" and "\0" appended
	 * below still fit inside the PAGE_SIZE buffer */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4888 
4889 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4890 {
4891 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4892 }
4893 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4894 			       size_t count)
4895 {
4896 	int n;
4897 
4898 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4899 		sdebug_removable = (n > 0);
4900 		return count;
4901 	}
4902 	return -EINVAL;
4903 }
4904 static DRIVER_ATTR_RW(removable);
4905 
4906 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4907 {
4908 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4909 }
4910 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4911 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4912 			       size_t count)
4913 {
4914 	int n;
4915 
4916 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4917 		sdebug_host_lock = (n > 0);
4918 		return count;
4919 	}
4920 	return -EINVAL;
4921 }
4922 static DRIVER_ATTR_RW(host_lock);
4923 
4924 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4925 {
4926 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4927 }
4928 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4929 			    size_t count)
4930 {
4931 	int n;
4932 
4933 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4934 		sdebug_strict = (n > 0);
4935 		return count;
4936 	}
4937 	return -EINVAL;
4938 }
4939 static DRIVER_ATTR_RW(strict);
4940 
4941 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4942 {
4943 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4944 }
4945 static DRIVER_ATTR_RO(uuid_ctl);
4946 
4947 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
4948 {
4949 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
4950 }
4951 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
4952 			     size_t count)
4953 {
4954 	int ret, n;
4955 
4956 	ret = kstrtoint(buf, 0, &n);
4957 	if (ret)
4958 		return ret;
4959 	sdebug_cdb_len = n;
4960 	all_config_cdb_len();
4961 	return count;
4962 }
4963 static DRIVER_ATTR_RW(cdb_len);
4964 
4965 
4966 /* Note: The following array creates attribute files in the
4967    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4968    files (over those found in the /sys/module/scsi_debug/parameters
4969    directory) is that auxiliary actions can be triggered when an attribute
4970    is changed. For example see: sdebug_add_host_store() above.
4971  */
4972 
/* Attribute list handed to the driver core; must stay NULL terminated. */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
5008 
5009 static struct device *pseudo_primary;
5010 
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* a positive ndelay (nanoseconds, < 1 s) overrides the
	 * jiffies-based delay */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* the ramdisk backing store is only needed when fake_rw is off */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	/* optional protection-information store, one tuple per sector */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_adapter() re-increments sdebug_add_host on success */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

        for (k = 0; k < host_to_add; k++) {
                if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
                        break;
                }
        }

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

	/* error unwind: release in reverse order of acquisition */
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5219 
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;

	/* quiesce queued commands before tearing the hosts down */
	stop_all_queued();
	free_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	/* backing stores last: nothing references them any more */
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}
5237 
5238 device_initcall(scsi_debug_init);
5239 module_exit(scsi_debug_exit);
5240 
static void sdebug_release_adapter(struct device *dev)
{
	/* device core dropped the last reference: free the container */
	kfree(to_sdebug_host(dev));
}
5248 
5249 static int sdebug_add_adapter(void)
5250 {
5251 	int k, devs_per_host;
5252         int error = 0;
5253         struct sdebug_host_info *sdbg_host;
5254 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5255 
5256         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5257         if (NULL == sdbg_host) {
5258 		pr_err("out of memory at line %d\n", __LINE__);
5259                 return -ENOMEM;
5260         }
5261 
5262         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5263 
5264 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5265         for (k = 0; k < devs_per_host; k++) {
5266 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5267 		if (!sdbg_devinfo) {
5268 			pr_err("out of memory at line %d\n", __LINE__);
5269                         error = -ENOMEM;
5270 			goto clean;
5271                 }
5272         }
5273 
5274         spin_lock(&sdebug_host_list_lock);
5275         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5276         spin_unlock(&sdebug_host_list_lock);
5277 
5278         sdbg_host->dev.bus = &pseudo_lld_bus;
5279         sdbg_host->dev.parent = pseudo_primary;
5280         sdbg_host->dev.release = &sdebug_release_adapter;
5281 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5282 
5283         error = device_register(&sdbg_host->dev);
5284 
5285         if (error)
5286 		goto clean;
5287 
5288 	++sdebug_add_host;
5289         return error;
5290 
5291 clean:
5292 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5293 				 dev_list) {
5294 		list_del(&sdbg_devinfo->dev_list);
5295 		kfree(sdbg_devinfo);
5296 	}
5297 
5298 	kfree(sdbg_host);
5299         return error;
5300 }
5301 
5302 static void sdebug_remove_adapter(void)
5303 {
5304         struct sdebug_host_info * sdbg_host = NULL;
5305 
5306         spin_lock(&sdebug_host_list_lock);
5307         if (!list_empty(&sdebug_host_list)) {
5308                 sdbg_host = list_entry(sdebug_host_list.prev,
5309                                        struct sdebug_host_info, host_list);
5310 		list_del(&sdbg_host->host_list);
5311 	}
5312         spin_unlock(&sdebug_host_list_lock);
5313 
5314 	if (!sdbg_host)
5315 		return;
5316 
5317 	device_unregister(&sdbg_host->dev);
5318 	--sdebug_add_host;
5319 }
5320 
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	/* freeze command submission while the depth is changed */
	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	/* clamp requested depth to [1, SDEBUG_CANQUEUE + 10] */
	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
5348 
/* Decide whether to swallow the current command to fake a timeout.
 * The caller checks sdebug_every_nth != 0 first (see
 * scsi_debug_queuecommand()), so the modulus below cannot be by zero. */
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		/* clamp values below -1 up to -1 */
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
5362 
/* Entry point for every command from the mid layer: looks the cdb up in
 * opcode_info_arr, applies unit-attention / strict / fake_rw / every_nth
 * policies, invokes the matching resp_* handler and schedules the
 * (possibly delayed) response via schedule_resp(). */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* verbose mode: hex-dump the cdb (up to 32 bytes) to the log */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* disambiguate by service action as well */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no variant matched */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate highest offending bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5509 
/* Host template handed to the SCSI mid layer; can_queue (and clustering)
 * may be overridden per host in sdebug_driver_probe(). */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5536 
5537 static int sdebug_driver_probe(struct device * dev)
5538 {
5539 	int error = 0;
5540 	struct sdebug_host_info *sdbg_host;
5541 	struct Scsi_Host *hpnt;
5542 	int hprot;
5543 
5544 	sdbg_host = to_sdebug_host(dev);
5545 
5546 	sdebug_driver_template.can_queue = sdebug_max_queue;
5547 	if (sdebug_clustering)
5548 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5549 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5550 	if (NULL == hpnt) {
5551 		pr_err("scsi_host_alloc failed\n");
5552 		error = -ENODEV;
5553 		return error;
5554 	}
5555 	if (submit_queues > nr_cpu_ids) {
5556 		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5557 			my_name, submit_queues, nr_cpu_ids);
5558 		submit_queues = nr_cpu_ids;
5559 	}
5560 	/* Decide whether to tell scsi subsystem that we want mq */
5561 	/* Following should give the same answer for each host */
5562 	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5563 	if (sdebug_mq_active)
5564 		hpnt->nr_hw_queues = submit_queues;
5565 
5566         sdbg_host->shost = hpnt;
5567 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5568 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5569 		hpnt->max_id = sdebug_num_tgts + 1;
5570 	else
5571 		hpnt->max_id = sdebug_num_tgts;
5572 	/* = sdebug_max_luns; */
5573 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5574 
5575 	hprot = 0;
5576 
5577 	switch (sdebug_dif) {
5578 
5579 	case T10_PI_TYPE1_PROTECTION:
5580 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5581 		if (sdebug_dix)
5582 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5583 		break;
5584 
5585 	case T10_PI_TYPE2_PROTECTION:
5586 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5587 		if (sdebug_dix)
5588 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5589 		break;
5590 
5591 	case T10_PI_TYPE3_PROTECTION:
5592 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5593 		if (sdebug_dix)
5594 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5595 		break;
5596 
5597 	default:
5598 		if (sdebug_dix)
5599 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5600 		break;
5601 	}
5602 
5603 	scsi_host_set_prot(hpnt, hprot);
5604 
5605 	if (have_dif_prot || sdebug_dix)
5606 		pr_info("host protection%s%s%s%s%s%s%s\n",
5607 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5608 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5609 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5610 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5611 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5612 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5613 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5614 
5615 	if (sdebug_guard == 1)
5616 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5617 	else
5618 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5619 
5620 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5621 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5622 	if (sdebug_every_nth)	/* need stats counters for every_nth */
5623 		sdebug_statistics = true;
5624         error = scsi_add_host(hpnt, &sdbg_host->dev);
5625         if (error) {
5626 		pr_err("scsi_add_host failed\n");
5627                 error = -ENODEV;
5628 		scsi_host_put(hpnt);
5629         } else
5630 		scsi_scan_host(hpnt);
5631 
5632 	return error;
5633 }
5634 
5635 static int sdebug_driver_remove(struct device * dev)
5636 {
5637         struct sdebug_host_info *sdbg_host;
5638 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5639 
5640 	sdbg_host = to_sdebug_host(dev);
5641 
5642 	if (!sdbg_host) {
5643 		pr_err("Unable to locate host info\n");
5644 		return -ENODEV;
5645 	}
5646 
5647         scsi_remove_host(sdbg_host->shost);
5648 
5649 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5650 				 dev_list) {
5651                 list_del(&sdbg_devinfo->dev_list);
5652                 kfree(sdbg_devinfo);
5653         }
5654 
5655         scsi_host_put(sdbg_host->shost);
5656         return 0;
5657 }
5658 
/*
 * Bus match callback for the pseudo bus.  Every device on this bus
 * belongs to this driver, so matching is unconditional.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	/* Both arguments are intentionally ignored. */
	return 1;
}
5664 
/*
 * The pseudo bus that simulated hosts hang off.  Probing/removing a
 * device on this bus creates/destroys one simulated SCSI host.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,	/* every device matches */
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5672