xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision f46eb0e9)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19 
20 
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22 
23 #include <linux/module.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 
45 #include <net/checksum.h>
46 
47 #include <asm/unaligned.h>
48 
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SCSI_DEBUG_VERSION "1.86"
63 static const char *sdebug_version_date = "20160422";
64 
65 #define MY_NAME "scsi_debug"
66 
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define UA_RESET_ASC 0x29
78 #define UA_CHANGED_ASC 0x2a
79 #define TARGET_CHANGED_ASC 0x3f
80 #define LUNS_CHANGED_ASCQ 0x0e
81 #define INSUFF_RES_ASC 0x55
82 #define INSUFF_RES_ASCQ 0x3
83 #define POWER_ON_RESET_ASCQ 0x0
84 #define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
85 #define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
86 #define CAPACITY_CHANGED_ASCQ 0x9
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91 #define MISCOMPARE_VERIFY_ASC 0x1d
92 #define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
93 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
94 
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97 
98 /* Default values for driver parameters */
99 #define DEF_NUM_HOST   1
100 #define DEF_NUM_TGTS   1
101 #define DEF_MAX_LUNS   1
102 /* With these defaults, this driver will make 1 host with 1 target
103  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
104  */
105 #define DEF_ATO 1
106 #define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
107 #define DEF_DEV_SIZE_MB   8
108 #define DEF_DIF 0
109 #define DEF_DIX 0
110 #define DEF_D_SENSE   0
111 #define DEF_EVERY_NTH   0
112 #define DEF_FAKE_RW	0
113 #define DEF_GUARD 0
114 #define DEF_HOST_LOCK 0
115 #define DEF_LBPU 0
116 #define DEF_LBPWS 0
117 #define DEF_LBPWS10 0
118 #define DEF_LBPRZ 1
119 #define DEF_LOWEST_ALIGNED 0
120 #define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
121 #define DEF_NO_LUN_0   0
122 #define DEF_NUM_PARTS   0
123 #define DEF_OPTS   0
124 #define DEF_OPT_BLKS 1024
125 #define DEF_PHYSBLK_EXP 0
126 #define DEF_PTYPE   0
127 #define DEF_REMOVABLE false
128 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
129 #define DEF_SECTOR_SIZE 512
130 #define DEF_UNMAP_ALIGNMENT 0
131 #define DEF_UNMAP_GRANULARITY 1
132 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
133 #define DEF_UNMAP_MAX_DESC 256
134 #define DEF_VIRTUAL_GB   0
135 #define DEF_VPD_USE_HOSTNO 1
136 #define DEF_WRITESAME_LENGTH 0xFFFF
137 #define DEF_STRICT 0
138 #define JDELAY_OVERRIDDEN -9999
139 
140 /* bit mask values for sdebug_opts */
141 #define SDEBUG_OPT_NOISE		1
142 #define SDEBUG_OPT_MEDIUM_ERR		2
143 #define SDEBUG_OPT_TIMEOUT		4
144 #define SDEBUG_OPT_RECOVERED_ERR	8
145 #define SDEBUG_OPT_TRANSPORT_ERR	16
146 #define SDEBUG_OPT_DIF_ERR		32
147 #define SDEBUG_OPT_DIX_ERR		64
148 #define SDEBUG_OPT_MAC_TIMEOUT		128
149 #define SDEBUG_OPT_SHORT_TRANSFER	0x100
150 #define SDEBUG_OPT_Q_NOISE		0x200
151 #define SDEBUG_OPT_ALL_TSF		0x400
152 #define SDEBUG_OPT_RARE_TSF		0x800
153 #define SDEBUG_OPT_N_WCE		0x1000
154 #define SDEBUG_OPT_RESET_NOISE		0x2000
155 #define SDEBUG_OPT_NO_CDB_NOISE		0x4000
156 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
157 			      SDEBUG_OPT_RESET_NOISE)
158 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
159 				  SDEBUG_OPT_TRANSPORT_ERR | \
160 				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
161 				  SDEBUG_OPT_SHORT_TRANSFER)
162 /* When "every_nth" > 0 then modulo "every_nth" commands:
163  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
164  *   - a RECOVERED_ERROR is simulated on successful read and write
165  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
166  *   - a TRANSPORT_ERROR is simulated on successful read and write
167  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
168  *
169  * When "every_nth" < 0 then after "- every_nth" commands:
170  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
171  *   - a RECOVERED_ERROR is simulated on successful read and write
172  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
173  *   - a TRANSPORT_ERROR is simulated on successful read and write
174  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
175  * This will continue on every subsequent command until some other action
176  * occurs (e.g. the user writing a new value (other than -1 or 1) to
177  * every_nth via sysfs).
178  */
179 
180 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
181  * priority order. In the subset implemented here lower numbers have higher
182  * priority. The UA numbers should be a sequence starting from 0 with
183  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
184 #define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
185 #define SDEBUG_UA_BUS_RESET 1
186 #define SDEBUG_UA_MODE_CHANGED 2
187 #define SDEBUG_UA_CAPACITY_CHANGED 3
188 #define SDEBUG_UA_LUNS_CHANGED 4
189 #define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
190 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
191 #define SDEBUG_NUM_UAS 7
192 
193 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
194  * sector on read commands: */
195 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
196 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
197 
198 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
199  * or "peripheral device" addressing (value 0) */
200 #define SAM2_LUN_ADDRESS_METHOD 0
201 
202 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
203  * (for response) at one time. Can be reduced by max_queue option. Command
204  * responses are not queued when jdelay=0 and ndelay=0. The per-device
205  * DEF_CMD_PER_LUN can be changed via sysfs:
206  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
207  * SCSI_DEBUG_CANQUEUE. */
208 #define SCSI_DEBUG_CANQUEUE_WORDS  9	/* a WORD is bits in a long */
209 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
210 #define DEF_CMD_PER_LUN  255
211 
212 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
213 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
214 #endif
215 
216 #define F_D_IN			1
217 #define F_D_OUT			2
218 #define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
219 #define F_D_UNKN		8
220 #define F_RL_WLUN_OK		0x10
221 #define F_SKIP_UA		0x20
222 #define F_DELAY_OVERR		0x40
223 #define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
224 #define F_SA_HIGH		0x100	/* as used by variable length cdbs */
225 #define F_INV_OP		0x200
226 #define F_FAKE_RW		0x400
227 #define F_M_ACCESS		0x800	/* media access */
228 
229 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
230 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
231 #define FF_SA (F_SA_HIGH | F_SA_LOW)
232 
233 #define SDEBUG_MAX_PARTS 4
234 
235 #define SCSI_DEBUG_MAX_CMD_LEN 32
236 
237 
/* Per-LUN state for one simulated logical unit (one per h:c:t:l). */
238 struct sdebug_dev_info {
239 	struct list_head dev_list;	/* link in sdbg_host->dev_info_list */
240 	unsigned int channel;
241 	unsigned int target;
242 	u64 lun;
243 	struct sdebug_host_info *sdbg_host;	/* owning pseudo host */
244 	unsigned long uas_bm[1];	/* pending Unit Attentions; SDEBUG_UA_* bit numbers */
245 	atomic_t num_in_q;	/* presumably count of queued commands on this LUN — confirm in queuecommand path */
246 	char stopped;		/* TODO: should be atomic */
247 	bool used;		/* NOTE(review): set/cleared outside this chunk — likely slot-allocation flag */
248 };
249 
/* One simulated SCSI host adapter and the devices hanging off it. */
250 struct sdebug_host_info {
251 	struct list_head host_list;	/* link in global sdebug_host_list */
252 	struct Scsi_Host *shost;	/* mid-layer host this pseudo host backs */
253 	struct device dev;		/* embedded device; see to_sdebug_host() */
254 	struct list_head dev_info_list;	/* list of struct sdebug_dev_info */
255 };
256 
257 #define to_sdebug_host(d)	\
258 	container_of(d, struct sdebug_host_info, dev)
259 
/* State for a deferred (delayed) command response. Carries both an
 * hrtimer and a work item; presumably the hrtimer is used for ndelay
 * (nanosecond) delays and the work item for jiffy delays — the code
 * that arms them is outside this chunk, confirm there. */
260 struct sdebug_defer {
261 	struct hrtimer hrt;
262 	struct execute_work ew;
263 	int qa_indx;	/* likely an index into queued_arr[] — name-based, verify */
264 };
265 
/* One slot of the pending-response queue (queued_arr[]). */
266 struct sdebug_queued_cmd {
267 	/* in_use flagged by a bit in queued_in_use_bm[] */
268 	struct sdebug_defer *sd_dp;	/* deferred-response state, if the response is delayed */
269 	struct scsi_cmnd *a_cmnd;	/* the command occupying this slot */
270 };
271 
/* Per-command error-injection flags; they mirror the SDEBUG_OPT_*
 * injection option bits (see SDEBUG_OPT_ALL_INJECTING). */
272 struct sdebug_scmd_extra_t {
273 	bool inj_recovered;	/* RECOVERED ERROR sense */
274 	bool inj_transport;	/* transport error */
275 	bool inj_dif;		/* DIF (protection information) error */
276 	bool inj_dix;		/* DIX error */
277 	bool inj_short;		/* short (truncated) transfer */
278 };
279 
/* Static description of one supported SCSI opcode (or a service-action
 * variant of it): the response function to dispatch to, F_* flags, and
 * a per-byte CDB mask used when checking CDBs. */
280 struct opcode_info_t {
281 	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff
282 				 * for terminating element */
283 	u8 opcode;		/* if num_attached > 0, preferred */
284 	u16 sa;			/* service action */
285 	u32 flags;		/* OR-ed set of SDEB_F_* */
286 	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
287 	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
288 	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
289 				/* ignore cdb bytes after position 15 */
290 };
291 
292 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* N.B. these values index opcode_info_arr[] directly, so the two must be
 * kept in the same order; SDEB_I_LAST_ELEMENT marks the table end. */
293 enum sdeb_opcode_index {
294 	SDEB_I_INVALID_OPCODE =	0,
295 	SDEB_I_INQUIRY = 1,
296 	SDEB_I_REPORT_LUNS = 2,
297 	SDEB_I_REQUEST_SENSE = 3,
298 	SDEB_I_TEST_UNIT_READY = 4,
299 	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
300 	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
301 	SDEB_I_LOG_SENSE = 7,
302 	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
303 	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
304 	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
305 	SDEB_I_START_STOP = 11,
306 	SDEB_I_SERV_ACT_IN = 12,	/* 12, 16 */
307 	SDEB_I_SERV_ACT_OUT = 13,	/* 12, 16 */
308 	SDEB_I_MAINT_IN = 14,
309 	SDEB_I_MAINT_OUT = 15,
310 	SDEB_I_VERIFY = 16,		/* 10 only */
311 	SDEB_I_VARIABLE_LEN = 17,
312 	SDEB_I_RESERVE = 18,		/* 6, 10 */
313 	SDEB_I_RELEASE = 19,		/* 6, 10 */
314 	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
315 	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
316 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
317 	SDEB_I_SEND_DIAG = 23,
318 	SDEB_I_UNMAP = 24,
319 	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
320 	SDEB_I_WRITE_BUFFER = 26,
321 	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
322 	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
323 	SDEB_I_COMP_WRITE = 29,
324 	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
325 };
326 
/* Maps cdb[0] (the opcode byte, 0x0..0xff) to an SDEB_I_* index into
 * opcode_info_arr[]; 0 means SDEB_I_INVALID_OPCODE (unsupported). */
327 static const unsigned char opcode_ind_arr[256] = {
328 /* 0x0; 0x0->0x1f: 6 byte cdbs */
329 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
330 	    0, 0, 0, 0,
331 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
332 	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
333 	    SDEB_I_RELEASE,
334 	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
335 	    SDEB_I_ALLOW_REMOVAL, 0,
336 /* 0x20; 0x20->0x3f: 10 byte cdbs */
337 	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
338 	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
339 	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
340 	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
341 /* 0x40; 0x40->0x5f: 10 byte cdbs */
342 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
343 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
344 	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
345 	    SDEB_I_RELEASE,
346 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
347 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
348 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
349 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
350 	0, SDEB_I_VARIABLE_LEN,
351 /* 0x80; 0x80->0x9f: 16 byte cdbs */
352 	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
353 	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
354 	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
355 	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
356 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
357 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
358 	     SDEB_I_MAINT_OUT, 0, 0, 0,
359 	SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
360 	     0, 0, 0, 0,
361 	0, 0, 0, 0, 0, 0, 0, 0,
362 	0, 0, 0, 0, 0, 0, 0, 0,
363 /* 0xc0; 0xc0->0xff: vendor specific */
364 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
365 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
366 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
367 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 };
369 
/* Forward declarations of the per-opcode response handlers referenced by
 * the opcode tables below. Each takes the command plus the per-LUN state. */
370 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
371 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
372 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
373 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
374 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
375 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
376 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
377 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
378 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
379 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
380 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
381 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
382 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
383 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
384 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
385 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
386 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
387 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
388 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
389 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
390 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
391 
/* Secondary ("iarr") tables: alternative CDB forms of an opcode. Each is
 * attached to a leading opcode_info_arr[] element through its arrp pointer
 * and num_attached count (see struct opcode_info_t). */
392 static const struct opcode_info_t msense_iarr[1] = {
393 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
394 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
395 };
396 
397 static const struct opcode_info_t mselect_iarr[1] = {
398 	{0, 0x15, 0, F_D_OUT, NULL, NULL,
399 	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
400 };
401 
402 static const struct opcode_info_t read_iarr[3] = {
403 	{0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
404 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
405 	     0, 0, 0, 0} },
406 	{0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
407 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
408 	{0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
409 	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
410 	     0xc7, 0, 0, 0, 0} },
411 };
412 
413 static const struct opcode_info_t write_iarr[3] = {
414 	{0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
415 	    {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
416 	     0, 0, 0, 0} },
417 	{0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
418 	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
419 	{0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
420 	    {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
421 	     0xc7, 0, 0, 0, 0} },
422 };
423 
424 static const struct opcode_info_t sa_in_iarr[1] = {
425 	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
426 	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
427 	     0xff, 0xff, 0xff, 0, 0xc7} },
428 };
429 
430 static const struct opcode_info_t vl_iarr[1] = {	/* VARIABLE LENGTH */
431 	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
432 	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
433 		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
434 };
435 
436 static const struct opcode_info_t maint_in_iarr[2] = {
437 	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
438 	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
439 	     0xc7, 0, 0, 0, 0} },
440 	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
441 	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
442 	     0, 0} },
443 };
444 
445 static const struct opcode_info_t write_same_iarr[1] = {
446 	{0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
447 	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
448 	     0xff, 0xff, 0xff, 0x1f, 0xc7} },
449 };
450 
451 static const struct opcode_info_t reserve_iarr[1] = {
452 	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
453 	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
454 };
455 
456 static const struct opcode_info_t release_iarr[1] = {
457 	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
458 	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
459 };
459 };
460 
461 
462 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
463  * plus the terminating elements for logic that scans this table such as
464  * REPORT SUPPORTED OPERATION CODES. */
465 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
466 /* 0 */
467 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
468 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
469 	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
470 	    {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
471 	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
472 	    {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
473 	     0, 0} },
474 	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
475 	    {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
476 	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
477 	    {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
478 	{1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
479 	    {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
480 	     0} },
481 	{1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
482 	    {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
483 	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
484 	    {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
485 	     0, 0, 0} },
486 	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,
487 	    {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
488 	     0, 0} },
489 	{3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
490 	    {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
491 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* READ(16) */
492 /* 10 */
493 	{3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
494 	    {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
495 	     0xff, 0xff, 0xff, 0x9f, 0xc7} },		/* WRITE(16) */
496 	{0, 0x1b, 0, 0, resp_start_stop, NULL,		/* START STOP UNIT */
497 	    {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
498 	{1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
499 	    {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
500 	     0xff, 0xff, 0xff, 0x1, 0xc7} },	/* READ CAPACITY(16) */
501 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
502 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
503 	{2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
504 	    {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
505 	     0} },
506 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
507 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
508 	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
509 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
510 	     0, 0, 0, 0, 0, 0} },
511 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
512 	    vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
513 		      0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
514 	{1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
515 	    {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
516 	     0} },
517 	{1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
518 	    {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
519 	     0} },
520 /* 20 */
521 	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
522 	    {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
524 	    {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
526 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	/* Fix: the sa (u16) and flags (u32) initializers were transposed
	 * here ({0, 0x1d, F_D_OUT, 0, ...}); F_D_OUT is a flag, not a
	 * service action, and SEND DIAGNOSTIC has no service action. */
527 	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
528 	    {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
529 	{0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
530 	    {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
531 	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
532 	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
533 		   0, 0, 0, 0, 0, 0} },
534 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
535 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
536 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
537 	{1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
538 	    write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
539 			      0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
540 	{0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
541 	    {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
542 	     0, 0, 0, 0} },
543 	{0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
544 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
545 	     0, 0xff, 0x1f, 0xc7} },		/* COMPARE AND WRITE */
546 
547 /* 30 */
548 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
549 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
550 };
551 
/* Driver-wide state: settings backed by the DEF_* defaults (most are
 * exposed as module parameters elsewhere in the file), statistics
 * counters, the ramdisk/protection stores, and the response queue. */
552 static int sdebug_add_host = DEF_NUM_HOST;
553 static int sdebug_ato = DEF_ATO;
554 static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
555 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
556 static int sdebug_dif = DEF_DIF;
557 static int sdebug_dix = DEF_DIX;
558 static int sdebug_dsense = DEF_D_SENSE;	/* non-zero => descriptor-format sense */
559 static int sdebug_every_nth = DEF_EVERY_NTH;
560 static int sdebug_fake_rw = DEF_FAKE_RW;
561 static unsigned int sdebug_guard = DEF_GUARD;
562 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
563 static int sdebug_max_luns = DEF_MAX_LUNS;
564 static int sdebug_max_queue = SCSI_DEBUG_CANQUEUE;
565 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
566 static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
567 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
568 static int sdebug_no_uld;
569 static int sdebug_num_parts = DEF_NUM_PARTS;
570 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
571 static int sdebug_opt_blks = DEF_OPT_BLKS;
572 static int sdebug_opts = DEF_OPTS;
573 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
574 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
575 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
576 static int sdebug_sector_size = DEF_SECTOR_SIZE;
577 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
578 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
579 static unsigned int sdebug_lbpu = DEF_LBPU;
580 static unsigned int sdebug_lbpws = DEF_LBPWS;
581 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
582 static unsigned int sdebug_lbprz = DEF_LBPRZ;
583 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
584 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
585 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
586 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
587 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
588 static bool sdebug_removable = DEF_REMOVABLE;
589 static bool sdebug_clustering;
590 static bool sdebug_host_lock = DEF_HOST_LOCK;
591 static bool sdebug_strict = DEF_STRICT;
592 static bool sdebug_any_injecting_opt;
593 static bool sdebug_verbose;
594 static bool have_dif_prot;
595 
/* Statistics counters. */
596 static atomic_t sdebug_cmnd_count;
597 static atomic_t sdebug_completions;
598 static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
599 
600 static unsigned int sdebug_store_sectors;
601 static sector_t sdebug_capacity;	/* in sectors */
602 
603 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
604    may still need them */
605 static int sdebug_heads;		/* heads per disk */
606 static int sdebug_cylinders_per;	/* cylinders per surface */
607 static int sdebug_sectors_per;		/* sectors per cylinder */
608 
609 static LIST_HEAD(sdebug_host_list);
610 static DEFINE_SPINLOCK(sdebug_host_list_lock);
611 
612 static unsigned char *fake_storep;	/* ramdisk storage */
613 static struct sd_dif_tuple *dif_storep;	/* protection info */
614 static void *map_storep;		/* provisioning map */
615 
616 static unsigned long map_size;
617 static int num_aborts;
618 static int num_dev_resets;
619 static int num_target_resets;
620 static int num_bus_resets;
621 static int num_host_resets;
622 static int dix_writes;
623 static int dix_reads;
624 static int dif_errors;
625 
/* Pending-response queue and its in-use bitmap. */
626 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
627 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
628 
629 static DEFINE_SPINLOCK(queued_arr_lock);
630 static DEFINE_RWLOCK(atomic_rw);
631 
632 static char sdebug_proc_name[] = MY_NAME;
633 static const char *my_name = MY_NAME;
634 
635 static struct bus_type pseudo_lld_bus;
636 
637 static struct device_driver sdebug_driverfs_driver = {
638 	.name 		= sdebug_proc_name,
639 	.bus		= &pseudo_lld_bus,
640 };
641 
/* Pre-computed scsi_cmnd result values. */
642 static const int check_condition_result =
643 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
644 
645 static const int illegal_condition_result =
646 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
647 
648 static const int device_qfull_result =
649 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
650 
651 
652 static unsigned int scsi_debug_lbp(void)
653 {
654 	return 0 == sdebug_fake_rw &&
655 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
656 }
657 
/* Map an LBA to its byte address inside the fake_storep ramdisk. */
658 static void *fake_store(unsigned long long lba)
659 {
	/* do_div() divides lba in place and returns the remainder, so this
	 * wraps the LBA modulo the size of the ram store. */
660 	lba = do_div(lba, sdebug_store_sectors);
661 
662 	return fake_storep + lba * sdebug_sector_size;
663 }
664 
/* Map a sector to its protection-information tuple in dif_storep. */
665 static struct sd_dif_tuple *dif_store(sector_t sector)
666 {
	/* sector_div() divides in place and returns the remainder, wrapping
	 * the sector modulo the size of the store (cf. fake_store()). */
667 	sector = sector_div(sector, sdebug_store_sectors);
668 
669 	return dif_storep + sector;
670 }
671 
672 static void sdebug_max_tgts_luns(void)
673 {
674 	struct sdebug_host_info *sdbg_host;
675 	struct Scsi_Host *hpnt;
676 
677 	spin_lock(&sdebug_host_list_lock);
678 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
679 		hpnt = sdbg_host->shost;
680 		if ((hpnt->this_id >= 0) &&
681 		    (sdebug_num_tgts > hpnt->this_id))
682 			hpnt->max_id = sdebug_num_tgts + 1;
683 		else
684 			hpnt->max_id = sdebug_num_tgts;
685 		/* sdebug_max_luns; */
686 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
687 	}
688 	spin_unlock(&sdebug_host_list_lock);
689 }
690 
/* Says whether an invalid-field error refers to the data-out (parameter
 * list) buffer or to the CDB — selects ASC 0x26 vs 0x24 in
 * mk_sense_invalid_fld(). */
691 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
692 
693 /* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build an ILLEGAL REQUEST sense buffer for an "invalid field" error.
 * @c_d:     SDEB_IN_CDB => field is in the CDB (ASC 0x24);
 *           SDEB_IN_DATA => field is in the parameter list (ASC 0x26).
 * @in_byte: byte position of the offending field.
 * @in_bit:  bit position within that byte, or -1 for "not specified".
 * Fills in the sense-key specific (SKS) field per SPC-4, in either
 * descriptor or fixed sense format depending on sdebug_dsense.
 */
694 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
695 				 enum sdeb_cmd_data c_d,
696 				 int in_byte, int in_bit)
697 {
698 	unsigned char *sbuff;
699 	u8 sks[4];
700 	int sl, asc;
701 
702 	sbuff = scp->sense_buffer;
703 	if (!sbuff) {
704 		sdev_printk(KERN_ERR, scp->device,
705 			    "%s: sense_buffer is NULL\n", __func__);
706 		return;
707 	}
708 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
709 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
710 	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	/* sks[0]: 0x80 = SKSV (field pointer valid); 0x40 = C/D (error is
	 * in the CDB); 0x8 = BPV plus the bit pointer in the low 3 bits.
	 * sks[1..2]: big-endian byte (field) pointer. */
711 	memset(sks, 0, sizeof(sks));
712 	sks[0] = 0x80;
713 	if (c_d)
714 		sks[0] |= 0x40;
715 	if (in_bit >= 0) {
716 		sks[0] |= 0x8;
717 		sks[0] |= 0x7 & in_bit;
718 	}
719 	put_unaligned_be16(in_byte, sks + 1);
720 	if (sdebug_dsense) {
		/* descriptor format: append a sense-key specific
		 * descriptor (type 0x2, length 0x6) and grow the
		 * additional sense length in byte 7 */
721 		sl = sbuff[7] + 8;
722 		sbuff[7] = sl;
723 		sbuff[sl] = 0x2;
724 		sbuff[sl + 1] = 0x6;
725 		memcpy(sbuff + sl + 4, sks, 3);
726 	} else
		/* fixed format: SKS field occupies bytes 15..17 */
727 		memcpy(sbuff + 15, sks, 3);
728 	if (sdebug_verbose)
729 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
730 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
731 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
732 }
733 
734 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
735 {
736 	unsigned char *sbuff;
737 
738 	sbuff = scp->sense_buffer;
739 	if (!sbuff) {
740 		sdev_printk(KERN_ERR, scp->device,
741 			    "%s: sense_buffer is NULL\n", __func__);
742 		return;
743 	}
744 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
745 
746 	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
747 
748 	if (sdebug_verbose)
749 		sdev_printk(KERN_INFO, scp->device,
750 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
751 			    my_name, key, asc, asq);
752 }
753 
/* Convenience wrapper: ILLEGAL REQUEST with ASC "invalid command
 * operation code" (0x20). */
754 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
755 {
756 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
757 }
758 
759 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
760 {
761 	if (sdebug_verbose) {
762 		if (0x1261 == cmd)
763 			sdev_printk(KERN_INFO, dev,
764 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
765 		else if (0x5331 == cmd)
766 			sdev_printk(KERN_INFO, dev,
767 				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
768 				    __func__);
769 		else
770 			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
771 				    __func__, cmd);
772 	}
773 	return -EINVAL;
774 	/* return -ENOTTY; // correct return but upsets fdisk */
775 }
776 
/*
 * Clear the SDEBUG_UA_LUNS_CHANGED unit attention bit on every simulated
 * device that shares devip's host and target.  Walks the global host list
 * under sdebug_host_list_lock.
 */
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			/* match on host and target only, so the UA is
			 * cleared on every LUN of that target */
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
792 
/*
 * Report at most one pending unit attention (UA) for this device.  If any
 * UA bit is set in devip->uas_bm, build the matching sense data in scp,
 * clear that bit and return check_condition_result; otherwise return 0.
 * The lowest numbered pending UA is reported first.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;	/* description, used only when verbose */

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
				TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received.  SPC-4 behavior is to report it only once.
			 * NOTE:  sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				   "%s reports: Unit attention: %s\n",
				   my_name, cp);
		return check_condition_result;
	}
	return 0;
}
871 
872 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
873 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
874 				int arr_len)
875 {
876 	int act_len;
877 	struct scsi_data_buffer *sdb = scsi_in(scp);
878 
879 	if (!sdb->length)
880 		return 0;
881 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
882 		return DID_ERROR << 16;
883 
884 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
885 				      arr, arr_len);
886 	sdb->resid = scsi_bufflen(scp) - act_len;
887 
888 	return 0;
889 }
890 
891 /* Returns number of bytes fetched into 'arr' or -1 if error. */
892 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
893 			       int arr_len)
894 {
895 	if (!scsi_bufflen(scp))
896 		return 0;
897 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
898 		return -1;
899 
900 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
901 }
902 
903 
/* INQUIRY response strings: 8 byte vendor, 16 byte product, 4 byte rev */
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0186";	/* version less '.' */
/* bases for the fabricated NAA-5 identifiers used in the VPD pages below */
static const u64 naa5_comp_a = 0x5222222000000000ULL;
static const u64 naa5_comp_b = 0x5333333000000000ULL;
static const u64 naa5_comp_c = 0x5111111000000000ULL;
910 
/* Device identification VPD page (0x83). Returns number of bytes placed
 * in arr. A negative dev_id_num (used for the well known LU) suppresses
 * the logical unit designators. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str,
			   int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		put_unaligned_be64(naa5_comp_b + dev_id_num, arr + num);
		num += 8;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa5_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length: 12 + 8 + 4 pad bytes */
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* null terminate and pad */
	num += 4;
	return num;
}
986 
/* payload for the Software interface identification VPD page (0x84);
 * copied out by inquiry_evpd_84() */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
992 
993 /*  Software interface identification VPD page */
994 static int inquiry_evpd_84(unsigned char * arr)
995 {
996 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
997 	return sizeof(vpd84_data);
998 }
999 
/* Append one network services descriptor to arr at offset num and return
 * the new offset.  assoc_svc encodes the ASSOCIATION and SERVICE TYPE
 * fields; the URL is null terminated and padded to a 4 byte multiple. */
static int inquiry_evpd_85_descr(unsigned char *arr, int num, int assoc_svc,
				 const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* leave room for the trailing null */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to 4 byte multiple */
	arr[num++] = assoc_svc;	/* association + service type */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* length (msb), always 0 here */
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/* Management network addresses VPD page (0x85).  Returns number of bytes
 * placed in arr. */
static int inquiry_evpd_85(unsigned char * arr)
{
	const char * na1 = "https://www.kernel.org/config";
	const char * na2 = "http://www.kernel.org/log";
	int num = 0;

	num = inquiry_evpd_85_descr(arr, num, 0x1, na1); /* lu, storage config */
	num = inquiry_evpd_85_descr(arr, num, 0x4, na2); /* lu, logging */
	return num;
}
1034 
/* SCSI ports VPD page (0x88): advertises two relative target ports, each
 * with a NAA-5 target port identifier. Returns number of bytes placed in
 * arr. */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa5_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1076 
1077 
/* canned payload for the ATA Information VPD page (0x89); copied out by
 * inquiry_evpd_89().  NOTE(review): appears to embed fixed SAT/identify
 * style data for a fake "linux scsi_debug" device -- confirm against
 * SAT-2 before relying on individual fields. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1121 
1122 /* ATA Information VPD page */
1123 static int inquiry_evpd_89(unsigned char * arr)
1124 {
1125 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
1126 	return sizeof(vpd89_data);
1127 }
1128 
1129 
/* canned defaults for the Block limits VPD page (0xb0); several fields
 * are overwritten at run time by inquiry_evpd_b0() */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1136 
1137 /* Block limits VPD page (SBC-3) */
1138 static int inquiry_evpd_b0(unsigned char * arr)
1139 {
1140 	unsigned int gran;
1141 
1142 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1143 
1144 	/* Optimal transfer length granularity */
1145 	gran = 1 << sdebug_physblk_exp;
1146 	put_unaligned_be16(gran, arr + 2);
1147 
1148 	/* Maximum Transfer Length */
1149 	if (sdebug_store_sectors > 0x400)
1150 		put_unaligned_be32(sdebug_store_sectors, arr + 4);
1151 
1152 	/* Optimal Transfer Length */
1153 	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1154 
1155 	if (sdebug_lbpu) {
1156 		/* Maximum Unmap LBA Count */
1157 		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1158 
1159 		/* Maximum Unmap Block Descriptor Count */
1160 		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1161 	}
1162 
1163 	/* Unmap Granularity Alignment */
1164 	if (sdebug_unmap_alignment) {
1165 		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1166 		arr[28] |= 0x80; /* UGAVALID */
1167 	}
1168 
1169 	/* Optimal Unmap Granularity */
1170 	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1171 
1172 	/* Maximum WRITE SAME Length */
1173 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1174 
1175 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1176 
1177 	return sizeof(vpdb0_data);
1178 }
1179 
/* Block device characteristics VPD page (0xb1, SBC-3): reports a small,
 * non-rotating (solid state like) medium.  Returns the 0x3c page length. */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* nominal form factor: less than 1.8" */

	return 0x3c;
}
1191 
1192 /* Logical block provisioning VPD page (SBC-3) */
1193 static int inquiry_evpd_b2(unsigned char *arr)
1194 {
1195 	memset(arr, 0, 0x4);
1196 	arr[0] = 0;			/* threshold exponent */
1197 
1198 	if (sdebug_lbpu)
1199 		arr[1] = 1 << 7;
1200 
1201 	if (sdebug_lbpws)
1202 		arr[1] |= 1 << 6;
1203 
1204 	if (sdebug_lbpws10)
1205 		arr[1] |= 1 << 5;
1206 
1207 	if (sdebug_lbprz)
1208 		arr[1] |= 1 << 2;
1209 
1210 	return 0x4;
1211 }
1212 
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* INQUIRY handler: builds standard INQUIRY data or, when the EVPD bit is
 * set, one of the supported Vital Product Data pages. */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
	if (have_wlun)
		pq_pdt = 0x1e;	/* present, wlun */
	else if (sdebug_no_lun_0 && (0 == devip->lun))
		pq_pdt = 0x7f;	/* not present, no device type */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		/* with vpd_use_hostno==0 all hosts fake host 0, so the ids
		 * below come out the same across hosts */
		if (0 == sdebug_vpd_use_hostno)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			arr[n++] = 0x89;  /* ATA information */
			arr[n++] = 0xb0;  /* Block limits (SBC) */
			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
				arr[n++] = 0xb2;
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
						 target_dev_id, lu_id_num,
						 lu_id_str, len);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (sdebug_dif)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
		} else if (0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_evpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b0(&arr[4]);
		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b1(&arr[4]);
		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_evpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (0 == sdebug_vpd_use_hostno)
		arr[5] = 0x10; /* claim: implicit TGPS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
	n = 62;
	if (sdebug_ptype == 0) {
		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
	} else if (sdebug_ptype == 1) {
		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
	}
	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
	ret = fill_from_dev_buffer(scp, arr,
			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1359 
/* Informational Exceptions Control mode page (0x1c), current values.
 * Byte 2 bit 0x4 is TEST and the low nibble of byte 3 is MRIE; both are
 * consulted by resp_requests() to fake a THRESHOLD EXCEEDED report. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1362 
1363 static int resp_requests(struct scsi_cmnd * scp,
1364 			 struct sdebug_dev_info * devip)
1365 {
1366 	unsigned char * sbuff;
1367 	unsigned char *cmd = scp->cmnd;
1368 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1369 	bool dsense;
1370 	int len = 18;
1371 
1372 	memset(arr, 0, sizeof(arr));
1373 	dsense = !!(cmd[1] & 1);
1374 	sbuff = scp->sense_buffer;
1375 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1376 		if (dsense) {
1377 			arr[0] = 0x72;
1378 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
1379 			arr[2] = THRESHOLD_EXCEEDED;
1380 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
1381 			len = 8;
1382 		} else {
1383 			arr[0] = 0x70;
1384 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
1385 			arr[7] = 0xa;   	/* 18 byte sense buffer */
1386 			arr[12] = THRESHOLD_EXCEEDED;
1387 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
1388 		}
1389 	} else {
1390 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1391 		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1392 			;	/* have sense and formats match */
1393 		else if (arr[0] <= 0x70) {
1394 			if (dsense) {
1395 				memset(arr, 0, 8);
1396 				arr[0] = 0x72;
1397 				len = 8;
1398 			} else {
1399 				memset(arr, 0, 18);
1400 				arr[0] = 0x70;
1401 				arr[7] = 0xa;
1402 			}
1403 		} else if (dsense) {
1404 			memset(arr, 0, 8);
1405 			arr[0] = 0x72;
1406 			arr[1] = sbuff[2];     /* sense key */
1407 			arr[2] = sbuff[12];    /* asc */
1408 			arr[3] = sbuff[13];    /* ascq */
1409 			len = 8;
1410 		} else {
1411 			memset(arr, 0, 18);
1412 			arr[0] = 0x70;
1413 			arr[2] = sbuff[1];
1414 			arr[7] = 0xa;
1415 			arr[12] = sbuff[1];
1416 			arr[13] = sbuff[3];
1417 		}
1418 
1419 	}
1420 	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1421 	return fill_from_dev_buffer(scp, arr, len);
1422 }
1423 
1424 static int resp_start_stop(struct scsi_cmnd * scp,
1425 			   struct sdebug_dev_info * devip)
1426 {
1427 	unsigned char *cmd = scp->cmnd;
1428 	int power_cond, start;
1429 
1430 	power_cond = (cmd[4] & 0xf0) >> 4;
1431 	if (power_cond) {
1432 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1433 		return check_condition_result;
1434 	}
1435 	start = cmd[4] & 1;
1436 	if (start == devip->stopped)
1437 		devip->stopped = !start;
1438 	return 0;
1439 }
1440 
1441 static sector_t get_sdebug_capacity(void)
1442 {
1443 	static const unsigned int gibibyte = 1073741824;
1444 
1445 	if (sdebug_virtual_gb > 0)
1446 		return (sector_t)sdebug_virtual_gb *
1447 			(gibibyte / sdebug_sector_size);
1448 	else
1449 		return sdebug_store_sectors;
1450 }
1451 
1452 #define SDEBUG_READCAP_ARR_SZ 8
1453 static int resp_readcap(struct scsi_cmnd * scp,
1454 			struct sdebug_dev_info * devip)
1455 {
1456 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1457 	unsigned int capac;
1458 
1459 	/* following just in case virtual_gb changed */
1460 	sdebug_capacity = get_sdebug_capacity();
1461 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1462 	if (sdebug_capacity < 0xffffffff) {
1463 		capac = (unsigned int)sdebug_capacity - 1;
1464 		put_unaligned_be32(capac, arr + 0);
1465 	} else
1466 		put_unaligned_be32(0xffffffff, arr + 0);
1467 	put_unaligned_be16(sdebug_sector_size, arr + 6);
1468 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1469 }
1470 
#define SDEBUG_READCAP16_ARR_SZ 32
/* READ CAPACITY(16): 8 byte last LBA and 4 byte block length plus
 * protection, physical block exponent, alignment and provisioning
 * fields. */
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		if (sdebug_lbprz)
			arr[14] |= 0x40; /* LBPRZ */
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (sdebug_dif) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1504 
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* REPORT TARGET PORT GROUPS: two groups of one port each, the second
 * group permanently unavailable. */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (0 == sdebug_vpd_use_hostno) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1582 
/* REPORT SUPPORTED OPERATION CODES handler.  reporting_opts 0 lists all
 * commands; 1..3 describe one command (opcode, or opcode plus service
 * action).  RCTD adds a command timeouts descriptor per entry. */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* 8 bytes per entry, plus 12 more when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* then the commands attached to this opcode */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* field pointer: byte 4 holds requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported */
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1733 
1734 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1735 			  struct sdebug_dev_info *devip)
1736 {
1737 	bool repd;
1738 	u32 alloc_len, len;
1739 	u8 arr[16];
1740 	u8 *cmd = scp->cmnd;
1741 
1742 	memset(arr, 0, sizeof(arr));
1743 	repd = !!(cmd[2] & 0x80);
1744 	alloc_len = get_unaligned_be32(cmd + 6);
1745 	if (alloc_len < 4) {
1746 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1747 		return check_condition_result;
1748 	}
1749 	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
1750 	arr[1] = 0x1;		/* ITNRS */
1751 	if (repd) {
1752 		arr[3] = 0xc;
1753 		len = 16;
1754 	} else
1755 		len = 4;
1756 
1757 	len = (len < alloc_len) ? len : alloc_len;
1758 	return fill_from_dev_buffer(scp, arr, len);
1759 }
1760 
1761 /* <<Following mode page info copied from ST318451LW>> */
1762 
/* Read-Write Error Recovery mode page (0x1) for MODE SENSE.
 * pcontrol==1 requests changeable values: none are changeable, so all
 * parameter bytes after the 2-byte page header are zeroed. */
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff
	};
	const int pg_len = (int)sizeof(err_recov_pg);

	memcpy(p, err_recov_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
1773 
/* Disconnect-Reconnect mode page (0x2) for MODE SENSE; applies to all
 * device types.  pcontrol==1 (changeable values) zeroes every
 * parameter byte since none are changeable. */
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};
	const int pg_len = (int)sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
1784 
1785 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1786 {       /* Format device page for mode_sense */
1787 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1788 				     0, 0, 0, 0, 0, 0, 0, 0,
1789 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1790 
1791 	memcpy(p, format_pg, sizeof(format_pg));
1792 	put_unaligned_be16(sdebug_sectors_per, p + 10);
1793 	put_unaligned_be16(sdebug_sector_size, p + 12);
1794 	if (sdebug_removable)
1795 		p[20] |= 0x20; /* should agree with INQUIRY */
1796 	if (1 == pcontrol)
1797 		memset(p + 2, 0, sizeof(format_pg) - 2);
1798 	return sizeof(format_pg);
1799 }
1800 
/* Current values of the Caching mode page (0x8).  File-scope because
 * resp_mode_select() writes MODE SELECT changes back into it. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

/* Caching mode page for MODE SENSE.  pcontrol: 1 -> changeable-values
 * mask, 2 -> default values, otherwise current values (caching_pg). */
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	/* changeable-values mask: only WCE (byte 2, bit 0x4) is settable */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	/* factory default values */
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
1821 
/* Current values of the Control mode page (0xa).  File-scope because
 * resp_mode_select() writes MODE SELECT changes back into it. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

/* Control mode page for MODE SENSE.  Reflects the module's dsense and
 * ato settings into the current-values page before copying it out. */
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	/* changeable-values mask for byte 2 (includes the D_SENSE bit) */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
				        0, 0, 0, 0};
	/* factory default values */
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;	/* D_SENSE: descriptor format sense */
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
1847 
1848 
1849 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1850 {	/* Informational Exceptions control mode page for mode_sense */
1851 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1852 				       0, 0, 0x0, 0x0};
1853 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1854 				      0, 0, 0x0, 0x0};
1855 
1856 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1857 	if (1 == pcontrol)
1858 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1859 	else if (2 == pcontrol)
1860 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1861 	return sizeof(iec_m_pg);
1862 }
1863 
/* SAS SSP mode page (0x19), short format, for MODE SENSE.
 * pcontrol==1 (changeable values) zeroes all parameter bytes. */
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};
	const int pg_len = (int)sizeof(sas_sf_m_pg);

	memcpy(p, sas_sf_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 2, 0, pg_len - 2);
	return pg_len;
}
1874 
1875 
/* SAS Phy Control And Discover mode subpage (0x59, subpage 0x1) for
 * MODE SENSE.  The template describes two phys; the fake SAS addresses
 * and per-target phy identifiers are patched in before copy-out. */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch the fake NAA-5 SAS addresses into both phy descriptors */
	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa5_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa5_comp_c + 1, sas_pcd_m_pg + 72);
	/* per-target port identifiers derived from target_dev_id */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);	/* second descriptor */
	if (1 == pcontrol)	/* changeable values: none */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
1908 
/* SAS SSP shared protocol-specific port mode subpage (0x59, subpage
 * 0x2) for MODE SENSE.  pcontrol==1 zeroes the parameter bytes after
 * the 4-byte subpage header. */
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int pg_len = (int)sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, pg_len);
	if (pcontrol == 1)
		memset(p + 4, 0, pg_len - 4);
	return pg_len;
}
1920 
1921 #define SDEBUG_MAX_MSENSE_SZ 256
1922 
1923 static int resp_mode_sense(struct scsi_cmnd *scp,
1924 			   struct sdebug_dev_info *devip)
1925 {
1926 	unsigned char dbd, llbaa;
1927 	int pcontrol, pcode, subpcode, bd_len;
1928 	unsigned char dev_spec;
1929 	int alloc_len, msense_6, offset, len, target_dev_id;
1930 	int target = scp->device->id;
1931 	unsigned char * ap;
1932 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1933 	unsigned char *cmd = scp->cmnd;
1934 
1935 	dbd = !!(cmd[1] & 0x8);
1936 	pcontrol = (cmd[2] & 0xc0) >> 6;
1937 	pcode = cmd[2] & 0x3f;
1938 	subpcode = cmd[3];
1939 	msense_6 = (MODE_SENSE == cmd[0]);
1940 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1941 	if ((0 == sdebug_ptype) && (0 == dbd))
1942 		bd_len = llbaa ? 16 : 8;
1943 	else
1944 		bd_len = 0;
1945 	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
1946 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1947 	if (0x3 == pcontrol) {  /* Saving values not supported */
1948 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1949 		return check_condition_result;
1950 	}
1951 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1952 			(devip->target * 1000) - 3;
1953 	/* set DPOFUA bit for disks */
1954 	if (0 == sdebug_ptype)
1955 		dev_spec = 0x10;	/* would be 0x90 if read-only */
1956 	else
1957 		dev_spec = 0x0;
1958 	if (msense_6) {
1959 		arr[2] = dev_spec;
1960 		arr[3] = bd_len;
1961 		offset = 4;
1962 	} else {
1963 		arr[3] = dev_spec;
1964 		if (16 == bd_len)
1965 			arr[4] = 0x1;	/* set LONGLBA bit */
1966 		arr[7] = bd_len;	/* assume 255 or less */
1967 		offset = 8;
1968 	}
1969 	ap = arr + offset;
1970 	if ((bd_len > 0) && (!sdebug_capacity))
1971 		sdebug_capacity = get_sdebug_capacity();
1972 
1973 	if (8 == bd_len) {
1974 		if (sdebug_capacity > 0xfffffffe)
1975 			put_unaligned_be32(0xffffffff, ap + 0);
1976 		else
1977 			put_unaligned_be32(sdebug_capacity, ap + 0);
1978 		put_unaligned_be16(sdebug_sector_size, ap + 6);
1979 		offset += bd_len;
1980 		ap = arr + offset;
1981 	} else if (16 == bd_len) {
1982 		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
1983 		put_unaligned_be32(sdebug_sector_size, ap + 12);
1984 		offset += bd_len;
1985 		ap = arr + offset;
1986 	}
1987 
1988 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1989 		/* TODO: Control Extension page */
1990 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
1991 		return check_condition_result;
1992 	}
1993 	switch (pcode) {
1994 	case 0x1:	/* Read-Write error recovery page, direct access */
1995 		len = resp_err_recov_pg(ap, pcontrol, target);
1996 		offset += len;
1997 		break;
1998 	case 0x2:	/* Disconnect-Reconnect page, all devices */
1999 		len = resp_disconnect_pg(ap, pcontrol, target);
2000 		offset += len;
2001 		break;
2002         case 0x3:       /* Format device page, direct access */
2003                 len = resp_format_pg(ap, pcontrol, target);
2004                 offset += len;
2005                 break;
2006 	case 0x8:	/* Caching page, direct access */
2007 		len = resp_caching_pg(ap, pcontrol, target);
2008 		offset += len;
2009 		break;
2010 	case 0xa:	/* Control Mode page, all devices */
2011 		len = resp_ctrl_m_pg(ap, pcontrol, target);
2012 		offset += len;
2013 		break;
2014 	case 0x19:	/* if spc==1 then sas phy, control+discover */
2015 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
2016 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2017 			return check_condition_result;
2018 	        }
2019 		len = 0;
2020 		if ((0x0 == subpcode) || (0xff == subpcode))
2021 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2022 		if ((0x1 == subpcode) || (0xff == subpcode))
2023 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2024 						  target_dev_id);
2025 		if ((0x2 == subpcode) || (0xff == subpcode))
2026 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
2027 		offset += len;
2028 		break;
2029 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
2030 		len = resp_iec_m_pg(ap, pcontrol, target);
2031 		offset += len;
2032 		break;
2033 	case 0x3f:	/* Read all Mode pages */
2034 		if ((0 == subpcode) || (0xff == subpcode)) {
2035 			len = resp_err_recov_pg(ap, pcontrol, target);
2036 			len += resp_disconnect_pg(ap + len, pcontrol, target);
2037 			len += resp_format_pg(ap + len, pcontrol, target);
2038 			len += resp_caching_pg(ap + len, pcontrol, target);
2039 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2040 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2041 			if (0xff == subpcode) {
2042 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2043 						  target, target_dev_id);
2044 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
2045 			}
2046 			len += resp_iec_m_pg(ap + len, pcontrol, target);
2047 		} else {
2048 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2049 			return check_condition_result;
2050                 }
2051 		offset += len;
2052 		break;
2053 	default:
2054 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2055 		return check_condition_result;
2056 	}
2057 	if (msense_6)
2058 		arr[0] = offset - 1;
2059 	else
2060 		put_unaligned_be16((offset - 2), arr + 0);
2061 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2062 }
2063 
2064 #define SDEBUG_MAX_MSELECT_SZ 512
2065 
2066 static int resp_mode_select(struct scsi_cmnd *scp,
2067 			    struct sdebug_dev_info *devip)
2068 {
2069 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2070 	int param_len, res, mpage;
2071 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2072 	unsigned char *cmd = scp->cmnd;
2073 	int mselect6 = (MODE_SELECT == cmd[0]);
2074 
2075 	memset(arr, 0, sizeof(arr));
2076 	pf = cmd[1] & 0x10;
2077 	sp = cmd[1] & 0x1;
2078 	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2079 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2080 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2081 		return check_condition_result;
2082 	}
2083         res = fetch_to_dev_buffer(scp, arr, param_len);
2084         if (-1 == res)
2085 		return DID_ERROR << 16;
2086 	else if (sdebug_verbose && (res < param_len))
2087 		sdev_printk(KERN_INFO, scp->device,
2088 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
2089 			    __func__, param_len, res);
2090 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2091 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2092 	if (md_len > 2) {
2093 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2094 		return check_condition_result;
2095 	}
2096 	off = bd_len + (mselect6 ? 4 : 8);
2097 	mpage = arr[off] & 0x3f;
2098 	ps = !!(arr[off] & 0x80);
2099 	if (ps) {
2100 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2101 		return check_condition_result;
2102 	}
2103 	spf = !!(arr[off] & 0x40);
2104 	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2105 		       (arr[off + 1] + 2);
2106 	if ((pg_len + off) > param_len) {
2107 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
2108 				PARAMETER_LIST_LENGTH_ERR, 0);
2109 		return check_condition_result;
2110 	}
2111 	switch (mpage) {
2112 	case 0x8:      /* Caching Mode page */
2113 		if (caching_pg[1] == arr[off + 1]) {
2114 			memcpy(caching_pg + 2, arr + off + 2,
2115 			       sizeof(caching_pg) - 2);
2116 			goto set_mode_changed_ua;
2117 		}
2118 		break;
2119 	case 0xa:      /* Control Mode page */
2120 		if (ctrl_m_pg[1] == arr[off + 1]) {
2121 			memcpy(ctrl_m_pg + 2, arr + off + 2,
2122 			       sizeof(ctrl_m_pg) - 2);
2123 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2124 			goto set_mode_changed_ua;
2125 		}
2126 		break;
2127 	case 0x1c:      /* Informational Exceptions Mode page */
2128 		if (iec_m_pg[1] == arr[off + 1]) {
2129 			memcpy(iec_m_pg + 2, arr + off + 2,
2130 			       sizeof(iec_m_pg) - 2);
2131 			goto set_mode_changed_ua;
2132 		}
2133 		break;
2134 	default:
2135 		break;
2136 	}
2137 	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2138 	return check_condition_result;
2139 set_mode_changed_ua:
2140 	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2141 	return 0;
2142 }
2143 
/* Temperature log page (0xd) parameters: current temperature (38 C)
 * and reference temperature (65 C). */
static int resp_temp_l_pg(unsigned char * arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* param 0x0000: temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* param 0x0001: reference */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return (int)sizeof(temp_l_pg);
}
2153 
2154 static int resp_ie_l_pg(unsigned char * arr)
2155 {
2156 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2157 		};
2158 
2159         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2160 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
2161 		arr[4] = THRESHOLD_EXCEEDED;
2162 		arr[5] = 0xff;
2163 	}
2164         return sizeof(ie_l_pg);
2165 }
2166 
2167 #define SDEBUG_MAX_LSENSE_SZ 512
2168 
/* Respond to LOG SENSE.  Supports the Supported Log Pages (0x0),
 * Temperature (0xd) and Informational Exceptions (0x2f) pages; only
 * subpage codes 0x00 and 0xff are accepted. */
static int resp_log_sense(struct scsi_cmnd * scp,
                          struct sdebug_dev_info * devip)
{
	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* parameter pointer control: unsupported */
	sp = cmd[1] & 0x1;	/* save parameters: unsupported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* set SPF (subpage format) bit */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* NOTE(review): clamps with SDEBUG_MAX_INQ_ARR_SZ rather than
	 * SDEBUG_MAX_LSENSE_SZ (the size of arr); looks copied from the
	 * INQUIRY path -- confirm the intended limit. */
	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
2247 
/* Validate that [lba, lba+num) lies within the virtual medium and that
 * the transfer length fits the backing store.  Returns 0 when OK,
 * otherwise sets sense data and returns check_condition_result. */
static int check_device_access_params(struct scsi_cmnd *scp,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}
2263 
/* Copy 'num' sectors starting at 'lba' between the command's
 * scatter-gather list and the fake_storep ramdisk, wrapping around at
 * sdebug_store_sectors.  Returns number of bytes copied or -1 if the
 * command's data direction does not match the requested transfer. */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;	/* rest: sectors wrapping past end of store */
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* bidi commands are accepted in either direction */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* do_div() divides lba in place and returns the remainder */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short copy: report what was transferred */

	if (rest) {
		/* wrapped part continues at the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2304 
2305 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2306  * arr into fake_store(lba,num) and return true. If comparison fails then
2307  * return false. */
2308 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2309 {
2310 	bool res;
2311 	u64 block, rest = 0;
2312 	u32 store_blks = sdebug_store_sectors;
2313 	u32 lb_size = sdebug_sector_size;
2314 
2315 	block = do_div(lba, store_blks);
2316 	if (block + num > store_blks)
2317 		rest = block + num - store_blks;
2318 
2319 	res = !memcmp(fake_storep + (block * lb_size), arr,
2320 		      (num - rest) * lb_size);
2321 	if (!res)
2322 		return res;
2323 	if (rest)
2324 		res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2325 			     rest * lb_size);
2326 	if (!res)
2327 		return res;
2328 	arr += num * lb_size;
2329 	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2330 	if (rest)
2331 		memcpy(fake_storep, arr + ((num - rest) * lb_size),
2332 		       rest * lb_size);
2333 	return res;
2334 }
2335 
2336 static __be16 dif_compute_csum(const void *buf, int len)
2337 {
2338 	__be16 csum;
2339 
2340 	if (sdebug_guard)
2341 		csum = (__force __be16)ip_compute_csum(buf, len);
2342 	else
2343 		csum = cpu_to_be16(crc_t10dif(buf, len));
2344 
2345 	return csum;
2346 }
2347 
/* Verify one sector's protection tuple against its data.  Returns 0 on
 * success, 0x01 on a guard (checksum) mismatch, 0x03 on a reference
 * tag mismatch; callers feed the nonzero value into the sense data. */
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* type 1: ref tag must equal the low 32 bits of the sector LBA */
	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2374 
/* Copy protection tuples for 'sectors' sectors starting at 'sector'
 * between the command's protection sglist and dif_storep, wrapping at
 * the end of the store.  'read' selects the direction (true: store to
 * sglist; false: sglist to store). */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the part that wraps past the end of the store */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part continues at the start of the store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2417 
2418 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2419 			    unsigned int sectors, u32 ei_lba)
2420 {
2421 	unsigned int i;
2422 	struct sd_dif_tuple *sdt;
2423 	sector_t sector;
2424 
2425 	for (i = 0; i < sectors; i++, ei_lba++) {
2426 		int ret;
2427 
2428 		sector = start_sec + i;
2429 		sdt = dif_store(sector);
2430 
2431 		if (sdt->app_tag == cpu_to_be16(0xffff))
2432 			continue;
2433 
2434 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2435 		if (ret) {
2436 			dif_errors++;
2437 			return ret;
2438 		}
2439 	}
2440 
2441 	dif_copy_prot(SCpnt, start_sec, sectors, true);
2442 	dix_reads++;
2443 
2444 	return 0;
2445 }
2446 
/* Respond to the READ command family (READ(6/10/12/16), READ(32) and
 * XDWRITEREAD(10)): decode the LBA and transfer length from the cdb,
 * honour protection and error-injection settings, then copy sectors
 * from the ramdisk into the data-in buffer.  Returns 0, a
 * check/illegal condition result, or a host byte error code. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA (used by DIF type 2) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode starting LBA and number of blocks per cdb variant */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* 21-bit LBA packed into cdb bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2: a nonzero RDPROTECT field in these cdbs is invalid */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_short)
			num /= 2;	/* inject a half-length (short) read */
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* inject a medium error when the range touches the magic address */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* report any shortfall between requested and copied bytes */
	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injections */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2585 
/* Hex/ASCII dump of a sector to the kernel log, 16 bytes per line;
 * printable ASCII bytes are shown as characters, others as hex.
 *
 * Fix: the printable test used 'c < 0x7e', wrongly excluding '~'
 * (0x7e), which is the last printable ASCII character. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i + j];

			if (c >= 0x20 && c <= 0x7e)	/* printable ASCII */
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i + j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i + j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2607 
/* Verify the protection tuples accompanying a write: walk the data and
 * protection scatter-gather lists in lockstep, checking each sector
 * with dif_verify().  On success the tuples are copied into dif_storep
 * via dif_copy_prot(); on failure the offending sector is dumped and
 * the dif_verify() error code returned. */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct sd_dif_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;	/* report as a guard check failure */
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct sd_dif_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* all tuples verified: persist them into dif_storep */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2679 
/* Map an LBA to its index in the provisioning bitmap, taking the
 * configured unmap alignment and granularity into account. */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	/* sector_div() divides lba in place */
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
2687 
/* Inverse of lba_to_map_index(): the first LBA covered by
 * provisioning-bitmap entry 'index'. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
2696 
2697 static unsigned int map_state(sector_t lba, unsigned int *num)
2698 {
2699 	sector_t end;
2700 	unsigned int mapped;
2701 	unsigned long index;
2702 	unsigned long next;
2703 
2704 	index = lba_to_map_index(lba);
2705 	mapped = test_bit(index, map_storep);
2706 
2707 	if (mapped)
2708 		next = find_next_zero_bit(map_storep, map_size, index);
2709 	else
2710 		next = find_next_bit(map_storep, map_size, index);
2711 
2712 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2713 	*num = end - lba;
2714 	return mapped;
2715 }
2716 
2717 static void map_region(sector_t lba, unsigned int len)
2718 {
2719 	sector_t end = lba + len;
2720 
2721 	while (lba < end) {
2722 		unsigned long index = lba_to_map_index(lba);
2723 
2724 		if (index < map_size)
2725 			set_bit(index, map_storep);
2726 
2727 		lba = map_index_to_lba(index + 1);
2728 	}
2729 }
2730 
/* Clear the provisioning bit of every map chunk that is fully covered by
 * [lba, lba + len), and scrub the corresponding backing store and
 * protection info. Partially covered chunks remain mapped. */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap a chunk when the range covers all of it */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {
				/* LBPRZ: unmapped blocks must read as zero */
				memset(fake_storep +
				       lba * sdebug_sector_size, 0,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				/* 0xff marks the PI tuples as invalid */
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2757 
/* Handler for the WRITE command family (WRITE 6/10/12/16, XDWRITEREAD(10)
 * and, by default, WRITE(32)). Decodes LBA/length from the CDB, validates
 * protection fields and range, verifies DIX protection data when present,
 * then writes into the fake store under the atomic_rw write lock.
 * Returns 0 on success or a check-condition/host-byte result. */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA for protection checks */
	unsigned long iflags;
	int ret;
	bool check_prot;	/* true for commands with RD/WRPROTECT bits */

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6): 21-bit LBA spread over bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length 0 means 256 blocks for WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 forbids non-zero WRPROTECT on these opcodes */
		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			/* drop the lock before building sense data */
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);	/* mark written blocks as mapped */
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection after a successful write */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

		if (ep->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (ep->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (ep->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2874 
/* Common worker for WRITE SAME(10/16). When @unmap is set and logical
 * block provisioning is active the range is deallocated; otherwise one
 * logical block is obtained (zeros when @ndob, else fetched from the
 * data-out buffer) and replicated across @num blocks under the atomic_rw
 * write lock. Note: @ei_lba is accepted but not used in this body. */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of @lba within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);	/* written blocks become mapped */
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
2925 
2926 static int resp_write_same_10(struct scsi_cmnd *scp,
2927 			      struct sdebug_dev_info *devip)
2928 {
2929 	u8 *cmd = scp->cmnd;
2930 	u32 lba;
2931 	u16 num;
2932 	u32 ei_lba = 0;
2933 	bool unmap = false;
2934 
2935 	if (cmd[1] & 0x8) {
2936 		if (sdebug_lbpws10 == 0) {
2937 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2938 			return check_condition_result;
2939 		} else
2940 			unmap = true;
2941 	}
2942 	lba = get_unaligned_be32(cmd + 2);
2943 	num = get_unaligned_be16(cmd + 7);
2944 	if (num > sdebug_write_same_length) {
2945 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
2946 		return check_condition_result;
2947 	}
2948 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
2949 }
2950 
2951 static int resp_write_same_16(struct scsi_cmnd *scp,
2952 			      struct sdebug_dev_info *devip)
2953 {
2954 	u8 *cmd = scp->cmnd;
2955 	u64 lba;
2956 	u32 num;
2957 	u32 ei_lba = 0;
2958 	bool unmap = false;
2959 	bool ndob = false;
2960 
2961 	if (cmd[1] & 0x8) {	/* UNMAP */
2962 		if (sdebug_lbpws == 0) {
2963 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2964 			return check_condition_result;
2965 		} else
2966 			unmap = true;
2967 	}
2968 	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
2969 		ndob = true;
2970 	lba = get_unaligned_be64(cmd + 2);
2971 	num = get_unaligned_be32(cmd + 10);
2972 	if (num > sdebug_write_same_length) {
2973 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
2974 		return check_condition_result;
2975 	}
2976 	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
2977 }
2978 
2979 /* Note the mode field is in the same position as the (lower) service action
2980  * field. For the Report supported operation codes command, SPC-4 suggests
2981  * each mode of this command should be reported separately; for future. */
2982 static int resp_write_buffer(struct scsi_cmnd *scp,
2983 			     struct sdebug_dev_info *devip)
2984 {
2985 	u8 *cmd = scp->cmnd;
2986 	struct scsi_device *sdp = scp->device;
2987 	struct sdebug_dev_info *dp;
2988 	u8 mode;
2989 
2990 	mode = cmd[1] & 0x1f;
2991 	switch (mode) {
2992 	case 0x4:	/* download microcode (MC) and activate (ACT) */
2993 		/* set UAs on this device only */
2994 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2995 		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
2996 		break;
2997 	case 0x5:	/* download MC, save and ACT */
2998 		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
2999 		break;
3000 	case 0x6:	/* download MC with offsets and ACT */
3001 		/* set UAs on most devices (LUs) in this target */
3002 		list_for_each_entry(dp,
3003 				    &devip->sdbg_host->dev_info_list,
3004 				    dev_list)
3005 			if (dp->target == sdp->id) {
3006 				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3007 				if (devip != dp)
3008 					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3009 						dp->uas_bm);
3010 			}
3011 		break;
3012 	case 0x7:	/* download MC with offsets, save, and ACT */
3013 		/* set UA on all devices (LUs) in this target */
3014 		list_for_each_entry(dp,
3015 				    &devip->sdbg_host->dev_info_list,
3016 				    dev_list)
3017 			if (dp->target == sdp->id)
3018 				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3019 					dp->uas_bm);
3020 		break;
3021 	default:
3022 		/* do nothing for this command for other mode values */
3023 		break;
3024 	}
3025 	return 0;
3026 }
3027 
/* COMPARE AND WRITE: fetch 2*num blocks of data-out (compare data then
 * write data) into a temporary buffer, compare the first half against the
 * store, and on a match write the second half. The global fake_storep is
 * temporarily redirected at the temporary buffer so do_device_access()
 * can be reused for the fetch; this is safe only because the atomic_rw
 * write lock is held for the duration. */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;		/* holds compare data followed by write data */
	u8 *fake_storep_hold;	/* saved fake_storep during the redirect */
	u64 lba;
	u32 dnum;		/* blocks to fetch: compare + write halves */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* compare first half against the store; write second half on match */
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3103 
/* One UNMAP parameter-list block descriptor: 8-byte starting LBA,
 * 4-byte block count, 4 reserved bytes; all fields big-endian. */
struct unmap_block_desc {
	__be64	lba;		/* starting logical block address */
	__be32	blocks;		/* number of logical blocks to unmap */
	__be32	__reserved;
};
3109 
/* UNMAP command: parse the data-out parameter list and deallocate each
 * described range under the atomic_rw write lock.
 * NOTE(review): the BUG_ONs below will panic if the submitted buffer
 * length disagrees with the CDB/parameter-list lengths — they trust the
 * submitter rather than returning a check condition. */
static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;
	unsigned long iflags;


	if (!scsi_debug_lbp())
		return 0;	/* fib and say its done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	/* 8-byte parameter-list header, then 16 bytes per descriptor */
	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	/* header self-consistency: data length and block descriptor length */
	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock_irqsave(&atomic_rw, iflags);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(buf);

	return ret;
}
3165 
3166 #define SDEBUG_GET_LBA_STATUS_LEN 32
3167 
3168 static int resp_get_lba_status(struct scsi_cmnd *scp,
3169 			       struct sdebug_dev_info *devip)
3170 {
3171 	u8 *cmd = scp->cmnd;
3172 	u64 lba;
3173 	u32 alloc_len, mapped, num;
3174 	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3175 	int ret;
3176 
3177 	lba = get_unaligned_be64(cmd + 2);
3178 	alloc_len = get_unaligned_be32(cmd + 10);
3179 
3180 	if (alloc_len < 24)
3181 		return 0;
3182 
3183 	ret = check_device_access_params(scp, lba, 1);
3184 	if (ret)
3185 		return ret;
3186 
3187 	if (scsi_debug_lbp())
3188 		mapped = map_state(lba, &num);
3189 	else {
3190 		mapped = 1;
3191 		/* following just in case virtual_gb changed */
3192 		sdebug_capacity = get_sdebug_capacity();
3193 		if (sdebug_capacity - lba <= 0xffffffff)
3194 			num = sdebug_capacity - lba;
3195 		else
3196 			num = 0xffffffff;
3197 	}
3198 
3199 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3200 	put_unaligned_be32(20, arr);		/* Parameter Data Length */
3201 	put_unaligned_be64(lba, arr + 8);	/* LBA */
3202 	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
3203 	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */
3204 
3205 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3206 }
3207 
3208 #define SDEBUG_RLUN_ARR_SZ 256
3209 
/* REPORT LUNS: build a LUN list for this target. select_report 0 reports
 * ordinary LUNs, 1 reports only well-known LUNs, 2 reports both. The LUN
 * LIST LENGTH header field reflects the full count even if the response
 * array truncates the list. */
static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, want_wlun, shortish;
	u64 lun;
	unsigned char *cmd = scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	clear_luns_changed_on_target(devip);
	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	shortish = (alloc_len < 4);
	if (shortish || (select_report > 2)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = sdebug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;	/* well-known LUNs only */
	else if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN 0 suppressed; numbering starts at 1 */
	want_wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + want_wlun;
	/* LUN LIST LENGTH (bytes) goes in header bytes 2..3 */
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	/* n = how many LUN entries actually fit in the response array */
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
			    sizeof(struct scsi_lun)), num);
	if (n < num) {
		/* truncated: drop the well-known LUN first */
		want_wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (sdebug_no_lun_0 ? 1 : 0);
             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (want_wlun) {
		/* append the REPORT LUNS well-known LU */
		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
3266 
3267 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3268 			    unsigned int num, struct sdebug_dev_info *devip)
3269 {
3270 	int j;
3271 	unsigned char *kaddr, *buf;
3272 	unsigned int offset;
3273 	struct scsi_data_buffer *sdb = scsi_in(scp);
3274 	struct sg_mapping_iter miter;
3275 
3276 	/* better not to use temporary buffer. */
3277 	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3278 	if (!buf) {
3279 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3280 				INSUFF_RES_ASCQ);
3281 		return check_condition_result;
3282 	}
3283 
3284 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3285 
3286 	offset = 0;
3287 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3288 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
3289 
3290 	while (sg_miter_next(&miter)) {
3291 		kaddr = miter.addr;
3292 		for (j = 0; j < miter.length; j++)
3293 			*(kaddr + j) ^= *(buf + offset + j);
3294 
3295 		offset += miter.length;
3296 	}
3297 	sg_miter_stop(&miter);
3298 	kfree(buf);
3299 
3300 	return 0;
3301 }
3302 
3303 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3304 			       struct sdebug_dev_info *devip)
3305 {
3306 	u8 *cmd = scp->cmnd;
3307 	u64 lba;
3308 	u32 num;
3309 	int errsts;
3310 
3311 	if (!scsi_bidi_cmnd(scp)) {
3312 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3313 				INSUFF_RES_ASCQ);
3314 		return check_condition_result;
3315 	}
3316 	errsts = resp_read_dt0(scp, devip);
3317 	if (errsts)
3318 		return errsts;
3319 	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
3320 		errsts = resp_write_dt0(scp, devip);
3321 		if (errsts)
3322 			return errsts;
3323 	}
3324 	lba = get_unaligned_be32(cmd + 2);
3325 	num = get_unaligned_be16(cmd + 7);
3326 	return resp_xdwriteread(scp, lba, num, devip);
3327 }
3328 
3329 /* Queued command completions converge here. */
/* Queued command completions converge here. Validates the queue slot,
 * detaches the command under queued_arr_lock, handles the case where the
 * user has shrunk max_queue, then invokes the mid-level done callback
 * outside the lock. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qa_indx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	atomic_inc(&sdebug_completions);
	qa_indx = sd_dp->qa_indx;
	if (unlikely((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE))) {
		pr_err("wild qa_indx=%d\n", qa_indx);
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[qa_indx];
	scp = sqcp->a_cmnd;
	if (unlikely(NULL == scp)) {
		/* command already gone (e.g. aborted) */
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("scp is NULL\n");
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qa_indx, queued_in_use_bm))) {
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qa_indx >= retval) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* shrink retired_max_queue once the high slots drain */
		k = find_last_bit(queued_in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3386 
3387 /* When high resolution timer goes off this function is called. */
3388 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3389 {
3390 	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3391 						  hrt);
3392 	sdebug_q_cmd_complete(sd_dp);
3393 	return HRTIMER_NORESTART;
3394 }
3395 
3396 /* When work queue schedules work, it calls this function. */
3397 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3398 {
3399 	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3400 						  ew.work);
3401 	sdebug_q_cmd_complete(sd_dp);
3402 }
3403 
3404 static struct sdebug_dev_info *sdebug_device_create(
3405 			struct sdebug_host_info *sdbg_host, gfp_t flags)
3406 {
3407 	struct sdebug_dev_info *devip;
3408 
3409 	devip = kzalloc(sizeof(*devip), flags);
3410 	if (devip) {
3411 		devip->sdbg_host = sdbg_host;
3412 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3413 	}
3414 	return devip;
3415 }
3416 
3417 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3418 {
3419 	struct sdebug_host_info *sdbg_host;
3420 	struct sdebug_dev_info *open_devip = NULL;
3421 	struct sdebug_dev_info *devip;
3422 
3423 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3424 	if (!sdbg_host) {
3425 		pr_err("Host info NULL\n");
3426 		return NULL;
3427         }
3428 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3429 		if ((devip->used) && (devip->channel == sdev->channel) &&
3430                     (devip->target == sdev->id) &&
3431                     (devip->lun == sdev->lun))
3432                         return devip;
3433 		else {
3434 			if ((!devip->used) && (!open_devip))
3435 				open_devip = devip;
3436 		}
3437 	}
3438 	if (!open_devip) { /* try and make a new one */
3439 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3440 		if (!open_devip) {
3441 			pr_err("out of memory at line %d\n", __LINE__);
3442 			return NULL;
3443 		}
3444 	}
3445 
3446 	open_devip->channel = sdev->channel;
3447 	open_devip->target = sdev->id;
3448 	open_devip->lun = sdev->lun;
3449 	open_devip->sdbg_host = sdbg_host;
3450 	atomic_set(&open_devip->num_in_q, 0);
3451 	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3452 	open_devip->used = true;
3453 	return open_devip;
3454 }
3455 
3456 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3457 {
3458 	if (sdebug_verbose)
3459 		pr_info("slave_alloc <%u %u %u %llu>\n",
3460 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3461 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3462 	return 0;
3463 }
3464 
3465 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3466 {
3467 	struct sdebug_dev_info *devip =
3468 			(struct sdebug_dev_info *)sdp->hostdata;
3469 
3470 	if (sdebug_verbose)
3471 		pr_info("slave_configure <%u %u %u %llu>\n",
3472 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3473 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3474 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3475 	if (NULL == devip) {
3476 		devip = find_build_dev_info(sdp);
3477 		if (NULL == devip)
3478 			return 1;  /* no resources, will be marked offline */
3479 	}
3480 	sdp->hostdata = devip;
3481 	blk_queue_max_segment_size(sdp->request_queue, -1U);
3482 	if (sdebug_no_uld)
3483 		sdp->no_uld_attach = 1;
3484 	return 0;
3485 }
3486 
3487 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3488 {
3489 	struct sdebug_dev_info *devip =
3490 		(struct sdebug_dev_info *)sdp->hostdata;
3491 
3492 	if (sdebug_verbose)
3493 		pr_info("slave_destroy <%u %u %u %llu>\n",
3494 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3495 	if (devip) {
3496 		/* make this slot available for re-use */
3497 		devip->used = false;
3498 		sdp->hostdata = NULL;
3499 	}
3500 }
3501 
3502 /* If @cmnd found deletes its timer or work queue and returns true; else
3503    returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k, qmax, r_qmax;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	/* scan up to the larger of max_queue and retired_max_queue */
	qmax = sdebug_max_queue;
	r_qmax = atomic_read(&retired_max_queue);
	if (r_qmax > qmax)
		qmax = r_qmax;
	for (k = 0; k < qmax; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (cmnd != sqcp->a_cmnd)
				continue;
			/* found command */
			devip = (struct sdebug_dev_info *)
				cmnd->device->hostdata;
			if (devip)
				atomic_dec(&devip->num_in_q);
			sqcp->a_cmnd = NULL;
			sd_dp = sqcp->sd_dp;
			/* drop the lock before cancelling: hrtimer_cancel/
			 * cancel_work_sync may sleep or spin on the handler */
			spin_unlock_irqrestore(&queued_arr_lock,
					       iflags);
			if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
				if (sd_dp)
					hrtimer_cancel(&sd_dp->hrt);
			} else if (sdebug_jdelay < 0) {
				/* negative jdelay => work queue deferral */
				if (sd_dp)
					cancel_work_sync(&sd_dp->ew.work);
			}
			clear_bit(k, queued_in_use_bm);
			return true;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return false;
}
3545 
3546 /* Deletes (stops) timers or work queues of all queued commands */
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		if (test_bit(k, queued_in_use_bm)) {
			sqcp = &queued_arr[k];
			if (NULL == sqcp->a_cmnd)
				continue;
			devip = (struct sdebug_dev_info *)
				sqcp->a_cmnd->device->hostdata;
			if (devip)
				atomic_dec(&devip->num_in_q);
			sqcp->a_cmnd = NULL;
			sd_dp = sqcp->sd_dp;
			/* drop the lock while cancelling (may sleep/spin),
			 * then reacquire it to continue the scan */
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
				if (sd_dp)
					hrtimer_cancel(&sd_dp->hrt);
			} else if (sdebug_jdelay < 0) {
				/* negative jdelay => work queue deferral */
				if (sd_dp)
					cancel_work_sync(&sd_dp->ew.work);
			}
			clear_bit(k, queued_in_use_bm);
			spin_lock_irqsave(&queued_arr_lock, iflags);
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
3581 
3582 /* Free queued command memory on heap */
3583 static void free_all_queued(void)
3584 {
3585 	int k;
3586 	struct sdebug_queued_cmd *sqcp;
3587 
3588 	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3589 		sqcp = &queued_arr[k];
3590 		kfree(sqcp->sd_dp);
3591 		sqcp->sd_dp = NULL;
3592 	}
3593 }
3594 
3595 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3596 {
3597 	bool ok;
3598 
3599 	++num_aborts;
3600 	if (SCpnt) {
3601 		ok = stop_queued_cmnd(SCpnt);
3602 		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3603 			sdev_printk(KERN_INFO, SCpnt->device,
3604 				    "%s: command%s found\n", __func__,
3605 				    ok ? "" : " not");
3606 	}
3607 	return SUCCESS;
3608 }
3609 
3610 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3611 {
3612 	++num_dev_resets;
3613 	if (SCpnt && SCpnt->device) {
3614 		struct scsi_device *sdp = SCpnt->device;
3615 		struct sdebug_dev_info *devip =
3616 				(struct sdebug_dev_info *)sdp->hostdata;
3617 
3618 		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3619 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3620 		if (devip)
3621 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
3622 	}
3623 	return SUCCESS;
3624 }
3625 
3626 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3627 {
3628 	struct sdebug_host_info *sdbg_host;
3629 	struct sdebug_dev_info *devip;
3630 	struct scsi_device *sdp;
3631 	struct Scsi_Host *hp;
3632 	int k = 0;
3633 
3634 	++num_target_resets;
3635 	if (!SCpnt)
3636 		goto lie;
3637 	sdp = SCpnt->device;
3638 	if (!sdp)
3639 		goto lie;
3640 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3641 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3642 	hp = sdp->host;
3643 	if (!hp)
3644 		goto lie;
3645 	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3646 	if (sdbg_host) {
3647 		list_for_each_entry(devip,
3648 				    &sdbg_host->dev_info_list,
3649 				    dev_list)
3650 			if (devip->target == sdp->id) {
3651 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3652 				++k;
3653 			}
3654 	}
3655 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3656 		sdev_printk(KERN_INFO, sdp,
3657 			    "%s: %d device(s) found in target\n", __func__, k);
3658 lie:
3659 	return SUCCESS;
3660 }
3661 
3662 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3663 {
3664 	struct sdebug_host_info *sdbg_host;
3665 	struct sdebug_dev_info *devip;
3666         struct scsi_device * sdp;
3667         struct Scsi_Host * hp;
3668 	int k = 0;
3669 
3670 	++num_bus_resets;
3671 	if (!(SCpnt && SCpnt->device))
3672 		goto lie;
3673 	sdp = SCpnt->device;
3674 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3675 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3676 	hp = sdp->host;
3677 	if (hp) {
3678 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3679 		if (sdbg_host) {
3680 			list_for_each_entry(devip,
3681                                             &sdbg_host->dev_info_list,
3682 					    dev_list) {
3683 				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3684 				++k;
3685 			}
3686 		}
3687 	}
3688 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3689 		sdev_printk(KERN_INFO, sdp,
3690 			    "%s: %d device(s) found in host\n", __func__, k);
3691 lie:
3692 	return SUCCESS;
3693 }
3694 
3695 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3696 {
3697 	struct sdebug_host_info * sdbg_host;
3698 	struct sdebug_dev_info *devip;
3699 	int k = 0;
3700 
3701 	++num_host_resets;
3702 	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3703 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3704         spin_lock(&sdebug_host_list_lock);
3705         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3706 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
3707 				    dev_list) {
3708 			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3709 			++k;
3710 		}
3711         }
3712         spin_unlock(&sdebug_host_list_lock);
3713 	stop_all_queued();
3714 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3715 		sdev_printk(KERN_INFO, SCpnt->device,
3716 			    "%s: %d device(s) found\n", __func__, k);
3717 	return SUCCESS;
3718 }
3719 
3720 static void __init sdebug_build_parts(unsigned char *ramp,
3721 				      unsigned long store_size)
3722 {
3723 	struct partition * pp;
3724 	int starts[SDEBUG_MAX_PARTS + 2];
3725 	int sectors_per_part, num_sectors, k;
3726 	int heads_by_sects, start_sec, end_sec;
3727 
3728 	/* assume partition table already zeroed */
3729 	if ((sdebug_num_parts < 1) || (store_size < 1048576))
3730 		return;
3731 	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
3732 		sdebug_num_parts = SDEBUG_MAX_PARTS;
3733 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
3734 	}
3735 	num_sectors = (int)sdebug_store_sectors;
3736 	sectors_per_part = (num_sectors - sdebug_sectors_per)
3737 			   / sdebug_num_parts;
3738 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
3739         starts[0] = sdebug_sectors_per;
3740 	for (k = 1; k < sdebug_num_parts; ++k)
3741 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
3742 			    * heads_by_sects;
3743 	starts[sdebug_num_parts] = num_sectors;
3744 	starts[sdebug_num_parts + 1] = 0;
3745 
3746 	ramp[510] = 0x55;	/* magic partition markings */
3747 	ramp[511] = 0xAA;
3748 	pp = (struct partition *)(ramp + 0x1be);
3749 	for (k = 0; starts[k + 1]; ++k, ++pp) {
3750 		start_sec = starts[k];
3751 		end_sec = starts[k + 1] - 1;
3752 		pp->boot_ind = 0;
3753 
3754 		pp->cyl = start_sec / heads_by_sects;
3755 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
3756 			   / sdebug_sectors_per;
3757 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
3758 
3759 		pp->end_cyl = end_sec / heads_by_sects;
3760 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3761 			       / sdebug_sectors_per;
3762 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3763 
3764 		pp->start_sect = cpu_to_le32(start_sec);
3765 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3766 		pp->sys_ind = 0x83;	/* plain Linux partition */
3767 	}
3768 }
3769 
3770 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3771 			 int scsi_result, int delta_jiff)
3772 {
3773 	unsigned long iflags;
3774 	int k, num_in_q, qdepth, inject;
3775 	struct sdebug_queued_cmd *sqcp = NULL;
3776 	struct scsi_device *sdp;
3777 	struct sdebug_defer *sd_dp;
3778 
3779 	if (unlikely(WARN_ON(!cmnd)))
3780 		return SCSI_MLQUEUE_HOST_BUSY;
3781 
3782 	if (unlikely(NULL == devip)) {
3783 		if (0 == scsi_result)
3784 			scsi_result = DID_NO_CONNECT << 16;
3785 		goto respond_in_thread;
3786 	}
3787 
3788 	sdp = cmnd->device;
3789 
3790 	if (unlikely(sdebug_verbose && scsi_result))
3791 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3792 			    __func__, scsi_result);
3793 	if (delta_jiff == 0)
3794 		goto respond_in_thread;
3795 
3796 	/* schedule the response at a later time if resources permit */
3797 	spin_lock_irqsave(&queued_arr_lock, iflags);
3798 	num_in_q = atomic_read(&devip->num_in_q);
3799 	qdepth = cmnd->device->queue_depth;
3800 	inject = 0;
3801 	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
3802 		if (scsi_result) {
3803 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
3804 			goto respond_in_thread;
3805 		} else
3806 			scsi_result = device_qfull_result;
3807 	} else if (unlikely((sdebug_every_nth != 0) &&
3808 			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
3809 			    (scsi_result == 0))) {
3810 		if ((num_in_q == (qdepth - 1)) &&
3811 		    (atomic_inc_return(&sdebug_a_tsf) >=
3812 		     abs(sdebug_every_nth))) {
3813 			atomic_set(&sdebug_a_tsf, 0);
3814 			inject = 1;
3815 			scsi_result = device_qfull_result;
3816 		}
3817 	}
3818 
3819 	k = find_first_zero_bit(queued_in_use_bm, sdebug_max_queue);
3820 	if (unlikely(k >= sdebug_max_queue)) {
3821 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
3822 		if (scsi_result)
3823 			goto respond_in_thread;
3824 		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
3825 			scsi_result = device_qfull_result;
3826 		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
3827 			sdev_printk(KERN_INFO, sdp,
3828 				    "%s: max_queue=%d exceeded, %s\n",
3829 				    __func__, sdebug_max_queue,
3830 				    (scsi_result ?  "status: TASK SET FULL" :
3831 						    "report: host busy"));
3832 		if (scsi_result)
3833 			goto respond_in_thread;
3834 		else
3835 			return SCSI_MLQUEUE_HOST_BUSY;
3836 	}
3837 	__set_bit(k, queued_in_use_bm);
3838 	atomic_inc(&devip->num_in_q);
3839 	sqcp = &queued_arr[k];
3840 	sqcp->a_cmnd = cmnd;
3841 	cmnd->result = scsi_result;
3842 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
3843 	sd_dp = sqcp->sd_dp;
3844 	if ((delta_jiff > 0) || (sdebug_ndelay > 0)) {
3845 		ktime_t kt;
3846 
3847 		if (delta_jiff > 0) {
3848 			struct timespec ts;
3849 
3850 			jiffies_to_timespec(delta_jiff, &ts);
3851 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
3852 		} else
3853 			kt = ktime_set(0, sdebug_ndelay);
3854 		if (NULL == sd_dp) {
3855 			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
3856 			if (NULL == sd_dp)
3857 				return SCSI_MLQUEUE_HOST_BUSY;
3858 			sqcp->sd_dp = sd_dp;
3859 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
3860 				     HRTIMER_MODE_REL);
3861 			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
3862 			sd_dp->qa_indx = k;
3863 		}
3864 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL);
3865 	} else {	/* jdelay < 0 */
3866 		if (NULL == sd_dp) {
3867 			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
3868 			if (NULL == sd_dp)
3869 				return SCSI_MLQUEUE_HOST_BUSY;
3870 			sqcp->sd_dp = sd_dp;
3871 			sd_dp->qa_indx = k;
3872 			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
3873 		}
3874 		schedule_work(&sd_dp->ew.work);
3875 	}
3876 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
3877 		     (scsi_result == device_qfull_result)))
3878 		sdev_printk(KERN_INFO, sdp,
3879 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
3880 			    num_in_q, (inject ? "<inject> " : ""),
3881 			    "status: TASK SET FULL");
3882 	return 0;
3883 
3884 respond_in_thread:	/* call back to mid-layer using invocation thread */
3885 	cmnd->result = scsi_result;
3886 	cmnd->scsi_done(cmnd);
3887 	return 0;
3888 }
3889 
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
   The list is kept in alphabetical order. Entries carrying S_IWUSR are
   writable (by root) at run time; the rest are read-only after load.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
3937 
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

/* Per-parameter help strings shown by 'modinfo scsi_debug'; kept in the
 * same alphabetical order as the module_param_named() declarations. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3982 
3983 static char sdebug_info[256];
3984 
3985 static const char * scsi_debug_info(struct Scsi_Host * shp)
3986 {
3987 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3988 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3989 		sdebug_version_date, sdebug_dev_size_mb, sdebug_opts);
3990 	return sdebug_info;
3991 }
3992 
3993 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
3994 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
3995 				 int length)
3996 {
3997 	char arr[16];
3998 	int opts;
3999 	int minLen = length > 15 ? 15 : length;
4000 
4001 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4002 		return -EACCES;
4003 	memcpy(arr, buffer, minLen);
4004 	arr[minLen] = '\0';
4005 	if (1 != sscanf(arr, "%d", &opts))
4006 		return -EINVAL;
4007 	sdebug_opts = opts;
4008 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4009 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4010 	if (sdebug_every_nth != 0)
4011 		atomic_set(&sdebug_cmnd_count, 0);
4012 	return length;
4013 }
4014 
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, l;	/* first/last set bits in the busy bitmap */
	char b[32];	/* optional " (curr:%d)" suffix for every_nth */

	/* show the in-progress count behind every_nth, picking whichever
	 * counter (rare-TSF vs command) the current opts select */
	if (sdebug_every_nth > 0)
		snprintf(b, sizeof(b), " (curr:%d)",
			 ((SDEBUG_OPT_RARE_TSF & sdebug_opts) ?
				atomic_read(&sdebug_a_tsf) :
				atomic_read(&sdebug_cmnd_count)));
	else
		b[0] = '\0';

	/* one-shot dump of configuration, geometry and statistics */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
		"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
		"every_nth=%d%s\n"
		"delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
		"sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
		"command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
		"host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
		"usec_in_jiffy=%lu\n",
		SCSI_DEBUG_VERSION, sdebug_version_date,
		sdebug_num_tgts, sdebug_dev_size_mb, sdebug_opts,
		sdebug_every_nth, b, sdebug_jdelay, sdebug_ndelay,
		sdebug_max_luns, atomic_read(&sdebug_completions),
		sdebug_sector_size, sdebug_cylinders_per, sdebug_heads,
		sdebug_sectors_per, num_aborts, num_dev_resets,
		num_target_resets, num_bus_resets, num_host_resets,
		dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);

	/* if any commands are queued, show the busy bitmap extent */
	f = find_first_bit(queued_in_use_bm, sdebug_max_queue);
	if (f != sdebug_max_queue) {
		l = find_last_bit(queued_in_use_bm, sdebug_max_queue);
		seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
			   "queued_in_use_bm", f, l);
	}
	return 0;
}
4056 
4057 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4058 {
4059 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4060 }
4061 /* Returns -EBUSY if jdelay is being changed and commands are queued */
4062 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4063 			   size_t count)
4064 {
4065 	int jdelay, res;
4066 
4067 	if ((count > 0) && (1 == sscanf(buf, "%d", &jdelay))) {
4068 		res = count;
4069 		if (sdebug_jdelay != jdelay) {
4070 			unsigned long iflags;
4071 			int k;
4072 
4073 			spin_lock_irqsave(&queued_arr_lock, iflags);
4074 			k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4075 			if (k != sdebug_max_queue)
4076 				res = -EBUSY;	/* have queued commands */
4077 			else {
4078 				/* make sure sdebug_defer instances get
4079 				 * re-allocated for new delay variant */
4080 				free_all_queued();
4081 				sdebug_jdelay = jdelay;
4082 				sdebug_ndelay = 0;
4083 			}
4084 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4085 		}
4086 		return res;
4087 	}
4088 	return -EINVAL;
4089 }
4090 static DRIVER_ATTR_RW(delay);
4091 
4092 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4093 {
4094 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4095 }
4096 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4097 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4098 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4099 			    size_t count)
4100 {
4101 	unsigned long iflags;
4102 	int ndelay, res, k;
4103 
4104 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4105 	    (ndelay >= 0) && (ndelay < 1000000000)) {
4106 		res = count;
4107 		if (sdebug_ndelay != ndelay) {
4108 			spin_lock_irqsave(&queued_arr_lock, iflags);
4109 			k = find_first_bit(queued_in_use_bm, sdebug_max_queue);
4110 			if (k != sdebug_max_queue)
4111 				res = -EBUSY;	/* have queued commands */
4112 			else {
4113 				/* make sure sdebug_defer instances get
4114 				 * re-allocated for new delay variant */
4115 				free_all_queued();
4116 				sdebug_ndelay = ndelay;
4117 				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
4118 							: DEF_JDELAY;
4119 			}
4120 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
4121 		}
4122 		return res;
4123 	}
4124 	return -EINVAL;
4125 }
4126 static DRIVER_ATTR_RW(ndelay);
4127 
4128 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4129 {
4130 	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4131 }
4132 
4133 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4134 			  size_t count)
4135 {
4136         int opts;
4137 	char work[20];
4138 
4139         if (1 == sscanf(buf, "%10s", work)) {
4140 		if (0 == strncasecmp(work,"0x", 2)) {
4141 			if (1 == sscanf(&work[2], "%x", &opts))
4142 				goto opts_done;
4143 		} else {
4144 			if (1 == sscanf(work, "%d", &opts))
4145 				goto opts_done;
4146 		}
4147 	}
4148 	return -EINVAL;
4149 opts_done:
4150 	sdebug_opts = opts;
4151 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4152 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4153 	atomic_set(&sdebug_cmnd_count, 0);
4154 	atomic_set(&sdebug_a_tsf, 0);
4155 	return count;
4156 }
4157 static DRIVER_ATTR_RW(opts);
4158 
4159 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4160 {
4161 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4162 }
4163 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4164 			   size_t count)
4165 {
4166         int n;
4167 
4168 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4169 		sdebug_ptype = n;
4170 		return count;
4171 	}
4172 	return -EINVAL;
4173 }
4174 static DRIVER_ATTR_RW(ptype);
4175 
4176 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4177 {
4178 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4179 }
4180 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4181 			    size_t count)
4182 {
4183         int n;
4184 
4185 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4186 		sdebug_dsense = n;
4187 		return count;
4188 	}
4189 	return -EINVAL;
4190 }
4191 static DRIVER_ATTR_RW(dsense);
4192 
4193 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4194 {
4195 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4196 }
4197 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4198 			     size_t count)
4199 {
4200         int n;
4201 
4202 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4203 		n = (n > 0);
4204 		sdebug_fake_rw = (sdebug_fake_rw > 0);
4205 		if (sdebug_fake_rw != n) {
4206 			if ((0 == n) && (NULL == fake_storep)) {
4207 				unsigned long sz =
4208 					(unsigned long)sdebug_dev_size_mb *
4209 					1048576;
4210 
4211 				fake_storep = vmalloc(sz);
4212 				if (NULL == fake_storep) {
4213 					pr_err("out of memory, 9\n");
4214 					return -ENOMEM;
4215 				}
4216 				memset(fake_storep, 0, sz);
4217 			}
4218 			sdebug_fake_rw = n;
4219 		}
4220 		return count;
4221 	}
4222 	return -EINVAL;
4223 }
4224 static DRIVER_ATTR_RW(fake_rw);
4225 
4226 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4227 {
4228 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4229 }
4230 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4231 			      size_t count)
4232 {
4233         int n;
4234 
4235 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4236 		sdebug_no_lun_0 = n;
4237 		return count;
4238 	}
4239 	return -EINVAL;
4240 }
4241 static DRIVER_ATTR_RW(no_lun_0);
4242 
4243 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4244 {
4245 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4246 }
4247 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4248 			      size_t count)
4249 {
4250         int n;
4251 
4252 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4253 		sdebug_num_tgts = n;
4254 		sdebug_max_tgts_luns();
4255 		return count;
4256 	}
4257 	return -EINVAL;
4258 }
4259 static DRIVER_ATTR_RW(num_tgts);
4260 
4261 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4262 {
4263 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4264 }
4265 static DRIVER_ATTR_RO(dev_size_mb);
4266 
4267 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4268 {
4269 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4270 }
4271 static DRIVER_ATTR_RO(num_parts);
4272 
4273 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4274 {
4275 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4276 }
4277 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4278 			       size_t count)
4279 {
4280         int nth;
4281 
4282 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4283 		sdebug_every_nth = nth;
4284 		atomic_set(&sdebug_cmnd_count, 0);
4285 		return count;
4286 	}
4287 	return -EINVAL;
4288 }
4289 static DRIVER_ATTR_RW(every_nth);
4290 
4291 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4292 {
4293 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4294 }
4295 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4296 			      size_t count)
4297 {
4298         int n;
4299 	bool changed;
4300 
4301 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4302 		changed = (sdebug_max_luns != n);
4303 		sdebug_max_luns = n;
4304 		sdebug_max_tgts_luns();
4305 		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
4306 			struct sdebug_host_info *sdhp;
4307 			struct sdebug_dev_info *dp;
4308 
4309 			spin_lock(&sdebug_host_list_lock);
4310 			list_for_each_entry(sdhp, &sdebug_host_list,
4311 					    host_list) {
4312 				list_for_each_entry(dp, &sdhp->dev_info_list,
4313 						    dev_list) {
4314 					set_bit(SDEBUG_UA_LUNS_CHANGED,
4315 						dp->uas_bm);
4316 				}
4317 			}
4318 			spin_unlock(&sdebug_host_list_lock);
4319 		}
4320 		return count;
4321 	}
4322 	return -EINVAL;
4323 }
4324 static DRIVER_ATTR_RW(max_luns);
4325 
4326 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4327 {
4328 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4329 }
4330 /* N.B. max_queue can be changed while there are queued commands. In flight
4331  * commands beyond the new max_queue will be completed. */
4332 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4333 			       size_t count)
4334 {
4335 	unsigned long iflags;
4336 	int n, k;
4337 
4338 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4339 	    (n <= SCSI_DEBUG_CANQUEUE)) {
4340 		spin_lock_irqsave(&queued_arr_lock, iflags);
4341 		k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4342 		sdebug_max_queue = n;
4343 		if (SCSI_DEBUG_CANQUEUE == k)
4344 			atomic_set(&retired_max_queue, 0);
4345 		else if (k >= n)
4346 			atomic_set(&retired_max_queue, k + 1);
4347 		else
4348 			atomic_set(&retired_max_queue, 0);
4349 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
4350 		return count;
4351 	}
4352 	return -EINVAL;
4353 }
4354 static DRIVER_ATTR_RW(max_queue);
4355 
4356 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4357 {
4358 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4359 }
4360 static DRIVER_ATTR_RO(no_uld);
4361 
4362 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4363 {
4364 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4365 }
4366 static DRIVER_ATTR_RO(scsi_level);
4367 
4368 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4369 {
4370 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4371 }
4372 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4373 				size_t count)
4374 {
4375         int n;
4376 	bool changed;
4377 
4378 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4379 		changed = (sdebug_virtual_gb != n);
4380 		sdebug_virtual_gb = n;
4381 		sdebug_capacity = get_sdebug_capacity();
4382 		if (changed) {
4383 			struct sdebug_host_info *sdhp;
4384 			struct sdebug_dev_info *dp;
4385 
4386 			spin_lock(&sdebug_host_list_lock);
4387 			list_for_each_entry(sdhp, &sdebug_host_list,
4388 					    host_list) {
4389 				list_for_each_entry(dp, &sdhp->dev_info_list,
4390 						    dev_list) {
4391 					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4392 						dp->uas_bm);
4393 				}
4394 			}
4395 			spin_unlock(&sdebug_host_list_lock);
4396 		}
4397 		return count;
4398 	}
4399 	return -EINVAL;
4400 }
4401 static DRIVER_ATTR_RW(virtual_gb);
4402 
4403 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4404 {
4405 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4406 }
4407 
4408 static int sdebug_add_adapter(void);
4409 static void sdebug_remove_adapter(void);
4410 
4411 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4412 			      size_t count)
4413 {
4414 	int delta_hosts;
4415 
4416 	if (sscanf(buf, "%d", &delta_hosts) != 1)
4417 		return -EINVAL;
4418 	if (delta_hosts > 0) {
4419 		do {
4420 			sdebug_add_adapter();
4421 		} while (--delta_hosts);
4422 	} else if (delta_hosts < 0) {
4423 		do {
4424 			sdebug_remove_adapter();
4425 		} while (++delta_hosts);
4426 	}
4427 	return count;
4428 }
4429 static DRIVER_ATTR_RW(add_host);
4430 
4431 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4432 {
4433 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4434 }
4435 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4436 				    size_t count)
4437 {
4438 	int n;
4439 
4440 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4441 		sdebug_vpd_use_hostno = n;
4442 		return count;
4443 	}
4444 	return -EINVAL;
4445 }
4446 static DRIVER_ATTR_RW(vpd_use_hostno);
4447 
4448 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4449 {
4450 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4451 }
4452 static DRIVER_ATTR_RO(sector_size);
4453 
4454 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4455 {
4456 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4457 }
4458 static DRIVER_ATTR_RO(dix);
4459 
4460 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4461 {
4462 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4463 }
4464 static DRIVER_ATTR_RO(dif);
4465 
4466 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4467 {
4468 	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4469 }
4470 static DRIVER_ATTR_RO(guard);
4471 
4472 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4473 {
4474 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4475 }
4476 static DRIVER_ATTR_RO(ato);
4477 
4478 static ssize_t map_show(struct device_driver *ddp, char *buf)
4479 {
4480 	ssize_t count;
4481 
4482 	if (!scsi_debug_lbp())
4483 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4484 				 sdebug_store_sectors);
4485 
4486 	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4487 			  (int)map_size, map_storep);
4488 	buf[count++] = '\n';
4489 	buf[count] = '\0';
4490 
4491 	return count;
4492 }
4493 static DRIVER_ATTR_RO(map);
4494 
4495 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4496 {
4497 	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4498 }
4499 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4500 			       size_t count)
4501 {
4502 	int n;
4503 
4504 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4505 		sdebug_removable = (n > 0);
4506 		return count;
4507 	}
4508 	return -EINVAL;
4509 }
4510 static DRIVER_ATTR_RW(removable);
4511 
4512 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4513 {
4514 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4515 }
4516 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4517 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4518 			       size_t count)
4519 {
4520 	int n;
4521 
4522 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4523 		sdebug_host_lock = (n > 0);
4524 		return count;
4525 	}
4526 	return -EINVAL;
4527 }
4528 static DRIVER_ATTR_RW(host_lock);
4529 
4530 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4531 {
4532 	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4533 }
4534 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4535 			    size_t count)
4536 {
4537 	int n;
4538 
4539 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4540 		sdebug_strict = (n > 0);
4541 		return count;
4542 	}
4543 	return -EINVAL;
4544 }
4545 static DRIVER_ATTR_RW(strict);
4546 
4547 
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: sdebug_add_host_store() above.
   Each entry below is one of the DRIVER_ATTR_RO/RW objects defined above;
   the list must be NULL terminated.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
4586 
/* parent of all emulated adapters; set by root_device_register() in init */
static struct device *pseudo_primary;
4588 
4589 static int __init scsi_debug_init(void)
4590 {
4591 	unsigned long sz;
4592 	int host_to_add;
4593 	int k;
4594 	int ret;
4595 
4596 	atomic_set(&sdebug_cmnd_count, 0);
4597 	atomic_set(&sdebug_completions, 0);
4598 	atomic_set(&retired_max_queue, 0);
4599 
4600 	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
4601 		pr_warn("ndelay must be less than 1 second, ignored\n");
4602 		sdebug_ndelay = 0;
4603 	} else if (sdebug_ndelay > 0)
4604 		sdebug_jdelay = JDELAY_OVERRIDDEN;
4605 
4606 	switch (sdebug_sector_size) {
4607 	case  512:
4608 	case 1024:
4609 	case 2048:
4610 	case 4096:
4611 		break;
4612 	default:
4613 		pr_err("invalid sector_size %d\n", sdebug_sector_size);
4614 		return -EINVAL;
4615 	}
4616 
4617 	switch (sdebug_dif) {
4618 
4619 	case SD_DIF_TYPE0_PROTECTION:
4620 		break;
4621 	case SD_DIF_TYPE1_PROTECTION:
4622 	case SD_DIF_TYPE2_PROTECTION:
4623 	case SD_DIF_TYPE3_PROTECTION:
4624 		have_dif_prot = true;
4625 		break;
4626 
4627 	default:
4628 		pr_err("dif must be 0, 1, 2 or 3\n");
4629 		return -EINVAL;
4630 	}
4631 
4632 	if (sdebug_guard > 1) {
4633 		pr_err("guard must be 0 or 1\n");
4634 		return -EINVAL;
4635 	}
4636 
4637 	if (sdebug_ato > 1) {
4638 		pr_err("ato must be 0 or 1\n");
4639 		return -EINVAL;
4640 	}
4641 
4642 	if (sdebug_physblk_exp > 15) {
4643 		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
4644 		return -EINVAL;
4645 	}
4646 
4647 	if (sdebug_lowest_aligned > 0x3fff) {
4648 		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
4649 		return -EINVAL;
4650 	}
4651 
4652 	if (sdebug_dev_size_mb < 1)
4653 		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4654 	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
4655 	sdebug_store_sectors = sz / sdebug_sector_size;
4656 	sdebug_capacity = get_sdebug_capacity();
4657 
4658 	/* play around with geometry, don't waste too much on track 0 */
4659 	sdebug_heads = 8;
4660 	sdebug_sectors_per = 32;
4661 	if (sdebug_dev_size_mb >= 256)
4662 		sdebug_heads = 64;
4663 	else if (sdebug_dev_size_mb >= 16)
4664 		sdebug_heads = 32;
4665 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4666 			       (sdebug_sectors_per * sdebug_heads);
4667 	if (sdebug_cylinders_per >= 1024) {
4668 		/* other LLDs do this; implies >= 1GB ram disk ... */
4669 		sdebug_heads = 255;
4670 		sdebug_sectors_per = 63;
4671 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4672 			       (sdebug_sectors_per * sdebug_heads);
4673 	}
4674 
4675 	if (0 == sdebug_fake_rw) {
4676 		fake_storep = vmalloc(sz);
4677 		if (NULL == fake_storep) {
4678 			pr_err("out of memory, 1\n");
4679 			return -ENOMEM;
4680 		}
4681 		memset(fake_storep, 0, sz);
4682 		if (sdebug_num_parts > 0)
4683 			sdebug_build_parts(fake_storep, sz);
4684 	}
4685 
4686 	if (sdebug_dix) {
4687 		int dif_size;
4688 
4689 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4690 		dif_storep = vmalloc(dif_size);
4691 
4692 		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
4693 
4694 		if (dif_storep == NULL) {
4695 			pr_err("out of mem. (DIX)\n");
4696 			ret = -ENOMEM;
4697 			goto free_vm;
4698 		}
4699 
4700 		memset(dif_storep, 0xff, dif_size);
4701 	}
4702 
4703 	/* Logical Block Provisioning */
4704 	if (scsi_debug_lbp()) {
4705 		sdebug_unmap_max_blocks =
4706 			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
4707 
4708 		sdebug_unmap_max_desc =
4709 			clamp(sdebug_unmap_max_desc, 0U, 256U);
4710 
4711 		sdebug_unmap_granularity =
4712 			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
4713 
4714 		if (sdebug_unmap_alignment &&
4715 		    sdebug_unmap_granularity <=
4716 		    sdebug_unmap_alignment) {
4717 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4718 			return -EINVAL;
4719 		}
4720 
4721 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4722 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4723 
4724 		pr_info("%lu provisioning blocks\n", map_size);
4725 
4726 		if (map_storep == NULL) {
4727 			pr_err("out of mem. (MAP)\n");
4728 			ret = -ENOMEM;
4729 			goto free_vm;
4730 		}
4731 
4732 		bitmap_zero(map_storep, map_size);
4733 
4734 		/* Map first 1KB for partition table */
4735 		if (sdebug_num_parts)
4736 			map_region(0, 2);
4737 	}
4738 
4739 	pseudo_primary = root_device_register("pseudo_0");
4740 	if (IS_ERR(pseudo_primary)) {
4741 		pr_warn("root_device_register() error\n");
4742 		ret = PTR_ERR(pseudo_primary);
4743 		goto free_vm;
4744 	}
4745 	ret = bus_register(&pseudo_lld_bus);
4746 	if (ret < 0) {
4747 		pr_warn("bus_register error: %d\n", ret);
4748 		goto dev_unreg;
4749 	}
4750 	ret = driver_register(&sdebug_driverfs_driver);
4751 	if (ret < 0) {
4752 		pr_warn("driver_register error: %d\n", ret);
4753 		goto bus_unreg;
4754 	}
4755 
4756 	host_to_add = sdebug_add_host;
4757 	sdebug_add_host = 0;
4758 
4759         for (k = 0; k < host_to_add; k++) {
4760                 if (sdebug_add_adapter()) {
4761 			pr_err("sdebug_add_adapter failed k=%d\n", k);
4762                         break;
4763                 }
4764         }
4765 
4766 	if (sdebug_verbose)
4767 		pr_info("built %d host(s)\n", sdebug_add_host);
4768 
4769 	return 0;
4770 
4771 bus_unreg:
4772 	bus_unregister(&pseudo_lld_bus);
4773 dev_unreg:
4774 	root_device_unregister(pseudo_primary);
4775 free_vm:
4776 	vfree(map_storep);
4777 	vfree(dif_storep);
4778 	vfree(fake_storep);
4779 
4780 	return ret;
4781 }
4782 
4783 static void __exit scsi_debug_exit(void)
4784 {
4785 	int k = sdebug_add_host;
4786 
4787 	stop_all_queued();
4788 	free_all_queued();
4789 	for (; k; k--)
4790 		sdebug_remove_adapter();
4791 	driver_unregister(&sdebug_driverfs_driver);
4792 	bus_unregister(&pseudo_lld_bus);
4793 	root_device_unregister(pseudo_primary);
4794 
4795 	vfree(dif_storep);
4796 	vfree(fake_storep);
4797 }
4798 
/* NOTE(review): device_initcall() rather than module_init() — presumably
 * to order initialization after the driver core when built in; confirm. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
4801 
/* device .release callback: frees the containing sdebug_host_info */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
4809 
4810 static int sdebug_add_adapter(void)
4811 {
4812 	int k, devs_per_host;
4813         int error = 0;
4814         struct sdebug_host_info *sdbg_host;
4815 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4816 
4817         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4818         if (NULL == sdbg_host) {
4819 		pr_err("out of memory at line %d\n", __LINE__);
4820                 return -ENOMEM;
4821         }
4822 
4823         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4824 
4825 	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
4826         for (k = 0; k < devs_per_host; k++) {
4827 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4828 		if (!sdbg_devinfo) {
4829 			pr_err("out of memory at line %d\n", __LINE__);
4830                         error = -ENOMEM;
4831 			goto clean;
4832                 }
4833         }
4834 
4835         spin_lock(&sdebug_host_list_lock);
4836         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4837         spin_unlock(&sdebug_host_list_lock);
4838 
4839         sdbg_host->dev.bus = &pseudo_lld_bus;
4840         sdbg_host->dev.parent = pseudo_primary;
4841         sdbg_host->dev.release = &sdebug_release_adapter;
4842 	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
4843 
4844         error = device_register(&sdbg_host->dev);
4845 
4846         if (error)
4847 		goto clean;
4848 
4849 	++sdebug_add_host;
4850         return error;
4851 
4852 clean:
4853 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4854 				 dev_list) {
4855 		list_del(&sdbg_devinfo->dev_list);
4856 		kfree(sdbg_devinfo);
4857 	}
4858 
4859 	kfree(sdbg_host);
4860         return error;
4861 }
4862 
4863 static void sdebug_remove_adapter(void)
4864 {
4865         struct sdebug_host_info * sdbg_host = NULL;
4866 
4867         spin_lock(&sdebug_host_list_lock);
4868         if (!list_empty(&sdebug_host_list)) {
4869                 sdbg_host = list_entry(sdebug_host_list.prev,
4870                                        struct sdebug_host_info, host_list);
4871 		list_del(&sdbg_host->host_list);
4872 	}
4873         spin_unlock(&sdebug_host_list_lock);
4874 
4875 	if (!sdbg_host)
4876 		return;
4877 
4878 	device_unregister(&sdbg_host->dev);
4879 	--sdebug_add_host;
4880 }
4881 
4882 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4883 {
4884 	int num_in_q = 0;
4885 	unsigned long iflags;
4886 	struct sdebug_dev_info *devip;
4887 
4888 	spin_lock_irqsave(&queued_arr_lock, iflags);
4889 	devip = (struct sdebug_dev_info *)sdev->hostdata;
4890 	if (NULL == devip) {
4891 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
4892 		return	-ENODEV;
4893 	}
4894 	num_in_q = atomic_read(&devip->num_in_q);
4895 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
4896 
4897 	if (qdepth < 1)
4898 		qdepth = 1;
4899 	/* allow to exceed max host queued_arr elements for testing */
4900 	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4901 		qdepth = SCSI_DEBUG_CANQUEUE + 10;
4902 	scsi_change_queue_depth(sdev, qdepth);
4903 
4904 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
4905 		sdev_printk(KERN_INFO, sdev,
4906 			    "%s: qdepth=%d, num_in_q=%d\n",
4907 			    __func__, qdepth, num_in_q);
4908 	}
4909 	return sdev->queue_depth;
4910 }
4911 
/*
 * Error-injection gate, called for each command when every_nth != 0.
 * Clears this command's private injection flags, then on every
 * |every_nth|-th command either requests the command be dropped
 * (return 1: simulated timeout) or latches the opt-selected injection
 * flags into the command's private data for later stages to act on.
 * Returns 0 when the command should proceed normally.
 */
static int check_inject(struct scsi_cmnd *scp)
{
	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);

	/* start with no injections requested for this command */
	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));

	if (atomic_inc_return(&sdebug_cmnd_count) >= abs(sdebug_every_nth)) {
		atomic_set(&sdebug_cmnd_count, 0);
		/* negative every_nth < -1 collapses to -1 after first hit */
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return 1; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return 1; /* time out reads and writes */
		if (sdebug_any_injecting_opt) {
			/* record which error types to inject downstream */
			if (SDEBUG_OPT_RECOVERED_ERR & sdebug_opts)
				ep->inj_recovered = true;
			if (SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts)
				ep->inj_transport = true;
			if (SDEBUG_OPT_DIF_ERR & sdebug_opts)
				ep->inj_dif = true;
			if (SDEBUG_OPT_DIX_ERR & sdebug_opts)
				ep->inj_dix = true;
			if (SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts)
				ep->inj_short = true;
		}
	}
	return 0;
}
4942 
/*
 * Main command entry point (.queuecommand). Looks the CDB opcode up in
 * opcode_info_arr (resolving service actions where needed), performs the
 * standard checks (LUN range, invalid opcode, wlun restrictions, strict
 * CDB-mask checking, unit attentions, stopped state), then invokes the
 * matched resp_* handler and schedules the response with the configured
 * delay. Early exits route to schedule_resp() with the appropriate result.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	/* optionally hex-dump the CDB when verbose tracing is enabled */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
	}
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action lives in byte 1 (low) or bytes 8-9 */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			/* no variant matched: flag the offending CDB field */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest set (disallowed) bit for sense */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* report any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm,
						      SDEBUG_NUM_UAS))) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && devip->stopped)) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	/* fake_rw: skip the data transfer for medium-access commands */
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (check_inject(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5081 
/* SCSI host template for the emulated adapters. Note: can_queue and
 * use_clustering are overwritten per the module parameters in
 * sdebug_driver_probe() before scsi_host_alloc() is called. */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size =		sizeof(struct sdebug_scmd_extra_t),
};
5109 
5110 static int sdebug_driver_probe(struct device * dev)
5111 {
5112 	int error = 0;
5113 	struct sdebug_host_info *sdbg_host;
5114 	struct Scsi_Host *hpnt;
5115 	int hprot;
5116 
5117 	sdbg_host = to_sdebug_host(dev);
5118 
5119 	sdebug_driver_template.can_queue = sdebug_max_queue;
5120 	if (sdebug_clustering)
5121 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5122 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5123 	if (NULL == hpnt) {
5124 		pr_err("scsi_host_alloc failed\n");
5125 		error = -ENODEV;
5126 		return error;
5127 	}
5128 
5129         sdbg_host->shost = hpnt;
5130 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5131 	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5132 		hpnt->max_id = sdebug_num_tgts + 1;
5133 	else
5134 		hpnt->max_id = sdebug_num_tgts;
5135 	/* = sdebug_max_luns; */
5136 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5137 
5138 	hprot = 0;
5139 
5140 	switch (sdebug_dif) {
5141 
5142 	case SD_DIF_TYPE1_PROTECTION:
5143 		hprot = SHOST_DIF_TYPE1_PROTECTION;
5144 		if (sdebug_dix)
5145 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
5146 		break;
5147 
5148 	case SD_DIF_TYPE2_PROTECTION:
5149 		hprot = SHOST_DIF_TYPE2_PROTECTION;
5150 		if (sdebug_dix)
5151 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
5152 		break;
5153 
5154 	case SD_DIF_TYPE3_PROTECTION:
5155 		hprot = SHOST_DIF_TYPE3_PROTECTION;
5156 		if (sdebug_dix)
5157 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
5158 		break;
5159 
5160 	default:
5161 		if (sdebug_dix)
5162 			hprot |= SHOST_DIX_TYPE0_PROTECTION;
5163 		break;
5164 	}
5165 
5166 	scsi_host_set_prot(hpnt, hprot);
5167 
5168 	if (have_dif_prot || sdebug_dix)
5169 		pr_info("host protection%s%s%s%s%s%s%s\n",
5170 			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5171 			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5172 			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5173 			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5174 			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5175 			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5176 			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5177 
5178 	if (sdebug_guard == 1)
5179 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5180 	else
5181 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5182 
5183 	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5184 	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5185         error = scsi_add_host(hpnt, &sdbg_host->dev);
5186         if (error) {
5187 		pr_err("scsi_add_host failed\n");
5188                 error = -ENODEV;
5189 		scsi_host_put(hpnt);
5190         } else
5191 		scsi_scan_host(hpnt);
5192 
5193 	return error;
5194 }
5195 
5196 static int sdebug_driver_remove(struct device * dev)
5197 {
5198         struct sdebug_host_info *sdbg_host;
5199 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
5200 
5201 	sdbg_host = to_sdebug_host(dev);
5202 
5203 	if (!sdbg_host) {
5204 		pr_err("Unable to locate host info\n");
5205 		return -ENODEV;
5206 	}
5207 
5208         scsi_remove_host(sdbg_host->shost);
5209 
5210 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5211 				 dev_list) {
5212                 list_del(&sdbg_devinfo->dev_list);
5213                 kfree(sdbg_devinfo);
5214         }
5215 
5216         scsi_host_put(sdbg_host->shost);
5217         return 0;
5218 }
5219 
/* Match everything: any device on the pseudo bus binds to any driver */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5225 
/* Bus type for the pseudo adapter devices created by sdebug_add_adapter();
 * probe/remove create and destroy the emulated SCSI hosts. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
5233