xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 94c7b6fc)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
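/*
 * Example (illustrative only) module load exercising a few of the
 * parameters whose defaults are defined below:
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 */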
27 
28 #include <linux/module.h>
29 
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63 
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define ADDR_OUT_OF_RANGE 0x21
72 #define INVALID_COMMAND_OPCODE 0x20
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define POWERON_RESET 0x29
76 #define SAVING_PARAMS_UNSUP 0x39
77 #define TRANSPORT_PROBLEM 0x4b
78 #define THRESHOLD_EXCEEDED 0x5d
79 #define LOW_POWER_COND_ON 0x5e
80 
81 /* Additional Sense Code Qualifier (ASCQ) */
82 #define ACK_NAK_TO 0x3
83 
84 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 
86 /* Default values for driver parameters */
87 #define DEF_NUM_HOST   1
88 #define DEF_NUM_TGTS   1
89 #define DEF_MAX_LUNS   1
90 /* With these defaults, this driver will make 1 host with 1 target
91  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
92  */
93 #define DEF_ATO 1
94 #define DEF_DELAY   1
95 #define DEF_DEV_SIZE_MB   8
96 #define DEF_DIF 0
97 #define DEF_DIX 0
98 #define DEF_D_SENSE   0
99 #define DEF_EVERY_NTH   0
100 #define DEF_FAKE_RW	0
101 #define DEF_GUARD 0
102 #define DEF_LBPU 0
103 #define DEF_LBPWS 0
104 #define DEF_LBPWS10 0
105 #define DEF_LBPRZ 1
106 #define DEF_LOWEST_ALIGNED 0
107 #define DEF_NO_LUN_0   0
108 #define DEF_NUM_PARTS   0
109 #define DEF_OPTS   0
110 #define DEF_OPT_BLKS 64
111 #define DEF_PHYSBLK_EXP 0
112 #define DEF_PTYPE   0
113 #define DEF_REMOVABLE false
114 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
115 #define DEF_SECTOR_SIZE 512
116 #define DEF_UNMAP_ALIGNMENT 0
117 #define DEF_UNMAP_GRANULARITY 1
118 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
119 #define DEF_UNMAP_MAX_DESC 256
120 #define DEF_VIRTUAL_GB   0
121 #define DEF_VPD_USE_HOSTNO 1
122 #define DEF_WRITESAME_LENGTH 0xFFFF
123 
124 /* bit mask values for scsi_debug_opts */
125 #define SCSI_DEBUG_OPT_NOISE   1
126 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
127 #define SCSI_DEBUG_OPT_TIMEOUT   4
128 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
129 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
130 #define SCSI_DEBUG_OPT_DIF_ERR   32
131 #define SCSI_DEBUG_OPT_DIX_ERR   64
132 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
133 #define SCSI_DEBUG_OPT_SHORT_TRANSFER	256
134 /* When "every_nth" > 0 then modulo "every_nth" commands:
135  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
136  *   - a RECOVERED_ERROR is simulated on successful read and write
137  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
138  *   - a TRANSPORT_ERROR is simulated on successful read and write
139  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
140  *
141  * When "every_nth" < 0 then after "- every_nth" commands:
142  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
143  *   - a RECOVERED_ERROR is simulated on successful read and write
144  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
145  *   - a TRANSPORT_ERROR is simulated on successful read and write
146  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
147  * This will continue until some other action occurs (e.g. the user
148  * writing a new value (other than -1 or 1) to every_nth via sysfs).
149  */
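/* For example (illustrative only): loading with "every_nth=100 opts=4" should
 * make every 100th command appear to time out (SCSI_DEBUG_OPT_TIMEOUT). */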
150 
151 /* When SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error is
152  * simulated at this sector on read commands: */
153 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
154 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
155 
156 /* If REPORT LUNS reports LUNs >= 256 it can use "flat space" addressing
157  * (value 1) or "peripheral device" addressing (value 0) */
158 #define SAM2_LUN_ADDRESS_METHOD 0
159 #define SAM2_WLUN_REPORT_LUNS 0xc101
160 
161 /* Can queue up to this number of commands. Typically commands that
162  * have a non-zero delay are queued. */
163 #define SCSI_DEBUG_CANQUEUE  255
164 
165 static int scsi_debug_add_host = DEF_NUM_HOST;
166 static int scsi_debug_ato = DEF_ATO;
167 static int scsi_debug_delay = DEF_DELAY;
168 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
169 static int scsi_debug_dif = DEF_DIF;
170 static int scsi_debug_dix = DEF_DIX;
171 static int scsi_debug_dsense = DEF_D_SENSE;
172 static int scsi_debug_every_nth = DEF_EVERY_NTH;
173 static int scsi_debug_fake_rw = DEF_FAKE_RW;
174 static unsigned int scsi_debug_guard = DEF_GUARD;
175 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
176 static int scsi_debug_max_luns = DEF_MAX_LUNS;
177 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
178 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
179 static int scsi_debug_no_uld = 0;
180 static int scsi_debug_num_parts = DEF_NUM_PARTS;
181 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
182 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
183 static int scsi_debug_opts = DEF_OPTS;
184 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
185 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
186 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
187 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
188 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
189 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
190 static unsigned int scsi_debug_lbpu = DEF_LBPU;
191 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
192 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
193 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
194 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
195 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
196 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
197 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
198 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
199 static bool scsi_debug_removable = DEF_REMOVABLE;
200 static bool scsi_debug_clustering;
201 
202 static int scsi_debug_cmnd_count = 0;
203 
204 #define DEV_READONLY(TGT)      (0)
205 
206 static unsigned int sdebug_store_sectors;
207 static sector_t sdebug_capacity;	/* in sectors */
208 
209 /* old BIOS-style geometry; the kernel may drop these values but some mode
210    sense pages may still need them */
211 static int sdebug_heads;		/* heads per disk */
212 static int sdebug_cylinders_per;	/* cylinders per surface */
213 static int sdebug_sectors_per;		/* sectors per cylinder */
214 
215 #define SDEBUG_MAX_PARTS 4
216 
217 #define SDEBUG_SENSE_LEN 32
218 
219 #define SCSI_DEBUG_MAX_CMD_LEN 32
220 
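/* Returns non-zero when any of the logical block provisioning (LBP) options
 * is enabled. */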
221 static unsigned int scsi_debug_lbp(void)
222 {
223 	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
224 }
225 
226 struct sdebug_dev_info {
227 	struct list_head dev_list;
228 	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
229 	unsigned int channel;
230 	unsigned int target;
231 	unsigned int lun;
232 	struct sdebug_host_info *sdbg_host;
233 	unsigned int wlun;
234 	char reset;
235 	char stopped;
236 	char used;
237 };
238 
239 struct sdebug_host_info {
240 	struct list_head host_list;
241 	struct Scsi_Host *shost;
242 	struct device dev;
243 	struct list_head dev_info_list;
244 };
245 
246 #define to_sdebug_host(d)	\
247 	container_of(d, struct sdebug_host_info, dev)
248 
249 static LIST_HEAD(sdebug_host_list);
250 static DEFINE_SPINLOCK(sdebug_host_list_lock);
251 
252 typedef void (* done_funct_t) (struct scsi_cmnd *);
253 
254 struct sdebug_queued_cmd {
255 	int in_use;
256 	struct timer_list cmnd_timer;
257 	done_funct_t done_funct;
258 	struct scsi_cmnd * a_cmnd;
259 	int scsi_result;
260 };
261 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
262 
263 static unsigned char * fake_storep;	/* ramdisk storage */
264 static struct sd_dif_tuple *dif_storep;	/* protection info */
265 static void *map_storep;		/* provisioning map */
266 
267 static unsigned long map_size;
268 static int num_aborts = 0;
269 static int num_dev_resets = 0;
270 static int num_bus_resets = 0;
271 static int num_host_resets = 0;
272 static int dix_writes;
273 static int dix_reads;
274 static int dif_errors;
275 
276 static DEFINE_SPINLOCK(queued_arr_lock);
277 static DEFINE_RWLOCK(atomic_rw);
278 
279 static char sdebug_proc_name[] = "scsi_debug";
280 
281 static struct bus_type pseudo_lld_bus;
282 
283 static struct device_driver sdebug_driverfs_driver = {
284 	.name 		= sdebug_proc_name,
285 	.bus		= &pseudo_lld_bus,
286 };
287 
288 static const int check_condition_result =
289 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
290 
291 static const int illegal_condition_result =
292 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
293 
294 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
295 				    0, 0, 0x2, 0x4b};
296 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
297 			           0, 0, 0x0, 0x0};
298 
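/* Map an LBA to its backing position in the ramdisk (and protection info)
 * store; addresses wrap modulo sdebug_store_sectors. */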
299 static void *fake_store(unsigned long long lba)
300 {
301 	lba = do_div(lba, sdebug_store_sectors);
302 
303 	return fake_storep + lba * scsi_debug_sector_size;
304 }
305 
306 static struct sd_dif_tuple *dif_store(sector_t sector)
307 {
308 	sector = do_div(sector, sdebug_store_sectors);
309 
310 	return dif_storep + sector;
311 }
312 
313 static int sdebug_add_adapter(void);
314 static void sdebug_remove_adapter(void);
315 
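/* Update max_id (from num_tgts) and max_lun on every simulated host */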
316 static void sdebug_max_tgts_luns(void)
317 {
318 	struct sdebug_host_info *sdbg_host;
319 	struct Scsi_Host *hpnt;
320 
321 	spin_lock(&sdebug_host_list_lock);
322 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
323 		hpnt = sdbg_host->shost;
324 		if ((hpnt->this_id >= 0) &&
325 		    (scsi_debug_num_tgts > hpnt->this_id))
326 			hpnt->max_id = scsi_debug_num_tgts + 1;
327 		else
328 			hpnt->max_id = scsi_debug_num_tgts;
329 		/* scsi_debug_max_luns; */
330 		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
331 	}
332 	spin_unlock(&sdebug_host_list_lock);
333 }
334 
335 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
336 			    int asc, int asq)
337 {
338 	unsigned char *sbuff;
339 
340 	sbuff = devip->sense_buff;
341 	memset(sbuff, 0, SDEBUG_SENSE_LEN);
342 
343 	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
344 
345 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
346 		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
347 		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
348 }
349 
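/* Decode the starting LBA, the number of blocks and (for variable length
 * CDBs) the ei_lba field from the CDB. */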
350 static void get_data_transfer_info(unsigned char *cmd,
351 				   unsigned long long *lba, unsigned int *num,
352 				   u32 *ei_lba)
353 {
354 	*ei_lba = 0;
355 
356 	switch (*cmd) {
357 	case VARIABLE_LENGTH_CMD:
358 		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
359 			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
360 			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
361 			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
362 
363 		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
364 			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
365 
366 		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
367 			(u32)cmd[28] << 24;
368 		break;
369 
370 	case WRITE_SAME_16:
371 	case WRITE_16:
372 	case READ_16:
373 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
374 			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
375 			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
376 			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
377 
378 		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
379 			(u32)cmd[10] << 24;
380 		break;
381 	case WRITE_12:
382 	case READ_12:
383 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
384 			(u32)cmd[2] << 24;
385 
386 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
387 			(u32)cmd[6] << 24;
388 		break;
389 	case WRITE_SAME:
390 	case WRITE_10:
391 	case READ_10:
392 	case XDWRITEREAD_10:
393 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
394 			(u32)cmd[2] << 24;
395 
396 		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
397 		break;
398 	case WRITE_6:
399 	case READ_6:
400 		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
401 			(u32)(cmd[1] & 0x1f) << 16;
402 		*num = (0 == cmd[4]) ? 256 : cmd[4];
403 		break;
404 	default:
405 		break;
406 	}
407 }
408 
409 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
410 {
411 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
412 		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
413 	}
414 	return -EINVAL;
415 	/* return -ENOTTY; // correct return but upsets fdisk */
416 }
417 
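/* Report a pending unit attention (power on reset); when reset_only is 0,
 * also report NOT READY for a stopped device. */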
418 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
419 			   struct sdebug_dev_info * devip)
420 {
421 	if (devip->reset) {
422 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
423 			printk(KERN_INFO "scsi_debug: Reporting Unit "
424 			       "attention: power on reset\n");
425 		devip->reset = 0;
426 		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
427 		return check_condition_result;
428 	}
429 	if ((0 == reset_only) && devip->stopped) {
430 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
431 			printk(KERN_INFO "scsi_debug: Reporting Not "
432 			       "ready: initializing command required\n");
433 		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
434 				0x2);
435 		return check_condition_result;
436 	}
437 	return 0;
438 }
439 
440 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
441 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
442 				int arr_len)
443 {
444 	int act_len;
445 	struct scsi_data_buffer *sdb = scsi_in(scp);
446 
447 	if (!sdb->length)
448 		return 0;
449 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
450 		return (DID_ERROR << 16);
451 
452 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
453 				      arr, arr_len);
454 	sdb->resid = scsi_bufflen(scp) - act_len;
455 
456 	return 0;
457 }
458 
459 /* Returns number of bytes fetched into 'arr' or -1 if error. */
460 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
461 			       int arr_len)
462 {
463 	if (!scsi_bufflen(scp))
464 		return 0;
465 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
466 		return -1;
467 
468 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
469 }
470 
471 
472 static const char * inq_vendor_id = "Linux   ";
473 static const char * inq_product_id = "scsi_debug      ";
474 static const char * inq_product_rev = "0004";
475 
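/* Build the Device Identification VPD page (0x83) designation descriptors;
 * returns the number of bytes used. */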
476 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
477 			   int target_dev_id, int dev_id_num,
478 			   const char * dev_id_str,
479 			   int dev_id_str_len)
480 {
481 	int num, port_a;
482 	char b[32];
483 
484 	port_a = target_dev_id + 1;
485 	/* T10 vendor identifier field format (faked) */
486 	arr[0] = 0x2;	/* ASCII */
487 	arr[1] = 0x1;
488 	arr[2] = 0x0;
489 	memcpy(&arr[4], inq_vendor_id, 8);
490 	memcpy(&arr[12], inq_product_id, 16);
491 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
492 	num = 8 + 16 + dev_id_str_len;
493 	arr[3] = num;
494 	num += 4;
495 	if (dev_id_num >= 0) {
496 		/* NAA-5, Logical unit identifier (binary) */
497 		arr[num++] = 0x1;	/* binary (not necessarily sas) */
498 		arr[num++] = 0x3;	/* PIV=0, lu, naa */
499 		arr[num++] = 0x0;
500 		arr[num++] = 0x8;
501 		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
502 		arr[num++] = 0x33;
503 		arr[num++] = 0x33;
504 		arr[num++] = 0x30;
505 		arr[num++] = (dev_id_num >> 24);
506 		arr[num++] = (dev_id_num >> 16) & 0xff;
507 		arr[num++] = (dev_id_num >> 8) & 0xff;
508 		arr[num++] = dev_id_num & 0xff;
509 		/* Target relative port number */
510 		arr[num++] = 0x61;	/* proto=sas, binary */
511 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
512 		arr[num++] = 0x0;	/* reserved */
513 		arr[num++] = 0x4;	/* length */
514 		arr[num++] = 0x0;	/* reserved */
515 		arr[num++] = 0x0;	/* reserved */
516 		arr[num++] = 0x0;
517 		arr[num++] = 0x1;	/* relative port A */
518 	}
519 	/* NAA-5, Target port identifier */
520 	arr[num++] = 0x61;	/* proto=sas, binary */
521 	arr[num++] = 0x93;	/* piv=1, target port, naa */
522 	arr[num++] = 0x0;
523 	arr[num++] = 0x8;
524 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
525 	arr[num++] = 0x22;
526 	arr[num++] = 0x22;
527 	arr[num++] = 0x20;
528 	arr[num++] = (port_a >> 24);
529 	arr[num++] = (port_a >> 16) & 0xff;
530 	arr[num++] = (port_a >> 8) & 0xff;
531 	arr[num++] = port_a & 0xff;
532 	/* NAA-5, Target port group identifier */
533 	arr[num++] = 0x61;	/* proto=sas, binary */
534 	arr[num++] = 0x95;	/* piv=1, target port group id */
535 	arr[num++] = 0x0;
536 	arr[num++] = 0x4;
537 	arr[num++] = 0;
538 	arr[num++] = 0;
539 	arr[num++] = (port_group_id >> 8) & 0xff;
540 	arr[num++] = port_group_id & 0xff;
541 	/* NAA-5, Target device identifier */
542 	arr[num++] = 0x61;	/* proto=sas, binary */
543 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
544 	arr[num++] = 0x0;
545 	arr[num++] = 0x8;
546 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
547 	arr[num++] = 0x22;
548 	arr[num++] = 0x22;
549 	arr[num++] = 0x20;
550 	arr[num++] = (target_dev_id >> 24);
551 	arr[num++] = (target_dev_id >> 16) & 0xff;
552 	arr[num++] = (target_dev_id >> 8) & 0xff;
553 	arr[num++] = target_dev_id & 0xff;
554 	/* SCSI name string: Target device identifier */
555 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
556 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
557 	arr[num++] = 0x0;
558 	arr[num++] = 24;
559 	memcpy(arr + num, "naa.52222220", 12);
560 	num += 12;
561 	snprintf(b, sizeof(b), "%08X", target_dev_id);
562 	memcpy(arr + num, b, 8);
563 	num += 8;
564 	memset(arr + num, 0, 4);
565 	num += 4;
566 	return num;
567 }
568 
569 
570 static unsigned char vpd84_data[] = {
571 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
572     0x22,0x22,0x22,0x0,0xbb,0x1,
573     0x22,0x22,0x22,0x0,0xbb,0x2,
574 };
575 
576 static int inquiry_evpd_84(unsigned char * arr)
577 {
578 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
579 	return sizeof(vpd84_data);
580 }
581 
582 static int inquiry_evpd_85(unsigned char * arr)
583 {
584 	int num = 0;
585 	const char * na1 = "https://www.kernel.org/config";
586 	const char * na2 = "http://www.kernel.org/log";
587 	int plen, olen;
588 
589 	arr[num++] = 0x1;	/* lu, storage config */
590 	arr[num++] = 0x0;	/* reserved */
591 	arr[num++] = 0x0;
592 	olen = strlen(na1);
593 	plen = olen + 1;
594 	if (plen % 4)
595 		plen = ((plen / 4) + 1) * 4;
596 	arr[num++] = plen;	/* length, null terminated, padded */
597 	memcpy(arr + num, na1, olen);
598 	memset(arr + num + olen, 0, plen - olen);
599 	num += plen;
600 
601 	arr[num++] = 0x4;	/* lu, logging */
602 	arr[num++] = 0x0;	/* reserved */
603 	arr[num++] = 0x0;
604 	olen = strlen(na2);
605 	plen = olen + 1;
606 	if (plen % 4)
607 		plen = ((plen / 4) + 1) * 4;
608 	arr[num++] = plen;	/* length, null terminated, padded */
609 	memcpy(arr + num, na2, olen);
610 	memset(arr + num + olen, 0, plen - olen);
611 	num += plen;
612 
613 	return num;
614 }
615 
616 /* SCSI ports VPD page */
617 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
618 {
619 	int num = 0;
620 	int port_a, port_b;
621 
622 	port_a = target_dev_id + 1;
623 	port_b = port_a + 1;
624 	arr[num++] = 0x0;	/* reserved */
625 	arr[num++] = 0x0;	/* reserved */
626 	arr[num++] = 0x0;
627 	arr[num++] = 0x1;	/* relative port 1 (primary) */
628 	memset(arr + num, 0, 6);
629 	num += 6;
630 	arr[num++] = 0x0;
631 	arr[num++] = 12;	/* length tp descriptor */
632 	/* naa-5 target port identifier (A) */
633 	arr[num++] = 0x61;	/* proto=sas, binary */
634 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
635 	arr[num++] = 0x0;	/* reserved */
636 	arr[num++] = 0x8;	/* length */
637 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
638 	arr[num++] = 0x22;
639 	arr[num++] = 0x22;
640 	arr[num++] = 0x20;
641 	arr[num++] = (port_a >> 24);
642 	arr[num++] = (port_a >> 16) & 0xff;
643 	arr[num++] = (port_a >> 8) & 0xff;
644 	arr[num++] = port_a & 0xff;
645 
646 	arr[num++] = 0x0;	/* reserved */
647 	arr[num++] = 0x0;	/* reserved */
648 	arr[num++] = 0x0;
649 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
650 	memset(arr + num, 0, 6);
651 	num += 6;
652 	arr[num++] = 0x0;
653 	arr[num++] = 12;	/* length tp descriptor */
654 	/* naa-5 target port identifier (B) */
655 	arr[num++] = 0x61;	/* proto=sas, binary */
656 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
657 	arr[num++] = 0x0;	/* reserved */
658 	arr[num++] = 0x8;	/* length */
659 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
660 	arr[num++] = 0x22;
661 	arr[num++] = 0x22;
662 	arr[num++] = 0x20;
663 	arr[num++] = (port_b >> 24);
664 	arr[num++] = (port_b >> 16) & 0xff;
665 	arr[num++] = (port_b >> 8) & 0xff;
666 	arr[num++] = port_b & 0xff;
667 
668 	return num;
669 }
670 
671 
672 static unsigned char vpd89_data[] = {
673 /* from 4th byte */ 0,0,0,0,
674 'l','i','n','u','x',' ',' ',' ',
675 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
676 '1','2','3','4',
677 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
678 0xec,0,0,0,
679 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
680 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
681 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
682 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
683 0x53,0x41,
684 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
685 0x20,0x20,
686 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
687 0x10,0x80,
688 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
689 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
690 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
692 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
693 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
694 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
699 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
700 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
701 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
712 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
713 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
714 };
715 
716 static int inquiry_evpd_89(unsigned char * arr)
717 {
718 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
719 	return sizeof(vpd89_data);
720 }
721 
722 
723 /* Block limits VPD page (SBC-3) */
724 static unsigned char vpdb0_data[] = {
725 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
726 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
727 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
728 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
729 };
730 
731 static int inquiry_evpd_b0(unsigned char * arr)
732 {
733 	unsigned int gran;
734 
735 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
736 
737 	/* Optimal transfer length granularity */
738 	gran = 1 << scsi_debug_physblk_exp;
739 	arr[2] = (gran >> 8) & 0xff;
740 	arr[3] = gran & 0xff;
741 
742 	/* Maximum Transfer Length */
743 	if (sdebug_store_sectors > 0x400) {
744 		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
745 		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
746 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
747 		arr[7] = sdebug_store_sectors & 0xff;
748 	}
749 
750 	/* Optimal Transfer Length */
751 	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
752 
753 	if (scsi_debug_lbpu) {
754 		/* Maximum Unmap LBA Count */
755 		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
756 
757 		/* Maximum Unmap Block Descriptor Count */
758 		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
759 	}
760 
761 	/* Unmap Granularity Alignment */
762 	if (scsi_debug_unmap_alignment) {
763 		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
764 		arr[28] |= 0x80; /* UGAVALID */
765 	}
766 
767 	/* Optimal Unmap Granularity */
768 	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
769 
770 	/* Maximum WRITE SAME Length */
771 	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
772 
773 	/* Mandatory page length for Logical Block Provisioning */
774 	return 0x3c;
775 
776 }
777 
778 /* Block device characteristics VPD page (SBC-3) */
779 static int inquiry_evpd_b1(unsigned char *arr)
780 {
781 	memset(arr, 0, 0x3c);
782 	arr[0] = 0;
783 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
784 	arr[2] = 0;
785 	arr[3] = 5;	/* less than 1.8" */
786 
787 	return 0x3c;
788 }
789 
790 /* Logical block provisioning VPD page (SBC-3) */
791 static int inquiry_evpd_b2(unsigned char *arr)
792 {
793 	memset(arr, 0, 0x4);
794 	arr[0] = 0;			/* threshold exponent */
795 
796 	if (scsi_debug_lbpu)
797 		arr[1] = 1 << 7;
798 
799 	if (scsi_debug_lbpws)
800 		arr[1] |= 1 << 6;
801 
802 	if (scsi_debug_lbpws10)
803 		arr[1] |= 1 << 5;
804 
805 	if (scsi_debug_lbprz)
806 		arr[1] |= 1 << 2;
807 
808 	return 0x4;
809 }
810 
811 #define SDEBUG_LONG_INQ_SZ 96
812 #define SDEBUG_MAX_INQ_ARR_SZ 584
813 
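/* Respond to INQUIRY: standard inquiry data or, when the EVPD bit is set,
 * one of the supported VPD pages. */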
814 static int resp_inquiry(struct scsi_cmnd * scp, int target,
815 			struct sdebug_dev_info * devip)
816 {
817 	unsigned char pq_pdt;
818 	unsigned char * arr;
819 	unsigned char *cmd = (unsigned char *)scp->cmnd;
820 	int alloc_len, n, ret;
821 
822 	alloc_len = (cmd[3] << 8) + cmd[4];
823 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
824 	if (! arr)
825 		return DID_REQUEUE << 16;
826 	if (devip->wlun)
827 		pq_pdt = 0x1e;	/* present, wlun */
828 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
829 		pq_pdt = 0x7f;	/* not present, no device type */
830 	else
831 		pq_pdt = (scsi_debug_ptype & 0x1f);
832 	arr[0] = pq_pdt;
833 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
834 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
835 			       	0);
836 		kfree(arr);
837 		return check_condition_result;
838 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
839 		int lu_id_num, port_group_id, target_dev_id, len;
840 		char lu_id_str[6];
841 		int host_no = devip->sdbg_host->shost->host_no;
842 
843 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
844 		    (devip->channel & 0x7f);
845 		if (0 == scsi_debug_vpd_use_hostno)
846 			host_no = 0;
847 		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
848 			    (devip->target * 1000) + devip->lun);
849 		target_dev_id = ((host_no + 1) * 2000) +
850 				 (devip->target * 1000) - 3;
851 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
852 		if (0 == cmd[2]) { /* supported vital product data pages */
853 			arr[1] = cmd[2];	/*sanity */
854 			n = 4;
855 			arr[n++] = 0x0;   /* this page */
856 			arr[n++] = 0x80;  /* unit serial number */
857 			arr[n++] = 0x83;  /* device identification */
858 			arr[n++] = 0x84;  /* software interface ident. */
859 			arr[n++] = 0x85;  /* management network addresses */
860 			arr[n++] = 0x86;  /* extended inquiry */
861 			arr[n++] = 0x87;  /* mode page policy */
862 			arr[n++] = 0x88;  /* SCSI ports */
863 			arr[n++] = 0x89;  /* ATA information */
864 			arr[n++] = 0xb0;  /* Block limits (SBC) */
865 			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
866 			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
867 				arr[n++] = 0xb2;
868 			arr[3] = n - 4;	  /* number of supported VPD pages */
869 		} else if (0x80 == cmd[2]) { /* unit serial number */
870 			arr[1] = cmd[2];	/*sanity */
871 			arr[3] = len;
872 			memcpy(&arr[4], lu_id_str, len);
873 		} else if (0x83 == cmd[2]) { /* device identification */
874 			arr[1] = cmd[2];	/*sanity */
875 			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
876 						 target_dev_id, lu_id_num,
877 						 lu_id_str, len);
878 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
879 			arr[1] = cmd[2];	/*sanity */
880 			arr[3] = inquiry_evpd_84(&arr[4]);
881 		} else if (0x85 == cmd[2]) { /* Management network addresses */
882 			arr[1] = cmd[2];	/*sanity */
883 			arr[3] = inquiry_evpd_85(&arr[4]);
884 		} else if (0x86 == cmd[2]) { /* extended inquiry */
885 			arr[1] = cmd[2];	/*sanity */
886 			arr[3] = 0x3c;	/* number of following entries */
887 			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
888 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
889 			else if (scsi_debug_dif)
890 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
891 			else
892 				arr[4] = 0x0;   /* no protection stuff */
893 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
894 		} else if (0x87 == cmd[2]) { /* mode page policy */
895 			arr[1] = cmd[2];	/*sanity */
896 			arr[3] = 0x8;	/* number of following entries */
897 			arr[4] = 0x2;	/* disconnect-reconnect mp */
898 			arr[6] = 0x80;	/* mlus, shared */
899 			arr[8] = 0x18;	 /* protocol specific lu */
900 			arr[10] = 0x82;	 /* mlus, per initiator port */
901 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
902 			arr[1] = cmd[2];	/*sanity */
903 			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
904 		} else if (0x89 == cmd[2]) { /* ATA information */
905 			arr[1] = cmd[2];        /*sanity */
906 			n = inquiry_evpd_89(&arr[4]);
907 			arr[2] = (n >> 8);
908 			arr[3] = (n & 0xff);
909 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
910 			arr[1] = cmd[2];        /*sanity */
911 			arr[3] = inquiry_evpd_b0(&arr[4]);
912 		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
913 			arr[1] = cmd[2];        /*sanity */
914 			arr[3] = inquiry_evpd_b1(&arr[4]);
915 		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
916 			arr[1] = cmd[2];        /*sanity */
917 			arr[3] = inquiry_evpd_b2(&arr[4]);
918 		} else {
919 			/* Illegal request, invalid field in cdb */
920 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
921 					INVALID_FIELD_IN_CDB, 0);
922 			kfree(arr);
923 			return check_condition_result;
924 		}
925 		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
926 		ret = fill_from_dev_buffer(scp, arr,
927 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
928 		kfree(arr);
929 		return ret;
930 	}
931 	/* drops through here for a standard inquiry */
932 	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
933 	arr[2] = scsi_debug_scsi_level;
934 	arr[3] = 2;    /* response_data_format==2 */
935 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
936 	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
937 	if (0 == scsi_debug_vpd_use_hostno)
938 		arr[5] = 0x10; /* claim: implicit TGPS */
939 	arr[6] = 0x10; /* claim: MultiP */
940 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
941 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
942 	memcpy(&arr[8], inq_vendor_id, 8);
943 	memcpy(&arr[16], inq_product_id, 16);
944 	memcpy(&arr[32], inq_product_rev, 4);
945 	/* version descriptors (2 bytes each) follow */
946 	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
947 	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
948 	n = 62;
949 	if (scsi_debug_ptype == 0) {
950 		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
951 	} else if (scsi_debug_ptype == 1) {
952 		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
953 	}
954 	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
955 	ret = fill_from_dev_buffer(scp, arr,
956 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
957 	kfree(arr);
958 	return ret;
959 }
960 
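/* Respond to REQUEST SENSE: return the device's current sense data (fixed or
 * descriptor format) and then clear it. */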
961 static int resp_requests(struct scsi_cmnd * scp,
962 			 struct sdebug_dev_info * devip)
963 {
964 	unsigned char * sbuff;
965 	unsigned char *cmd = (unsigned char *)scp->cmnd;
966 	unsigned char arr[SDEBUG_SENSE_LEN];
967 	int want_dsense;
968 	int len = 18;
969 
970 	memset(arr, 0, sizeof(arr));
971 	if (devip->reset == 1)
972 		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
973 	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
974 	sbuff = devip->sense_buff;
975 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
976 		if (want_dsense) {
977 			arr[0] = 0x72;
978 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
979 			arr[2] = THRESHOLD_EXCEEDED;
980 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
981 		} else {
982 			arr[0] = 0x70;
983 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
984 			arr[7] = 0xa;   	/* 18 byte sense buffer */
985 			arr[12] = THRESHOLD_EXCEEDED;
986 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
987 		}
988 	} else {
989 		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
990 		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
991 			/* DESC bit set and sense_buff in fixed format */
992 			memset(arr, 0, sizeof(arr));
993 			arr[0] = 0x72;
994 			arr[1] = sbuff[2];     /* sense key */
995 			arr[2] = sbuff[12];    /* asc */
996 			arr[3] = sbuff[13];    /* ascq */
997 			len = 8;
998 		}
999 	}
1000 	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
1001 	return fill_from_dev_buffer(scp, arr, len);
1002 }
1003 
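/* Respond to START STOP UNIT: only the start/stop bit is honoured; a non-zero
 * power condition is rejected. */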
1004 static int resp_start_stop(struct scsi_cmnd * scp,
1005 			   struct sdebug_dev_info * devip)
1006 {
1007 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1008 	int power_cond, errsts, start;
1009 
1010 	if ((errsts = check_readiness(scp, 1, devip)))
1011 		return errsts;
1012 	power_cond = (cmd[4] & 0xf0) >> 4;
1013 	if (power_cond) {
1014 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1015 			       	0);
1016 		return check_condition_result;
1017 	}
1018 	start = cmd[4] & 1;
1019 	if (start == devip->stopped)
1020 		devip->stopped = !start;
1021 	return 0;
1022 }
1023 
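/* Capacity reported to the host, in sectors; virtual_gb, if set, overrides
 * the size of the backing ramdisk. */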
1024 static sector_t get_sdebug_capacity(void)
1025 {
1026 	if (scsi_debug_virtual_gb > 0)
1027 		return (sector_t)scsi_debug_virtual_gb *
1028 			(1073741824 / scsi_debug_sector_size);
1029 	else
1030 		return sdebug_store_sectors;
1031 }
1032 
1033 #define SDEBUG_READCAP_ARR_SZ 8
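/* Respond to READ CAPACITY(10): last LBA (capped at 0xffffffff) and block size */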
1034 static int resp_readcap(struct scsi_cmnd * scp,
1035 			struct sdebug_dev_info * devip)
1036 {
1037 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1038 	unsigned int capac;
1039 	int errsts;
1040 
1041 	if ((errsts = check_readiness(scp, 1, devip)))
1042 		return errsts;
1043 	/* following just in case virtual_gb changed */
1044 	sdebug_capacity = get_sdebug_capacity();
1045 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1046 	if (sdebug_capacity < 0xffffffff) {
1047 		capac = (unsigned int)sdebug_capacity - 1;
1048 		arr[0] = (capac >> 24);
1049 		arr[1] = (capac >> 16) & 0xff;
1050 		arr[2] = (capac >> 8) & 0xff;
1051 		arr[3] = capac & 0xff;
1052 	} else {
1053 		arr[0] = 0xff;
1054 		arr[1] = 0xff;
1055 		arr[2] = 0xff;
1056 		arr[3] = 0xff;
1057 	}
1058 	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1059 	arr[7] = scsi_debug_sector_size & 0xff;
1060 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1061 }
1062 
1063 #define SDEBUG_READCAP16_ARR_SZ 32
1064 static int resp_readcap16(struct scsi_cmnd * scp,
1065 			  struct sdebug_dev_info * devip)
1066 {
1067 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1068 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1069 	unsigned long long capac;
1070 	int errsts, k, alloc_len;
1071 
1072 	if ((errsts = check_readiness(scp, 1, devip)))
1073 		return errsts;
1074 	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1075 		     + cmd[13]);
1076 	/* following just in case virtual_gb changed */
1077 	sdebug_capacity = get_sdebug_capacity();
1078 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1079 	capac = sdebug_capacity - 1;
1080 	for (k = 0; k < 8; ++k, capac >>= 8)
1081 		arr[7 - k] = capac & 0xff;
1082 	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1083 	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1084 	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1085 	arr[11] = scsi_debug_sector_size & 0xff;
1086 	arr[13] = scsi_debug_physblk_exp & 0xf;
1087 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1088 
1089 	if (scsi_debug_lbp()) {
1090 		arr[14] |= 0x80; /* LBPME */
1091 		if (scsi_debug_lbprz)
1092 			arr[14] |= 0x40; /* LBPRZ */
1093 	}
1094 
1095 	arr[15] = scsi_debug_lowest_aligned & 0xff;
1096 
1097 	if (scsi_debug_dif) {
1098 		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1099 		arr[12] |= 1; /* PROT_EN */
1100 	}
1101 
1102 	return fill_from_dev_buffer(scp, arr,
1103 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1104 }
1105 
1106 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1107 
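/* Respond to REPORT TARGET PORT GROUPS: two groups of one port each, the
 * second (port B) reported as unavailable. */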
1108 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1109 			      struct sdebug_dev_info * devip)
1110 {
1111 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1112 	unsigned char * arr;
1113 	int host_no = devip->sdbg_host->shost->host_no;
1114 	int n, ret, alen, rlen;
1115 	int port_group_a, port_group_b, port_a, port_b;
1116 
1117 	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1118 		+ cmd[9]);
1119 
1120 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1121 	if (! arr)
1122 		return DID_REQUEUE << 16;
1123 	/*
1124 	 * EVPD page 0x88 states we have two ports, one
1125 	 * real and a fake port with no device connected.
1126 	 * So we create two port groups with one port each
1127 	 * and set the group with port B to unavailable.
1128 	 */
1129 	port_a = 0x1; /* relative port A */
1130 	port_b = 0x2; /* relative port B */
1131 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1132 	    (devip->channel & 0x7f);
1133 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1134 	    (devip->channel & 0x7f) + 0x80;
1135 
1136 	/*
1137 	 * The asymmetric access state is cycled according to the host_id.
1138 	 */
1139 	n = 4;
1140 	if (0 == scsi_debug_vpd_use_hostno) {
1141 	    arr[n++] = host_no % 3; /* Asymm access state */
1142 	    arr[n++] = 0x0F; /* claim: all states are supported */
1143 	} else {
1144 	    arr[n++] = 0x0; /* Active/Optimized path */
1145 	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
1146 	}
1147 	arr[n++] = (port_group_a >> 8) & 0xff;
1148 	arr[n++] = port_group_a & 0xff;
1149 	arr[n++] = 0;    /* Reserved */
1150 	arr[n++] = 0;    /* Status code */
1151 	arr[n++] = 0;    /* Vendor unique */
1152 	arr[n++] = 0x1;  /* One port per group */
1153 	arr[n++] = 0;    /* Reserved */
1154 	arr[n++] = 0;    /* Reserved */
1155 	arr[n++] = (port_a >> 8) & 0xff;
1156 	arr[n++] = port_a & 0xff;
1157 	arr[n++] = 3;    /* Port unavailable */
1158 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1159 	arr[n++] = (port_group_b >> 8) & 0xff;
1160 	arr[n++] = port_group_b & 0xff;
1161 	arr[n++] = 0;    /* Reserved */
1162 	arr[n++] = 0;    /* Status code */
1163 	arr[n++] = 0;    /* Vendor unique */
1164 	arr[n++] = 0x1;  /* One port per group */
1165 	arr[n++] = 0;    /* Reserved */
1166 	arr[n++] = 0;    /* Reserved */
1167 	arr[n++] = (port_b >> 8) & 0xff;
1168 	arr[n++] = port_b & 0xff;
1169 
1170 	rlen = n - 4;
1171 	arr[0] = (rlen >> 24) & 0xff;
1172 	arr[1] = (rlen >> 16) & 0xff;
1173 	arr[2] = (rlen >> 8) & 0xff;
1174 	arr[3] = rlen & 0xff;
1175 
1176 	/*
1177 	 * Return the smallest of:
1178 	 * - the allocated length
1179 	 * - the constructed response length
1180 	 * - the maximum array size
1181 	 */
1182 	rlen = min(alen,n);
1183 	ret = fill_from_dev_buffer(scp, arr,
1184 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1185 	kfree(arr);
1186 	return ret;
1187 }
1188 
1189 /* <<Following mode page info copied from ST318451LW>> */
1190 
1191 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1192 {	/* Read-Write Error Recovery page for mode_sense */
1193 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1194 					5, 0, 0xff, 0xff};
1195 
1196 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1197 	if (1 == pcontrol)
1198 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1199 	return sizeof(err_recov_pg);
1200 }
1201 
1202 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1203 { 	/* Disconnect-Reconnect page for mode_sense */
1204 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1205 					 0, 0, 0, 0, 0, 0, 0, 0};
1206 
1207 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1208 	if (1 == pcontrol)
1209 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1210 	return sizeof(disconnect_pg);
1211 }
1212 
1213 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1214 {       /* Format device page for mode_sense */
1215 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1216 				     0, 0, 0, 0, 0, 0, 0, 0,
1217 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1218 
1219 	memcpy(p, format_pg, sizeof(format_pg));
1220 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1221 	p[11] = sdebug_sectors_per & 0xff;
1222 	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1223 	p[13] = scsi_debug_sector_size & 0xff;
1224 	if (scsi_debug_removable)
1225 		p[20] |= 0x20; /* should agree with INQUIRY */
1226 	if (1 == pcontrol)
1227 		memset(p + 2, 0, sizeof(format_pg) - 2);
1228 	return sizeof(format_pg);
1229 }
1230 
1231 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1232 { 	/* Caching page for mode_sense */
1233 	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1234 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1235 
1236 	memcpy(p, caching_pg, sizeof(caching_pg));
1237 	if (1 == pcontrol)
1238 		memset(p + 2, 0, sizeof(caching_pg) - 2);
1239 	return sizeof(caching_pg);
1240 }
1241 
1242 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1243 { 	/* Control mode page for mode_sense */
1244 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1245 				        0, 0, 0, 0};
1246 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1247 				     0, 0, 0x2, 0x4b};
1248 
1249 	if (scsi_debug_dsense)
1250 		ctrl_m_pg[2] |= 0x4;
1251 	else
1252 		ctrl_m_pg[2] &= ~0x4;
1253 
1254 	if (scsi_debug_ato)
1255 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1256 
1257 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1258 	if (1 == pcontrol)
1259 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1260 	else if (2 == pcontrol)
1261 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1262 	return sizeof(ctrl_m_pg);
1263 }
1264 
1265 
1266 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1267 {	/* Informational Exceptions control mode page for mode_sense */
1268 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1269 				       0, 0, 0x0, 0x0};
1270 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1271 				      0, 0, 0x0, 0x0};
1272 
1273 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1274 	if (1 == pcontrol)
1275 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1276 	else if (2 == pcontrol)
1277 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1278 	return sizeof(iec_m_pg);
1279 }
1280 
1281 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1282 {	/* SAS SSP mode page - short format for mode_sense */
1283 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1284 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1285 
1286 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1287 	if (1 == pcontrol)
1288 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1289 	return sizeof(sas_sf_m_pg);
1290 }
1291 
1292 
1293 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1294 			      int target_dev_id)
1295 {	/* SAS phy control and discover mode page for mode_sense */
1296 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1297 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1298 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1299 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1300 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1301 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1302 		    0, 0, 0, 0, 0, 0, 0, 0,
1303 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1304 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1305 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1306 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1307 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1308 		    0, 0, 0, 0, 0, 0, 0, 0,
1309 		};
1310 	int port_a, port_b;
1311 
1312 	port_a = target_dev_id + 1;
1313 	port_b = port_a + 1;
1314 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1315 	p[20] = (port_a >> 24);
1316 	p[21] = (port_a >> 16) & 0xff;
1317 	p[22] = (port_a >> 8) & 0xff;
1318 	p[23] = port_a & 0xff;
1319 	p[48 + 20] = (port_b >> 24);
1320 	p[48 + 21] = (port_b >> 16) & 0xff;
1321 	p[48 + 22] = (port_b >> 8) & 0xff;
1322 	p[48 + 23] = port_b & 0xff;
1323 	if (1 == pcontrol)
1324 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1325 	return sizeof(sas_pcd_m_pg);
1326 }
1327 
1328 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1329 {	/* SAS SSP shared protocol specific port mode subpage */
1330 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1331 		    0, 0, 0, 0, 0, 0, 0, 0,
1332 		};
1333 
1334 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1335 	if (1 == pcontrol)
1336 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1337 	return sizeof(sas_sha_m_pg);
1338 }
1339 
1340 #define SDEBUG_MAX_MSENSE_SZ 256
1341 
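/* Respond to MODE SENSE(6)/(10): optional block descriptor plus the requested
 * mode page(s). */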
1342 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1343 			   struct sdebug_dev_info * devip)
1344 {
1345 	unsigned char dbd, llbaa;
1346 	int pcontrol, pcode, subpcode, bd_len;
1347 	unsigned char dev_spec;
1348 	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1349 	unsigned char * ap;
1350 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1351 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1352 
1353 	if ((errsts = check_readiness(scp, 1, devip)))
1354 		return errsts;
1355 	dbd = !!(cmd[1] & 0x8);
1356 	pcontrol = (cmd[2] & 0xc0) >> 6;
1357 	pcode = cmd[2] & 0x3f;
1358 	subpcode = cmd[3];
1359 	msense_6 = (MODE_SENSE == cmd[0]);
1360 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1361 	if ((0 == scsi_debug_ptype) && (0 == dbd))
1362 		bd_len = llbaa ? 16 : 8;
1363 	else
1364 		bd_len = 0;
1365 	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1366 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1367 	if (0x3 == pcontrol) {  /* Saving values not supported */
1368 		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1369 			       	0);
1370 		return check_condition_result;
1371 	}
1372 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1373 			(devip->target * 1000) - 3;
1374 	/* set DPOFUA bit for disks */
1375 	if (0 == scsi_debug_ptype)
1376 		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1377 	else
1378 		dev_spec = 0x0;
1379 	if (msense_6) {
1380 		arr[2] = dev_spec;
1381 		arr[3] = bd_len;
1382 		offset = 4;
1383 	} else {
1384 		arr[3] = dev_spec;
1385 		if (16 == bd_len)
1386 			arr[4] = 0x1;	/* set LONGLBA bit */
1387 		arr[7] = bd_len;	/* assume 255 or less */
1388 		offset = 8;
1389 	}
1390 	ap = arr + offset;
1391 	if ((bd_len > 0) && (!sdebug_capacity))
1392 		sdebug_capacity = get_sdebug_capacity();
1393 
1394 	if (8 == bd_len) {
1395 		if (sdebug_capacity > 0xfffffffe) {
1396 			ap[0] = 0xff;
1397 			ap[1] = 0xff;
1398 			ap[2] = 0xff;
1399 			ap[3] = 0xff;
1400 		} else {
1401 			ap[0] = (sdebug_capacity >> 24) & 0xff;
1402 			ap[1] = (sdebug_capacity >> 16) & 0xff;
1403 			ap[2] = (sdebug_capacity >> 8) & 0xff;
1404 			ap[3] = sdebug_capacity & 0xff;
1405 		}
1406 		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1407 		ap[7] = scsi_debug_sector_size & 0xff;
1408 		offset += bd_len;
1409 		ap = arr + offset;
1410 	} else if (16 == bd_len) {
1411 		unsigned long long capac = sdebug_capacity;
1412 
1413         	for (k = 0; k < 8; ++k, capac >>= 8)
1414                 	ap[7 - k] = capac & 0xff;
1415 		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1416 		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1417 		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1418 		ap[15] = scsi_debug_sector_size & 0xff;
1419 		offset += bd_len;
1420 		ap = arr + offset;
1421 	}
1422 
1423 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1424 		/* TODO: Control Extension page */
1425 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1426 			       	0);
1427 		return check_condition_result;
1428 	}
1429 	switch (pcode) {
1430 	case 0x1:	/* Read-Write error recovery page, direct access */
1431 		len = resp_err_recov_pg(ap, pcontrol, target);
1432 		offset += len;
1433 		break;
1434 	case 0x2:	/* Disconnect-Reconnect page, all devices */
1435 		len = resp_disconnect_pg(ap, pcontrol, target);
1436 		offset += len;
1437 		break;
1438         case 0x3:       /* Format device page, direct access */
1439                 len = resp_format_pg(ap, pcontrol, target);
1440                 offset += len;
1441                 break;
1442 	case 0x8:	/* Caching page, direct access */
1443 		len = resp_caching_pg(ap, pcontrol, target);
1444 		offset += len;
1445 		break;
1446 	case 0xa:	/* Control Mode page, all devices */
1447 		len = resp_ctrl_m_pg(ap, pcontrol, target);
1448 		offset += len;
1449 		break;
1450 	case 0x19:	/* if spc==1 then sas phy, control+discover */
1451 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1452 		        mk_sense_buffer(devip, ILLEGAL_REQUEST,
1453 					INVALID_FIELD_IN_CDB, 0);
1454 			return check_condition_result;
1455 	        }
1456 		len = 0;
1457 		if ((0x0 == subpcode) || (0xff == subpcode))
1458 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1459 		if ((0x1 == subpcode) || (0xff == subpcode))
1460 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1461 						  target_dev_id);
1462 		if ((0x2 == subpcode) || (0xff == subpcode))
1463 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1464 		offset += len;
1465 		break;
1466 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1467 		len = resp_iec_m_pg(ap, pcontrol, target);
1468 		offset += len;
1469 		break;
1470 	case 0x3f:	/* Read all Mode pages */
1471 		if ((0 == subpcode) || (0xff == subpcode)) {
1472 			len = resp_err_recov_pg(ap, pcontrol, target);
1473 			len += resp_disconnect_pg(ap + len, pcontrol, target);
1474 			len += resp_format_pg(ap + len, pcontrol, target);
1475 			len += resp_caching_pg(ap + len, pcontrol, target);
1476 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1477 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1478 			if (0xff == subpcode) {
1479 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1480 						  target, target_dev_id);
1481 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1482 			}
1483 			len += resp_iec_m_pg(ap + len, pcontrol, target);
1484 		} else {
1485 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1486 					INVALID_FIELD_IN_CDB, 0);
1487 			return check_condition_result;
1488                 }
1489 		offset += len;
1490 		break;
1491 	default:
1492 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1493 			       	0);
1494 		return check_condition_result;
1495 	}
1496 	if (msense_6)
1497 		arr[0] = offset - 1;
1498 	else {
1499 		arr[0] = ((offset - 2) >> 8) & 0xff;
1500 		arr[1] = (offset - 2) & 0xff;
1501 	}
1502 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1503 }
1504 
1505 #define SDEBUG_MAX_MSELECT_SZ 512
1506 
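/* Respond to MODE SELECT: only changes to the Control and Informational
 * Exceptions mode pages are accepted. */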
1507 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1508 			    struct sdebug_dev_info * devip)
1509 {
1510 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1511 	int param_len, res, errsts, mpage;
1512 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1513 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1514 
1515 	if ((errsts = check_readiness(scp, 1, devip)))
1516 		return errsts;
1517 	memset(arr, 0, sizeof(arr));
1518 	pf = cmd[1] & 0x10;
1519 	sp = cmd[1] & 0x1;
1520 	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1521 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1522 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1523 				INVALID_FIELD_IN_CDB, 0);
1524 		return check_condition_result;
1525 	}
1526         res = fetch_to_dev_buffer(scp, arr, param_len);
1527         if (-1 == res)
1528                 return (DID_ERROR << 16);
1529         else if ((res < param_len) &&
1530                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1531                 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1532                        " IO sent=%d bytes\n", param_len, res);
1533 	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1534 	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1535 	if (md_len > 2) {
1536 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1537 				INVALID_FIELD_IN_PARAM_LIST, 0);
1538 		return check_condition_result;
1539 	}
1540 	off = bd_len + (mselect6 ? 4 : 8);
1541 	mpage = arr[off] & 0x3f;
1542 	ps = !!(arr[off] & 0x80);
1543 	if (ps) {
1544 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1545 				INVALID_FIELD_IN_PARAM_LIST, 0);
1546 		return check_condition_result;
1547 	}
1548 	spf = !!(arr[off] & 0x40);
1549 	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1550 		       (arr[off + 1] + 2);
1551 	if ((pg_len + off) > param_len) {
1552 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1553 				PARAMETER_LIST_LENGTH_ERR, 0);
1554 		return check_condition_result;
1555 	}
1556 	switch (mpage) {
1557 	case 0xa:      /* Control Mode page */
1558 		if (ctrl_m_pg[1] == arr[off + 1]) {
1559 			memcpy(ctrl_m_pg + 2, arr + off + 2,
1560 			       sizeof(ctrl_m_pg) - 2);
1561 			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1562 			return 0;
1563 		}
1564 		break;
1565 	case 0x1c:      /* Informational Exceptions Mode page */
1566 		if (iec_m_pg[1] == arr[off + 1]) {
1567 			memcpy(iec_m_pg + 2, arr + off + 2,
1568 			       sizeof(iec_m_pg) - 2);
1569 			return 0;
1570 		}
1571 		break;
1572 	default:
1573 		break;
1574 	}
1575 	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1576 			INVALID_FIELD_IN_PARAM_LIST, 0);
1577 	return check_condition_result;
1578 }
1579 
1580 static int resp_temp_l_pg(unsigned char * arr)
1581 {
1582 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1583 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1584 		};
1585 
1586         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1587         return sizeof(temp_l_pg);
1588 }
1589 
1590 static int resp_ie_l_pg(unsigned char * arr)
1591 {
1592 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1593 		};
1594 
1595         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1596 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1597 		arr[4] = THRESHOLD_EXCEEDED;
1598 		arr[5] = 0xff;
1599 	}
1600         return sizeof(ie_l_pg);
1601 }
1602 
1603 #define SDEBUG_MAX_LSENSE_SZ 512
1604 
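/* Respond to LOG SENSE: Supported Pages, Temperature and Informational
 * Exceptions log pages (and their subpage lists). */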
1605 static int resp_log_sense(struct scsi_cmnd * scp,
1606                           struct sdebug_dev_info * devip)
1607 {
1608 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1609 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1610 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1611 
1612 	if ((errsts = check_readiness(scp, 1, devip)))
1613 		return errsts;
1614 	memset(arr, 0, sizeof(arr));
1615 	ppc = cmd[1] & 0x2;
1616 	sp = cmd[1] & 0x1;
1617 	if (ppc || sp) {
1618 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1619 				INVALID_FIELD_IN_CDB, 0);
1620 		return check_condition_result;
1621 	}
1622 	pcontrol = (cmd[2] & 0xc0) >> 6;
1623 	pcode = cmd[2] & 0x3f;
1624 	subpcode = cmd[3] & 0xff;
1625 	alloc_len = (cmd[7] << 8) + cmd[8];
1626 	arr[0] = pcode;
1627 	if (0 == subpcode) {
1628 		switch (pcode) {
1629 		case 0x0:	/* Supported log pages log page */
1630 			n = 4;
1631 			arr[n++] = 0x0;		/* this page */
1632 			arr[n++] = 0xd;		/* Temperature */
1633 			arr[n++] = 0x2f;	/* Informational exceptions */
1634 			arr[3] = n - 4;
1635 			break;
1636 		case 0xd:	/* Temperature log page */
1637 			arr[3] = resp_temp_l_pg(arr + 4);
1638 			break;
1639 		case 0x2f:	/* Informational exceptions log page */
1640 			arr[3] = resp_ie_l_pg(arr + 4);
1641 			break;
1642 		default:
1643 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1644 					INVALID_FIELD_IN_CDB, 0);
1645 			return check_condition_result;
1646 		}
1647 	} else if (0xff == subpcode) {
1648 		arr[0] |= 0x40;
1649 		arr[1] = subpcode;
1650 		switch (pcode) {
1651 		case 0x0:	/* Supported log pages and subpages log page */
1652 			n = 4;
1653 			arr[n++] = 0x0;
1654 			arr[n++] = 0x0;		/* 0,0 page */
1655 			arr[n++] = 0x0;
1656 			arr[n++] = 0xff;	/* this page */
1657 			arr[n++] = 0xd;
1658 			arr[n++] = 0x0;		/* Temperature */
1659 			arr[n++] = 0x2f;
1660 			arr[n++] = 0x0;	/* Informational exceptions */
1661 			arr[3] = n - 4;
1662 			break;
1663 		case 0xd:	/* Temperature subpages */
1664 			n = 4;
1665 			arr[n++] = 0xd;
1666 			arr[n++] = 0x0;		/* Temperature */
1667 			arr[3] = n - 4;
1668 			break;
1669 		case 0x2f:	/* Informational exceptions subpages */
1670 			n = 4;
1671 			arr[n++] = 0x2f;
1672 			arr[n++] = 0x0;		/* Informational exceptions */
1673 			arr[3] = n - 4;
1674 			break;
1675 		default:
1676 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1677 					INVALID_FIELD_IN_CDB, 0);
1678 			return check_condition_result;
1679 		}
1680 	} else {
1681 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1682 				INVALID_FIELD_IN_CDB, 0);
1683 		return check_condition_result;
1684 	}
1685 	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1686 	return fill_from_dev_buffer(scp, arr,
1687 		    min(len, SDEBUG_MAX_LSENSE_SZ));
1688 }
1689 
1690 static int check_device_access_params(struct sdebug_dev_info *devi,
1691 				      unsigned long long lba, unsigned int num)
1692 {
1693 	if (lba + num > sdebug_capacity) {
1694 		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1695 		return check_condition_result;
1696 	}
1697 	/* transfer length excessive (tie in to block limits VPD page) */
1698 	if (num > sdebug_store_sectors) {
1699 		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1700 		return check_condition_result;
1701 	}
1702 	return 0;
1703 }
1704 
1705 /* Returns number of bytes copied or -1 if error. */
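/* The fake store may be smaller than the reported capacity (e.g. when the
 * virtual_gb parameter is set), so the starting block is taken modulo
 * sdebug_store_sectors and a transfer running off the end of the store
 * wraps around to its beginning ("rest" below).
 */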
1706 static int do_device_access(struct scsi_cmnd *scmd,
1707 			    struct sdebug_dev_info *devi,
1708 			    unsigned long long lba, unsigned int num, int write)
1709 {
1710 	int ret;
1711 	unsigned long long block, rest = 0;
1712 	struct scsi_data_buffer *sdb;
1713 	enum dma_data_direction dir;
1714 	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1715 		       off_t);
1716 
1717 	if (write) {
1718 		sdb = scsi_out(scmd);
1719 		dir = DMA_TO_DEVICE;
1720 		func = sg_pcopy_to_buffer;
1721 	} else {
1722 		sdb = scsi_in(scmd);
1723 		dir = DMA_FROM_DEVICE;
1724 		func = sg_pcopy_from_buffer;
1725 	}
1726 
1727 	if (!sdb->length)
1728 		return 0;
1729 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1730 		return -1;
1731 
1732 	block = do_div(lba, sdebug_store_sectors);
1733 	if (block + num > sdebug_store_sectors)
1734 		rest = block + num - sdebug_store_sectors;
1735 
1736 	ret = func(sdb->table.sgl, sdb->table.nents,
1737 		   fake_storep + (block * scsi_debug_sector_size),
1738 		   (num - rest) * scsi_debug_sector_size, 0);
1739 	if (ret != (num - rest) * scsi_debug_sector_size)
1740 		return ret;
1741 
1742 	if (rest) {
1743 		ret += func(sdb->table.sgl, sdb->table.nents,
1744 			    fake_storep, rest * scsi_debug_sector_size,
1745 			    (num - rest) * scsi_debug_sector_size);
1746 	}
1747 
1748 	return ret;
1749 }
1750 
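/* Guard tag calculation for the protection information: guard=1 selects an
 * IP checksum, guard=0 (the default) the T10 CRC.
 */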
1751 static __be16 dif_compute_csum(const void *buf, int len)
1752 {
1753 	__be16 csum;
1754 
1755 	if (scsi_debug_guard)
1756 		csum = (__force __be16)ip_compute_csum(buf, len);
1757 	else
1758 		csum = cpu_to_be16(crc_t10dif(buf, len));
1759 
1760 	return csum;
1761 }
1762 
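/* Verifies one sector's protection tuple against the data. The non-zero
 * return values are used by the callers as the ASCQ paired with ASC 0x10:
 * 0x01 (guard check failed) and 0x03 (reference tag check failed).
 */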
1763 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1764 		      sector_t sector, u32 ei_lba)
1765 {
1766 	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1767 
1768 	if (sdt->guard_tag != csum) {
1769 		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1770 			__func__,
1771 			(unsigned long)sector,
1772 			be16_to_cpu(sdt->guard_tag),
1773 			be16_to_cpu(csum));
1774 		return 0x01;
1775 	}
1776 	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1777 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1778 		pr_err("%s: REF check failed on sector %lu\n",
1779 			__func__, (unsigned long)sector);
1780 		return 0x03;
1781 	}
1782 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1783 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1784 		pr_err("%s: REF check failed on sector %lu\n",
1785 			__func__, (unsigned long)sector);
1786 		return 0x03;
1787 	}
1788 	return 0;
1789 }
1790 
1791 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1792 			  unsigned int sectors, bool read)
1793 {
1794 	size_t resid;
1795 	void *paddr;
1796 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
1797 	struct sg_mapping_iter miter;
1798 
1799 	/* Bytes of protection data to copy into sgl */
1800 	resid = sectors * sizeof(*dif_storep);
1801 
1802 	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1803 			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1804 			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1805 
1806 	while (sg_miter_next(&miter) && resid > 0) {
1807 		size_t len = min(miter.length, resid);
1808 		void *start = dif_store(sector);
1809 		size_t rest = 0;
1810 
1811 		if (dif_store_end < start + len)
1812 			rest = start + len - dif_store_end;
1813 
1814 		paddr = miter.addr;
1815 
1816 		if (read)
1817 			memcpy(paddr, start, len - rest);
1818 		else
1819 			memcpy(start, paddr, len - rest);
1820 
1821 		if (rest) {
1822 			if (read)
1823 				memcpy(paddr + len - rest, dif_storep, rest);
1824 			else
1825 				memcpy(dif_storep, paddr + len - rest, rest);
1826 		}
1827 
1828 		sector += len / sizeof(*dif_storep);
1829 		resid -= len;
1830 	}
1831 	sg_miter_stop(&miter);
1832 }
1833 
1834 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1835 			    unsigned int sectors, u32 ei_lba)
1836 {
1837 	unsigned int i;
1838 	struct sd_dif_tuple *sdt;
1839 	sector_t sector;
1840 
1841 	for (i = 0; i < sectors; i++, ei_lba++) {
1842 		int ret;
1843 
1844 		sector = start_sec + i;
1845 		sdt = dif_store(sector);
1846 
1847 		if (sdt->app_tag == cpu_to_be16(0xffff))
1848 			continue;
1849 
1850 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1851 		if (ret) {
1852 			dif_errors++;
1853 			return ret;
1854 		}
1855 	}
1856 
1857 	dif_copy_prot(SCpnt, start_sec, sectors, true);
1858 	dix_reads++;
1859 
1860 	return 0;
1861 }
1862 
1863 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1864 		     unsigned int num, struct sdebug_dev_info *devip,
1865 		     u32 ei_lba)
1866 {
1867 	unsigned long iflags;
1868 	int ret;
1869 
1870 	ret = check_device_access_params(devip, lba, num);
1871 	if (ret)
1872 		return ret;
1873 
1874 	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1875 	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1876 	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1877 		/* claim unrecoverable read error */
1878 		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1879 		/* set info field and valid bit for fixed descriptor */
1880 		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1881 			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1882 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
1883 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1884 			devip->sense_buff[3] = (ret >> 24) & 0xff;
1885 			devip->sense_buff[4] = (ret >> 16) & 0xff;
1886 			devip->sense_buff[5] = (ret >> 8) & 0xff;
1887 			devip->sense_buff[6] = ret & 0xff;
1888 		}
1889 		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1890 		return check_condition_result;
1891 	}
1892 
1893 	read_lock_irqsave(&atomic_rw, iflags);
1894 
1895 	/* DIX + T10 DIF */
1896 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1897 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1898 
1899 		if (prot_ret) {
1900 			read_unlock_irqrestore(&atomic_rw, iflags);
1901 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1902 			return illegal_condition_result;
1903 		}
1904 	}
1905 
1906 	ret = do_device_access(SCpnt, devip, lba, num, 0);
1907 	read_unlock_irqrestore(&atomic_rw, iflags);
1908 	if (ret == -1)
1909 		return DID_ERROR << 16;
1910 
1911 	scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1912 
1913 	return 0;
1914 }
1915 
1916 static void dump_sector(unsigned char *buf, int len)
1917 {
1918 	int i, j;
1919 
1920 	printk(KERN_ERR ">>> Sector Dump <<<\n");
1921 
1922 	for (i = 0 ; i < len ; i += 16) {
1923 		printk(KERN_ERR "%04d: ", i);
1924 
1925 		for (j = 0 ; j < 16 ; j++) {
1926 			unsigned char c = buf[i+j];
1927 			if (c >= 0x20 && c < 0x7e)
1928 				printk(" %c ", buf[i+j]);
1929 			else
1930 				printk("%02x ", buf[i+j]);
1931 		}
1932 
1933 		printk("\n");
1934 	}
1935 }
1936 
1937 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1938 			     unsigned int sectors, u32 ei_lba)
1939 {
1940 	int ret;
1941 	struct sd_dif_tuple *sdt;
1942 	void *daddr;
1943 	sector_t sector = start_sec;
1944 	int ppage_offset;
1945 	int dpage_offset;
1946 	struct sg_mapping_iter diter;
1947 	struct sg_mapping_iter piter;
1948 
1949 	BUG_ON(scsi_sg_count(SCpnt) == 0);
1950 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1951 
1952 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
1953 			scsi_prot_sg_count(SCpnt),
1954 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1955 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1956 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1957 
1958 	/* For each protection page */
1959 	while (sg_miter_next(&piter)) {
1960 		dpage_offset = 0;
1961 		if (WARN_ON(!sg_miter_next(&diter))) {
1962 			ret = 0x01;
1963 			goto out;
1964 		}
1965 
1966 		for (ppage_offset = 0; ppage_offset < piter.length;
1967 		     ppage_offset += sizeof(struct sd_dif_tuple)) {
1968 			/* If we're at the end of the current
1969 			 * data page, advance to the next one.
1970 			 */
1971 			if (dpage_offset >= diter.length) {
1972 				if (WARN_ON(!sg_miter_next(&diter))) {
1973 					ret = 0x01;
1974 					goto out;
1975 				}
1976 				dpage_offset = 0;
1977 			}
1978 
1979 			sdt = piter.addr + ppage_offset;
1980 			daddr = diter.addr + dpage_offset;
1981 
1982 			ret = dif_verify(sdt, daddr, sector, ei_lba);
1983 			if (ret) {
1984 				dump_sector(daddr, scsi_debug_sector_size);
1985 				goto out;
1986 			}
1987 
1988 			sector++;
1989 			ei_lba++;
1990 			dpage_offset += scsi_debug_sector_size;
1991 		}
1992 		diter.consumed = dpage_offset;
1993 		sg_miter_stop(&diter);
1994 	}
1995 	sg_miter_stop(&piter);
1996 
1997 	dif_copy_prot(SCpnt, start_sec, sectors, false);
1998 	dix_writes++;
1999 
2000 	return 0;
2001 
2002 out:
2003 	dif_errors++;
2004 	sg_miter_stop(&diter);
2005 	sg_miter_stop(&piter);
2006 	return ret;
2007 }
2008 
2009 static unsigned long lba_to_map_index(sector_t lba)
2010 {
2011 	if (scsi_debug_unmap_alignment) {
2012 		lba += scsi_debug_unmap_granularity -
2013 			scsi_debug_unmap_alignment;
2014 	}
2015 	do_div(lba, scsi_debug_unmap_granularity);
2016 
2017 	return lba;
2018 }
2019 
2020 static sector_t map_index_to_lba(unsigned long index)
2021 {
2022 	sector_t lba = index * scsi_debug_unmap_granularity;
2023 
2024 	if (scsi_debug_unmap_alignment) {
2025 		lba -= scsi_debug_unmap_granularity -
2026 			scsi_debug_unmap_alignment;
2027 	}
2028 
2029 	return lba;
2030 }
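/* Example mapping (illustrative values, not the defaults): with
 * unmap_granularity=8 and unmap_alignment=2, LBAs 0-1 belong to provisioning
 * block 0, LBAs 2-9 to block 1, and map_index_to_lba(1) == 2, the first LBA
 * of that block.
 */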
2031 
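/* Reports whether 'lba' is currently mapped and, via *num, how many
 * consecutive LBAs starting there share that state (clamped at the end of
 * the fake store). Used to build the GET LBA STATUS response.
 */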
2032 static unsigned int map_state(sector_t lba, unsigned int *num)
2033 {
2034 	sector_t end;
2035 	unsigned int mapped;
2036 	unsigned long index;
2037 	unsigned long next;
2038 
2039 	index = lba_to_map_index(lba);
2040 	mapped = test_bit(index, map_storep);
2041 
2042 	if (mapped)
2043 		next = find_next_zero_bit(map_storep, map_size, index);
2044 	else
2045 		next = find_next_bit(map_storep, map_size, index);
2046 
2047 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2048 	*num = end - lba;
2049 
2050 	return mapped;
2051 }
2052 
2053 static void map_region(sector_t lba, unsigned int len)
2054 {
2055 	sector_t end = lba + len;
2056 
2057 	while (lba < end) {
2058 		unsigned long index = lba_to_map_index(lba);
2059 
2060 		if (index < map_size)
2061 			set_bit(index, map_storep);
2062 
2063 		lba = map_index_to_lba(index + 1);
2064 	}
2065 }
2066 
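/* Clears the provisioning bit for every whole provisioning block that lies
 * inside [lba, lba + len). If lbprz is set the backing store of such a block
 * is zeroed so subsequent reads return zeros; any protection data is reset
 * to 0xff (application tag 0xffff, which the read-side verification skips).
 */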
2067 static void unmap_region(sector_t lba, unsigned int len)
2068 {
2069 	sector_t end = lba + len;
2070 
2071 	while (lba < end) {
2072 		unsigned long index = lba_to_map_index(lba);
2073 
2074 		if (lba == map_index_to_lba(index) &&
2075 		    lba + scsi_debug_unmap_granularity <= end &&
2076 		    index < map_size) {
2077 			clear_bit(index, map_storep);
2078 			if (scsi_debug_lbprz) {
2079 				memset(fake_storep +
2080 				       lba * scsi_debug_sector_size, 0,
2081 				       scsi_debug_sector_size *
2082 				       scsi_debug_unmap_granularity);
2083 			}
2084 			if (dif_storep) {
2085 				memset(dif_storep + lba, 0xff,
2086 				       sizeof(*dif_storep) *
2087 				       scsi_debug_unmap_granularity);
2088 			}
2089 		}
2090 		lba = map_index_to_lba(index + 1);
2091 	}
2092 }
2093 
2094 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2095 		      unsigned int num, struct sdebug_dev_info *devip,
2096 		      u32 ei_lba)
2097 {
2098 	unsigned long iflags;
2099 	int ret;
2100 
2101 	ret = check_device_access_params(devip, lba, num);
2102 	if (ret)
2103 		return ret;
2104 
2105 	write_lock_irqsave(&atomic_rw, iflags);
2106 
2107 	/* DIX + T10 DIF */
2108 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2109 		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2110 
2111 		if (prot_ret) {
2112 			write_unlock_irqrestore(&atomic_rw, iflags);
2113 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2114 			return illegal_condition_result;
2115 		}
2116 	}
2117 
2118 	ret = do_device_access(SCpnt, devip, lba, num, 1);
2119 	if (scsi_debug_lbp())
2120 		map_region(lba, num);
2121 	write_unlock_irqrestore(&atomic_rw, iflags);
2122 	if (-1 == ret)
2123 		return (DID_ERROR << 16);
2124 	else if ((ret < (num * scsi_debug_sector_size)) &&
2125 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2127 		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2128 
2129 	return 0;
2130 }
2131 
2132 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2133 		      unsigned int num, struct sdebug_dev_info *devip,
2134 			   u32 ei_lba, unsigned int unmap)
2135 {
2136 	unsigned long iflags;
2137 	unsigned long long i;
2138 	int ret;
2139 
2140 	ret = check_device_access_params(devip, lba, num);
2141 	if (ret)
2142 		return ret;
2143 
2144 	if (num > scsi_debug_write_same_length) {
2145 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2146 				0);
2147 		return check_condition_result;
2148 	}
2149 
2150 	write_lock_irqsave(&atomic_rw, iflags);
2151 
2152 	if (unmap && scsi_debug_lbp()) {
2153 		unmap_region(lba, num);
2154 		goto out;
2155 	}
2156 
2157 	/* Else fetch one logical block */
2158 	ret = fetch_to_dev_buffer(scmd,
2159 				  fake_storep + (lba * scsi_debug_sector_size),
2160 				  scsi_debug_sector_size);
2161 
2162 	if (-1 == ret) {
2163 		write_unlock_irqrestore(&atomic_rw, iflags);
2164 		return (DID_ERROR << 16);
2165 	} else if ((ret < scsi_debug_sector_size) &&
2166 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2167 		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%d, "
2168 		       "IO sent=%d bytes\n", scsi_debug_sector_size, ret);
2169 
2170 	/* Copy first sector to remaining blocks */
2171 	for (i = 1 ; i < num ; i++)
2172 		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2173 		       fake_storep + (lba * scsi_debug_sector_size),
2174 		       scsi_debug_sector_size);
2175 
2176 	if (scsi_debug_lbp())
2177 		map_region(lba, num);
2178 out:
2179 	write_unlock_irqrestore(&atomic_rw, iflags);
2180 
2181 	return 0;
2182 }
2183 
2184 struct unmap_block_desc {
2185 	__be64	lba;
2186 	__be32	blocks;
2187 	__be32	__reserved;
2188 };
2189 
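/* resp_unmap() below expects the SBC UNMAP parameter list: an 8 byte header
 * (UNMAP DATA LENGTH in bytes 0-1, BLOCK DESCRIPTOR DATA LENGTH in bytes 2-3)
 * followed by the 16 byte block descriptors defined above.
 */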
2190 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2191 {
2192 	unsigned char *buf;
2193 	struct unmap_block_desc *desc;
2194 	unsigned int i, payload_len, descriptors;
2195 	int ret;
2196 	unsigned long iflags;
2197 
2198 	ret = check_readiness(scmd, 1, devip);
2199 	if (ret)
2200 		return ret;
2201 
2202 	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2203 	BUG_ON(scsi_bufflen(scmd) != payload_len);
2204 
2205 	descriptors = (payload_len - 8) / 16;
2206 
2207 	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2208 	if (!buf)
2209 		return check_condition_result;
2210 
2211 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2212 
2213 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2214 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2215 
2216 	desc = (void *)&buf[8];
2217 
2218 	write_lock_irqsave(&atomic_rw, iflags);
2219 
2220 	for (i = 0 ; i < descriptors ; i++) {
2221 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2222 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2223 
2224 		ret = check_device_access_params(devip, lba, num);
2225 		if (ret)
2226 			goto out;
2227 
2228 		unmap_region(lba, num);
2229 	}
2230 
2231 	ret = 0;
2232 
2233 out:
2234 	write_unlock_irqrestore(&atomic_rw, iflags);
2235 	kfree(buf);
2236 
2237 	return ret;
2238 }
2239 
2240 #define SDEBUG_GET_LBA_STATUS_LEN 32
2241 
2242 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2243 			       struct sdebug_dev_info * devip)
2244 {
2245 	unsigned long long lba;
2246 	unsigned int alloc_len, mapped, num;
2247 	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2248 	int ret;
2249 
2250 	ret = check_readiness(scmd, 1, devip);
2251 	if (ret)
2252 		return ret;
2253 
2254 	lba = get_unaligned_be64(&scmd->cmnd[2]);
2255 	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2256 
2257 	if (alloc_len < 24)
2258 		return 0;
2259 
2260 	ret = check_device_access_params(devip, lba, 1);
2261 	if (ret)
2262 		return ret;
2263 
2264 	mapped = map_state(lba, &num);
2265 
2266 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2267 	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2268 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2269 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2270 	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2271 
2272 	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2273 }
2274 
2275 #define SDEBUG_RLUN_ARR_SZ 256
2276 
2277 static int resp_report_luns(struct scsi_cmnd * scp,
2278 			    struct sdebug_dev_info * devip)
2279 {
2280 	unsigned int alloc_len;
2281 	int lun_cnt, i, upper, num, n, wlun, lun;
2282 	unsigned char *cmd = (unsigned char *)scp->cmnd;
2283 	int select_report = (int)cmd[2];
2284 	struct scsi_lun *one_lun;
2285 	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2286 	unsigned char * max_addr;
2287 
2288 	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2289 	if ((alloc_len < 4) || (select_report > 2)) {
2290 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2291 			       	0);
2292 		return check_condition_result;
2293 	}
2294 	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2295 	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2296 	lun_cnt = scsi_debug_max_luns;
2297 	if (1 == select_report)
2298 		lun_cnt = 0;
2299 	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2300 		--lun_cnt;
2301 	wlun = (select_report > 0) ? 1 : 0;
2302 	num = lun_cnt + wlun;
2303 	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2304 	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2305 	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2306 			    sizeof(struct scsi_lun)), num);
2307 	if (n < num) {
2308 		wlun = 0;
2309 		lun_cnt = n;
2310 	}
2311 	one_lun = (struct scsi_lun *) &arr[8];
2312 	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2313 	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2314              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2315 	     i++, lun++) {
2316 		upper = (lun >> 8) & 0x3f;
2317 		if (upper)
2318 			one_lun[i].scsi_lun[0] =
2319 			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2320 		one_lun[i].scsi_lun[1] = lun & 0xff;
2321 	}
2322 	if (wlun) {
2323 		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2324 		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2325 		i++;
2326 	}
2327 	alloc_len = (unsigned char *)(one_lun + i) - arr;
2328 	return fill_from_dev_buffer(scp, arr,
2329 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2330 }
2331 
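/* Helper for XDWRITEREAD(10): XORs the command's data-out payload into the
 * data-in (bidirectional) buffer, which is assumed to already hold the data
 * read back from the fake store by an earlier resp_read() call.
 */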
2332 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2333 			    unsigned int num, struct sdebug_dev_info *devip)
2334 {
2335 	int j;
2336 	unsigned char *kaddr, *buf;
2337 	unsigned int offset;
2338 	struct scsi_data_buffer *sdb = scsi_in(scp);
2339 	struct sg_mapping_iter miter;
2340 
2341 	/* better would be to avoid this temporary buffer and its extra copy */
2342 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2343 	if (!buf) {
2344 		mk_sense_buffer(devip, NOT_READY,
2345 				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2346 		return check_condition_result;
2347 	}
2348 
2349 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2350 
2351 	offset = 0;
2352 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2353 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
2354 
2355 	while (sg_miter_next(&miter)) {
2356 		kaddr = miter.addr;
2357 		for (j = 0; j < miter.length; j++)
2358 			*(kaddr + j) ^= *(buf + offset + j);
2359 
2360 		offset += miter.length;
2361 	}
2362 	sg_miter_stop(&miter);
2363 	kfree(buf);
2364 
2365 	return 0;
2366 }
2367 
2368 /* Called when the response-delay timer of a queued command fires. */
2369 static void timer_intr_handler(unsigned long indx)
2370 {
2371 	struct sdebug_queued_cmd * sqcp;
2372 	unsigned long iflags;
2373 
2374 	if (indx >= scsi_debug_max_queue) {
2375 		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2376 		       "large\n");
2377 		return;
2378 	}
2379 	spin_lock_irqsave(&queued_arr_lock, iflags);
2380 	sqcp = &queued_arr[(int)indx];
2381 	if (! sqcp->in_use) {
2382 		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2383 		       "interrupt\n");
2384 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2385 		return;
2386 	}
2387 	sqcp->in_use = 0;
2388 	if (sqcp->done_funct) {
2389 		sqcp->a_cmnd->result = sqcp->scsi_result;
2390 		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2391 	}
2392 	sqcp->done_funct = NULL;
2393 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2394 }
2395 
2396 
2397 static struct sdebug_dev_info *
2398 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2399 {
2400 	struct sdebug_dev_info *devip;
2401 
2402 	devip = kzalloc(sizeof(*devip), flags);
2403 	if (devip) {
2404 		devip->sdbg_host = sdbg_host;
2405 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2406 	}
2407 	return devip;
2408 }
2409 
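/* Returns the sdebug_dev_info already attached to sdev, otherwise reuses an
 * unused entry on the owning host's dev_info_list or allocates a fresh one.
 */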
2410 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2411 {
2412 	struct sdebug_host_info * sdbg_host;
2413 	struct sdebug_dev_info * open_devip = NULL;
2414 	struct sdebug_dev_info * devip =
2415 			(struct sdebug_dev_info *)sdev->hostdata;
2416 
2417 	if (devip)
2418 		return devip;
2419 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2420 	if (!sdbg_host) {
2421 		printk(KERN_ERR "Host info NULL\n");
2422 		return NULL;
2423 	}
2424 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2425 		if ((devip->used) && (devip->channel == sdev->channel) &&
2426 		    (devip->target == sdev->id) &&
2427 		    (devip->lun == sdev->lun))
2428 			return devip;
2429 		else {
2430 			if ((!devip->used) && (!open_devip))
2431 				open_devip = devip;
2432 		}
2433 	}
2434 	if (!open_devip) { /* try and make a new one */
2435 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2436 		if (!open_devip) {
2437 			printk(KERN_ERR "%s: out of memory at line %d\n",
2438 				__func__, __LINE__);
2439 			return NULL;
2440 		}
2441 	}
2442 
2443 	open_devip->channel = sdev->channel;
2444 	open_devip->target = sdev->id;
2445 	open_devip->lun = sdev->lun;
2446 	open_devip->sdbg_host = sdbg_host;
2447 	open_devip->reset = 1;
2448 	open_devip->used = 1;
2449 	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2450 	if (scsi_debug_dsense)
2451 		open_devip->sense_buff[0] = 0x72;
2452 	else {
2453 		open_devip->sense_buff[0] = 0x70;
2454 		open_devip->sense_buff[7] = 0xa;
2455 	}
2456 	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2457 		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2458 
2459 	return open_devip;
2460 }
2461 
2462 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2463 {
2464 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2465 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2466 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2467 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2468 	return 0;
2469 }
2470 
2471 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2472 {
2473 	struct sdebug_dev_info *devip;
2474 
2475 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2476 		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2477 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2478 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2479 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2480 	devip = devInfoReg(sdp);
2481 	if (NULL == devip)
2482 		return 1;	/* no resources, will be marked offline */
2483 	sdp->hostdata = devip;
2484 	if (sdp->host->cmd_per_lun)
2485 		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2486 					sdp->host->cmd_per_lun);
2487 	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2488 	if (scsi_debug_no_uld)
2489 		sdp->no_uld_attach = 1;
2490 	return 0;
2491 }
2492 
2493 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2494 {
2495 	struct sdebug_dev_info *devip =
2496 		(struct sdebug_dev_info *)sdp->hostdata;
2497 
2498 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2499 		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2500 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2501 	if (devip) {
2502 		/* make this slot available for re-use */
2503 		devip->used = 0;
2504 		sdp->hostdata = NULL;
2505 	}
2506 }
2507 
2508 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2509 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2510 {
2511 	unsigned long iflags;
2512 	int k;
2513 	struct sdebug_queued_cmd *sqcp;
2514 
2515 	spin_lock_irqsave(&queued_arr_lock, iflags);
2516 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2517 		sqcp = &queued_arr[k];
2518 		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2519 			del_timer_sync(&sqcp->cmnd_timer);
2520 			sqcp->in_use = 0;
2521 			sqcp->a_cmnd = NULL;
2522 			break;
2523 		}
2524 	}
2525 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2526 	return (k < scsi_debug_max_queue) ? 1 : 0;
2527 }
2528 
2529 /* Deletes (stops) timers of all queued commands */
2530 static void stop_all_queued(void)
2531 {
2532 	unsigned long iflags;
2533 	int k;
2534 	struct sdebug_queued_cmd *sqcp;
2535 
2536 	spin_lock_irqsave(&queued_arr_lock, iflags);
2537 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2538 		sqcp = &queued_arr[k];
2539 		if (sqcp->in_use && sqcp->a_cmnd) {
2540 			del_timer_sync(&sqcp->cmnd_timer);
2541 			sqcp->in_use = 0;
2542 			sqcp->a_cmnd = NULL;
2543 		}
2544 	}
2545 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2546 }
2547 
2548 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2549 {
2550 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2551 		printk(KERN_INFO "scsi_debug: abort\n");
2552 	++num_aborts;
2553 	stop_queued_cmnd(SCpnt);
2554 	return SUCCESS;
2555 }
2556 
2557 static int scsi_debug_biosparam(struct scsi_device *sdev,
2558 		struct block_device * bdev, sector_t capacity, int *info)
2559 {
2560 	int res;
2561 	unsigned char *buf;
2562 
2563 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2564 		printk(KERN_INFO "scsi_debug: biosparam\n");
2565 	buf = scsi_bios_ptable(bdev);
2566 	if (buf) {
2567 		res = scsi_partsize(buf, capacity,
2568 				    &info[2], &info[0], &info[1]);
2569 		kfree(buf);
2570 		if (! res)
2571 			return res;
2572 	}
2573 	info[0] = sdebug_heads;
2574 	info[1] = sdebug_sectors_per;
2575 	info[2] = sdebug_cylinders_per;
2576 	return 0;
2577 }
2578 
2579 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2580 {
2581 	struct sdebug_dev_info * devip;
2582 
2583 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2584 		printk(KERN_INFO "scsi_debug: device_reset\n");
2585 	++num_dev_resets;
2586 	if (SCpnt) {
2587 		devip = devInfoReg(SCpnt->device);
2588 		if (devip)
2589 			devip->reset = 1;
2590 	}
2591 	return SUCCESS;
2592 }
2593 
2594 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2595 {
2596 	struct sdebug_host_info *sdbg_host;
2597 	struct sdebug_dev_info * dev_info;
2598 	struct scsi_device * sdp;
2599 	struct Scsi_Host * hp;
2600 
2601 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2602 		printk(KERN_INFO "scsi_debug: bus_reset\n");
2603 	++num_bus_resets;
2604 	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2605 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2606 		if (sdbg_host) {
2607 			list_for_each_entry(dev_info,
2608                                             &sdbg_host->dev_info_list,
2609                                             dev_list)
2610 				dev_info->reset = 1;
2611 		}
2612 	}
2613 	return SUCCESS;
2614 }
2615 
2616 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2617 {
2618 	struct sdebug_host_info * sdbg_host;
2619 	struct sdebug_dev_info * dev_info;
2620 
2621 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2622 		printk(KERN_INFO "scsi_debug: host_reset\n");
2623 	++num_host_resets;
2624 	spin_lock(&sdebug_host_list_lock);
2625 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2626 		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2627 				    dev_list)
2628 			dev_info->reset = 1;
2629 	}
2630 	spin_unlock(&sdebug_host_list_lock);
2631 	stop_all_queued();
2632 	return SUCCESS;
2633 }
2634 
2635 /* Initializes timers in queued array */
2636 static void __init init_all_queued(void)
2637 {
2638 	unsigned long iflags;
2639 	int k;
2640 	struct sdebug_queued_cmd * sqcp;
2641 
2642 	spin_lock_irqsave(&queued_arr_lock, iflags);
2643 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2644 		sqcp = &queued_arr[k];
2645 		init_timer(&sqcp->cmnd_timer);
2646 		sqcp->in_use = 0;
2647 		sqcp->a_cmnd = NULL;
2648 	}
2649 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2650 }
2651 
2652 static void __init sdebug_build_parts(unsigned char *ramp,
2653 				      unsigned long store_size)
2654 {
2655 	struct partition * pp;
2656 	int starts[SDEBUG_MAX_PARTS + 2];
2657 	int sectors_per_part, num_sectors, k;
2658 	int heads_by_sects, start_sec, end_sec;
2659 
2660 	/* assume partition table already zeroed */
2661 	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2662 		return;
2663 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2664 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2665 		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2666 				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2667 	}
2668 	num_sectors = (int)sdebug_store_sectors;
2669 	sectors_per_part = (num_sectors - sdebug_sectors_per)
2670 			   / scsi_debug_num_parts;
2671 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2672 	starts[0] = sdebug_sectors_per;
2673 	for (k = 1; k < scsi_debug_num_parts; ++k)
2674 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2675 			    * heads_by_sects;
2676 	starts[scsi_debug_num_parts] = num_sectors;
2677 	starts[scsi_debug_num_parts + 1] = 0;
2678 
2679 	ramp[510] = 0x55;	/* magic partition markings */
2680 	ramp[511] = 0xAA;
2681 	pp = (struct partition *)(ramp + 0x1be);
2682 	for (k = 0; starts[k + 1]; ++k, ++pp) {
2683 		start_sec = starts[k];
2684 		end_sec = starts[k + 1] - 1;
2685 		pp->boot_ind = 0;
2686 
2687 		pp->cyl = start_sec / heads_by_sects;
2688 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2689 			   / sdebug_sectors_per;
2690 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2691 
2692 		pp->end_cyl = end_sec / heads_by_sects;
2693 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2694 			       / sdebug_sectors_per;
2695 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2696 
2697 		pp->start_sect = cpu_to_le32(start_sec);
2698 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2699 		pp->sys_ind = 0x83;	/* plain Linux partition */
2700 	}
2701 }
2702 
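/* Common completion path. When delta_jiff <= 0 the command is completed
 * inline; otherwise a free slot in queued_arr is claimed and its timer
 * completes the command delta_jiff jiffies later. Returns non-zero (reported
 * as busy to the mid level) when no slot is free.
 */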
2703 static int schedule_resp(struct scsi_cmnd * cmnd,
2704 			 struct sdebug_dev_info * devip,
2705 			 done_funct_t done, int scsi_result, int delta_jiff)
2706 {
2707 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2708 		if (scsi_result) {
2709 			struct scsi_device * sdp = cmnd->device;
2710 
2711 			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2712 			       "non-zero result=0x%x\n", sdp->host->host_no,
2713 			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2714 		}
2715 	}
2716 	if (cmnd && devip) {
2717 		/* simulate autosense by this driver */
2718 		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2719 			memcpy(cmnd->sense_buffer, devip->sense_buff,
2720 			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2721 			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2722 	}
2723 	if (delta_jiff <= 0) {
2724 		if (cmnd)
2725 			cmnd->result = scsi_result;
2726 		if (done)
2727 			done(cmnd);
2728 		return 0;
2729 	} else {
2730 		unsigned long iflags;
2731 		int k;
2732 		struct sdebug_queued_cmd * sqcp = NULL;
2733 
2734 		spin_lock_irqsave(&queued_arr_lock, iflags);
2735 		for (k = 0; k < scsi_debug_max_queue; ++k) {
2736 			sqcp = &queued_arr[k];
2737 			if (! sqcp->in_use)
2738 				break;
2739 		}
2740 		if (k >= scsi_debug_max_queue) {
2741 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2742 			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2743 			return 1;	/* report busy to mid level */
2744 		}
2745 		sqcp->in_use = 1;
2746 		sqcp->a_cmnd = cmnd;
2747 		sqcp->scsi_result = scsi_result;
2748 		sqcp->done_funct = done;
2749 		sqcp->cmnd_timer.function = timer_intr_handler;
2750 		sqcp->cmnd_timer.data = k;
2751 		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2752 		add_timer(&sqcp->cmnd_timer);
2753 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2754 		if (cmnd)
2755 			cmnd->result = 0;
2756 		return 0;
2757 	}
2758 }
2759 /* Note: The following macros create attribute files in the
2760    /sys/module/scsi_debug/parameters directory. Unfortunately this
2761    driver is not notified of changes made through those files and so
2762    cannot trigger auxiliary actions, as it can when the corresponding
2763    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2764  */
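/* For example, "echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth"
   goes through every_nth_store() and also resets the internal command
   counter, whereas writing the same value to
   /sys/module/scsi_debug/parameters/every_nth only updates the variable.
 */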
2765 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2766 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2767 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
2768 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2769 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2770 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2771 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2772 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2773 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2774 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2775 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2776 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2777 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2778 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2779 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2780 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2781 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2782 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2783 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2784 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2785 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2786 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2787 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2788 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2789 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2790 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2791 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2792 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2793 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2794 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2795 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2796 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2797 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2798 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2799 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2800 		   S_IRUGO | S_IWUSR);
2801 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2802 		   S_IRUGO | S_IWUSR);
2803 
2804 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2805 MODULE_DESCRIPTION("SCSI debug adapter driver");
2806 MODULE_LICENSE("GPL");
2807 MODULE_VERSION(SCSI_DEBUG_VERSION);
2808 
2809 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2810 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2811 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
2812 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2813 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2814 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2815 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2816 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2817 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2818 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2819 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2820 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2821 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2822 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2823 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2824 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2825 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2826 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2827 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2828 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2829 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2830 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2831 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2832 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2833 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2834 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2835 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2836 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2837 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2838 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2839 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2840 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2841 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2842 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2843 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2844 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2845 
2846 static char sdebug_info[256];
2847 
2848 static const char * scsi_debug_info(struct Scsi_Host * shp)
2849 {
2850 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2851 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2852 		scsi_debug_version_date, scsi_debug_dev_size_mb,
2853 		scsi_debug_opts);
2854 	return sdebug_info;
2855 }
2856 
2857 /* Handles writes to /proc/scsi/scsi_debug/<host_no>: a decimal value
2858  * written there becomes the new "opts" setting.
2859  */
2860 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2861 {
2862 	char arr[16];
2863 	int opts;
2864 	int minLen = length > 15 ? 15 : length;
2865 
2866 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2867 		return -EACCES;
2868 	memcpy(arr, buffer, minLen);
2869 	arr[minLen] = '\0';
2870 	if (1 != sscanf(arr, "%d", &opts))
2871 		return -EINVAL;
2872 	scsi_debug_opts = opts;
2873 	if (scsi_debug_every_nth != 0)
2874 		scsi_debug_cmnd_count = 0;
2875 	return length;
2876 }
2877 
2878 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2879 {
2880 	seq_printf(m, "scsi_debug adapter driver, version "
2881 	    "%s [%s]\n"
2882 	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2883 	    "every_nth=%d(curr:%d)\n"
2884 	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2885 	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2886 	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2887 	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2888 	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2889 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2890 	    scsi_debug_cmnd_count, scsi_debug_delay,
2891 	    scsi_debug_max_luns, scsi_debug_scsi_level,
2892 	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2893 	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2894 	    num_host_resets, dix_reads, dix_writes, dif_errors);
2895 	return 0;
2896 }
2897 
2898 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2899 {
2900         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2901 }
2902 
2903 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
2904 			   size_t count)
2905 {
2906         int delay;
2907 	char work[20];
2908 
2909         if (1 == sscanf(buf, "%10s", work)) {
2910 		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2911 			scsi_debug_delay = delay;
2912 			return count;
2913 		}
2914 	}
2915 	return -EINVAL;
2916 }
2917 static DRIVER_ATTR_RW(delay);
2918 
2919 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2920 {
2921         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2922 }
2923 
2924 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2925 			  size_t count)
2926 {
2927         int opts;
2928 	char work[20];
2929 
2930         if (1 == sscanf(buf, "%10s", work)) {
2931 		if (0 == strnicmp(work,"0x", 2)) {
2932 			if (1 == sscanf(&work[2], "%x", &opts))
2933 				goto opts_done;
2934 		} else {
2935 			if (1 == sscanf(work, "%d", &opts))
2936 				goto opts_done;
2937 		}
2938 	}
2939 	return -EINVAL;
2940 opts_done:
2941 	scsi_debug_opts = opts;
2942 	scsi_debug_cmnd_count = 0;
2943 	return count;
2944 }
2945 static DRIVER_ATTR_RW(opts);
2946 
2947 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2948 {
2949         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2950 }
2951 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2952 			   size_t count)
2953 {
2954         int n;
2955 
2956 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2957 		scsi_debug_ptype = n;
2958 		return count;
2959 	}
2960 	return -EINVAL;
2961 }
2962 static DRIVER_ATTR_RW(ptype);
2963 
2964 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2965 {
2966         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2967 }
2968 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2969 			    size_t count)
2970 {
2971         int n;
2972 
2973 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2974 		scsi_debug_dsense = n;
2975 		return count;
2976 	}
2977 	return -EINVAL;
2978 }
2979 static DRIVER_ATTR_RW(dsense);
2980 
2981 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2982 {
2983         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2984 }
2985 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2986 			     size_t count)
2987 {
2988         int n;
2989 
2990 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2991 		scsi_debug_fake_rw = n;
2992 		return count;
2993 	}
2994 	return -EINVAL;
2995 }
2996 static DRIVER_ATTR_RW(fake_rw);
2997 
2998 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
2999 {
3000         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3001 }
3002 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3003 			      size_t count)
3004 {
3005         int n;
3006 
3007 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3008 		scsi_debug_no_lun_0 = n;
3009 		return count;
3010 	}
3011 	return -EINVAL;
3012 }
3013 static DRIVER_ATTR_RW(no_lun_0);
3014 
3015 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3016 {
3017         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3018 }
3019 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3020 			      size_t count)
3021 {
3022         int n;
3023 
3024 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3025 		scsi_debug_num_tgts = n;
3026 		sdebug_max_tgts_luns();
3027 		return count;
3028 	}
3029 	return -EINVAL;
3030 }
3031 static DRIVER_ATTR_RW(num_tgts);
3032 
3033 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3034 {
3035         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3036 }
3037 static DRIVER_ATTR_RO(dev_size_mb);
3038 
3039 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3040 {
3041         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3042 }
3043 static DRIVER_ATTR_RO(num_parts);
3044 
3045 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3046 {
3047         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3048 }
3049 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3050 			       size_t count)
3051 {
3052         int nth;
3053 
3054 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3055 		scsi_debug_every_nth = nth;
3056 		scsi_debug_cmnd_count = 0;
3057 		return count;
3058 	}
3059 	return -EINVAL;
3060 }
3061 static DRIVER_ATTR_RW(every_nth);
3062 
3063 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3064 {
3065         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3066 }
3067 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3068 			      size_t count)
3069 {
3070         int n;
3071 
3072 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3073 		scsi_debug_max_luns = n;
3074 		sdebug_max_tgts_luns();
3075 		return count;
3076 	}
3077 	return -EINVAL;
3078 }
3079 static DRIVER_ATTR_RW(max_luns);
3080 
3081 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3082 {
3083         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3084 }
3085 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3086 			       size_t count)
3087 {
3088         int n;
3089 
3090 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3091 	    (n <= SCSI_DEBUG_CANQUEUE)) {
3092 		scsi_debug_max_queue = n;
3093 		return count;
3094 	}
3095 	return -EINVAL;
3096 }
3097 static DRIVER_ATTR_RW(max_queue);
3098 
3099 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3100 {
3101         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3102 }
3103 static DRIVER_ATTR_RO(no_uld);
3104 
3105 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3106 {
3107         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3108 }
3109 static DRIVER_ATTR_RO(scsi_level);
3110 
3111 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3112 {
3113         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3114 }
3115 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3116 				size_t count)
3117 {
3118         int n;
3119 
3120 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3121 		scsi_debug_virtual_gb = n;
3122 
3123 		sdebug_capacity = get_sdebug_capacity();
3124 
3125 		return count;
3126 	}
3127 	return -EINVAL;
3128 }
3129 static DRIVER_ATTR_RW(virtual_gb);
3130 
3131 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3132 {
3133         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3134 }
3135 
3136 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3137 			      size_t count)
3138 {
3139 	int delta_hosts;
3140 
3141 	if (sscanf(buf, "%d", &delta_hosts) != 1)
3142 		return -EINVAL;
3143 	if (delta_hosts > 0) {
3144 		do {
3145 			sdebug_add_adapter();
3146 		} while (--delta_hosts);
3147 	} else if (delta_hosts < 0) {
3148 		do {
3149 			sdebug_remove_adapter();
3150 		} while (++delta_hosts);
3151 	}
3152 	return count;
3153 }
3154 static DRIVER_ATTR_RW(add_host);
3155 
3156 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3157 {
3158 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3159 }
3160 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3161 				    size_t count)
3162 {
3163 	int n;
3164 
3165 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3166 		scsi_debug_vpd_use_hostno = n;
3167 		return count;
3168 	}
3169 	return -EINVAL;
3170 }
3171 static DRIVER_ATTR_RW(vpd_use_hostno);
3172 
3173 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3174 {
3175 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3176 }
3177 static DRIVER_ATTR_RO(sector_size);
3178 
3179 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3180 {
3181 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3182 }
3183 static DRIVER_ATTR_RO(dix);
3184 
3185 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3186 {
3187 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3188 }
3189 static DRIVER_ATTR_RO(dif);
3190 
3191 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3192 {
3193 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3194 }
3195 static DRIVER_ATTR_RO(guard);
3196 
3197 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3198 {
3199 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3200 }
3201 static DRIVER_ATTR_RO(ato);
3202 
3203 static ssize_t map_show(struct device_driver *ddp, char *buf)
3204 {
3205 	ssize_t count;
3206 
3207 	if (!scsi_debug_lbp())
3208 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3209 				 sdebug_store_sectors);
3210 
3211 	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3212 
3213 	buf[count++] = '\n';
3214 	buf[count++] = 0;
3215 
3216 	return count;
3217 }
3218 static DRIVER_ATTR_RO(map);
3219 
3220 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3221 {
3222 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3223 }
3224 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3225 			       size_t count)
3226 {
3227 	int n;
3228 
3229 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3230 		scsi_debug_removable = (n > 0);
3231 		return count;
3232 	}
3233 	return -EINVAL;
3234 }
3235 static DRIVER_ATTR_RW(removable);
3236 
3237 /* Note: The following array creates attribute files in the
3238    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3239    files (over those found in the /sys/module/scsi_debug/parameters
3240    directory) is that auxiliary actions can be triggered when an attribute
3241    is changed. For example see: add_host_store() above.
3242  */
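/* For example, "echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host" goes
   through add_host_store() above and brings up one more simulated adapter
   (host).
 */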
3243 
3244 static struct attribute *sdebug_drv_attrs[] = {
3245 	&driver_attr_delay.attr,
3246 	&driver_attr_opts.attr,
3247 	&driver_attr_ptype.attr,
3248 	&driver_attr_dsense.attr,
3249 	&driver_attr_fake_rw.attr,
3250 	&driver_attr_no_lun_0.attr,
3251 	&driver_attr_num_tgts.attr,
3252 	&driver_attr_dev_size_mb.attr,
3253 	&driver_attr_num_parts.attr,
3254 	&driver_attr_every_nth.attr,
3255 	&driver_attr_max_luns.attr,
3256 	&driver_attr_max_queue.attr,
3257 	&driver_attr_no_uld.attr,
3258 	&driver_attr_scsi_level.attr,
3259 	&driver_attr_virtual_gb.attr,
3260 	&driver_attr_add_host.attr,
3261 	&driver_attr_vpd_use_hostno.attr,
3262 	&driver_attr_sector_size.attr,
3263 	&driver_attr_dix.attr,
3264 	&driver_attr_dif.attr,
3265 	&driver_attr_guard.attr,
3266 	&driver_attr_ato.attr,
3267 	&driver_attr_map.attr,
3268 	&driver_attr_removable.attr,
3269 	NULL,
3270 };
3271 ATTRIBUTE_GROUPS(sdebug_drv);
3272 
3273 static struct device *pseudo_primary;
3274 
3275 static int __init scsi_debug_init(void)
3276 {
3277 	unsigned long sz;
3278 	int host_to_add;
3279 	int k;
3280 	int ret;
3281 
3282 	switch (scsi_debug_sector_size) {
3283 	case  512:
3284 	case 1024:
3285 	case 2048:
3286 	case 4096:
3287 		break;
3288 	default:
3289 		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3290 		       scsi_debug_sector_size);
3291 		return -EINVAL;
3292 	}
3293 
3294 	switch (scsi_debug_dif) {
3295 
3296 	case SD_DIF_TYPE0_PROTECTION:
3297 	case SD_DIF_TYPE1_PROTECTION:
3298 	case SD_DIF_TYPE2_PROTECTION:
3299 	case SD_DIF_TYPE3_PROTECTION:
3300 		break;
3301 
3302 	default:
3303 		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3304 		return -EINVAL;
3305 	}
3306 
3307 	if (scsi_debug_guard > 1) {
3308 		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3309 		return -EINVAL;
3310 	}
3311 
3312 	if (scsi_debug_ato > 1) {
3313 		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3314 		return -EINVAL;
3315 	}
3316 
3317 	if (scsi_debug_physblk_exp > 15) {
3318 		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3319 		       scsi_debug_physblk_exp);
3320 		return -EINVAL;
3321 	}
3322 
3323 	if (scsi_debug_lowest_aligned > 0x3fff) {
3324 		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3325 		       scsi_debug_lowest_aligned);
3326 		return -EINVAL;
3327 	}
3328 
3329 	if (scsi_debug_dev_size_mb < 1)
3330 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3331 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3332 	sdebug_store_sectors = sz / scsi_debug_sector_size;
3333 	sdebug_capacity = get_sdebug_capacity();
3334 
3335 	/* play around with geometry, don't waste too much on track 0 */
3336 	sdebug_heads = 8;
3337 	sdebug_sectors_per = 32;
3338 	if (scsi_debug_dev_size_mb >= 256)
3339 		sdebug_heads = 64;
3340 	else if (scsi_debug_dev_size_mb >= 16)
3341 		sdebug_heads = 32;
3342 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3343 			       (sdebug_sectors_per * sdebug_heads);
3344 	if (sdebug_cylinders_per >= 1024) {
3345 		/* other LLDs do this; implies >= 1GB ram disk ... */
3346 		sdebug_heads = 255;
3347 		sdebug_sectors_per = 63;
3348 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3349 			       (sdebug_sectors_per * sdebug_heads);
3350 	}
3351 
3352 	fake_storep = vmalloc(sz);
3353 	if (NULL == fake_storep) {
3354 		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3355 		return -ENOMEM;
3356 	}
3357 	memset(fake_storep, 0, sz);
3358 	if (scsi_debug_num_parts > 0)
3359 		sdebug_build_parts(fake_storep, sz);
3360 
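	/* When DIX is enabled, keep a shadow store holding one protection
	   information tuple per sector; the 0xff fill leaves not-yet-written
	   sectors with the all-ones (escape) application tag. */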
3361 	if (scsi_debug_dix) {
3362 		int dif_size;
3363 
3364 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3365 		dif_storep = vmalloc(dif_size);
3366 
3367 		printk(KERN_INFO "scsi_debug_init: dif_storep %u bytes @ %p\n",
3368 		       dif_size, dif_storep);
3369 
3370 		if (dif_storep == NULL) {
3371 			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3372 			ret = -ENOMEM;
3373 			goto free_vm;
3374 		}
3375 
3376 		memset(dif_storep, 0xff, dif_size);
3377 	}
3378 
3379 	/* Logical Block Provisioning */
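	/* map_storep is a bitmap with one bit per "provisioning block"
	   (unmap_granularity sectors); a set bit means the block is
	   currently mapped (see lba_to_map_index() above). */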
3380 	if (scsi_debug_lbp()) {
3381 		scsi_debug_unmap_max_blocks =
3382 			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3383 
3384 		scsi_debug_unmap_max_desc =
3385 			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3386 
3387 		scsi_debug_unmap_granularity =
3388 			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3389 
3390 		if (scsi_debug_unmap_alignment &&
3391 		    scsi_debug_unmap_granularity <=
3392 		    scsi_debug_unmap_alignment) {
3393 			printk(KERN_ERR
3394 			       "%s: ERR: unmap_granularity <= unmap_alignment\n",
3395 			       __func__);
3396 			ret = -EINVAL;
			goto free_vm;
3397 		}
3398 
3399 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3400 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3401 
3402 		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3403 		       map_size);
3404 
3405 		if (map_storep == NULL) {
3406 			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3407 			ret = -ENOMEM;
3408 			goto free_vm;
3409 		}
3410 
3411 		bitmap_zero(map_storep, map_size);
3412 
3413 		/* Map the first two sectors to cover the partition table */
3414 		if (scsi_debug_num_parts)
3415 			map_region(0, 2);
3416 	}
3417 
3418 	pseudo_primary = root_device_register("pseudo_0");
3419 	if (IS_ERR(pseudo_primary)) {
3420 		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3421 		ret = PTR_ERR(pseudo_primary);
3422 		goto free_vm;
3423 	}
3424 	ret = bus_register(&pseudo_lld_bus);
3425 	if (ret < 0) {
3426 		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3427 			ret);
3428 		goto dev_unreg;
3429 	}
3430 	ret = driver_register(&sdebug_driverfs_driver);
3431 	if (ret < 0) {
3432 		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3433 			ret);
3434 		goto bus_unreg;
3435 	}
3436 
3437 	init_all_queued();
3438 
3439 	host_to_add = scsi_debug_add_host;
3440 	scsi_debug_add_host = 0;
3441 
3442 	for (k = 0; k < host_to_add; k++) {
3443 		if (sdebug_add_adapter()) {
3444 			printk(KERN_ERR "scsi_debug_init: "
3445 			       "sdebug_add_adapter failed k=%d\n", k);
3446 			break;
3447 		}
3448 	}
3449 
3450 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3451 		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3452 		       scsi_debug_add_host);
3453 	}
3454 	return 0;
3455 
3456 bus_unreg:
3457 	bus_unregister(&pseudo_lld_bus);
3458 dev_unreg:
3459 	root_device_unregister(pseudo_primary);
3460 free_vm:
3461 	if (map_storep)
3462 		vfree(map_storep);
3463 	if (dif_storep)
3464 		vfree(dif_storep);
3465 	vfree(fake_storep);
3466 
3467 	return ret;
3468 }
3469 
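/* Module unload: tear down in roughly the reverse order of scsi_debug_init(),
   then release the ramdisk and any protection/provisioning storage.
 */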
3470 static void __exit scsi_debug_exit(void)
3471 {
3472 	int k = scsi_debug_add_host;
3473 
3474 	stop_all_queued();
3475 	for (; k; k--)
3476 		sdebug_remove_adapter();
3477 	driver_unregister(&sdebug_driverfs_driver);
3478 	bus_unregister(&pseudo_lld_bus);
3479 	root_device_unregister(pseudo_primary);
3480 
3481 	vfree(map_storep);
3482 	vfree(dif_storep);
3483 
3484 	vfree(fake_storep);
3485 }
3486 
3487 device_initcall(scsi_debug_init);
3488 module_exit(scsi_debug_exit);
3489 
3490 static void sdebug_release_adapter(struct device *dev)
3491 {
3492 	struct sdebug_host_info *sdbg_host;
3493 
3494 	sdbg_host = to_sdebug_host(dev);
3495 	kfree(sdbg_host);
3496 }
3497 
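/* Allocate one simulated adapter, create its num_tgts * max_luns device
   entries, and register it on the pseudo bus; the bus probe callback
   (sdebug_driver_probe) then builds and scans the SCSI host.
 */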
3498 static int sdebug_add_adapter(void)
3499 {
3500 	int k, devs_per_host;
3501 	int error = 0;
3502 	struct sdebug_host_info *sdbg_host;
3503 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3504 
3505 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
3506 	if (NULL == sdbg_host) {
3507 		printk(KERN_ERR "%s: out of memory at line %d\n",
3508 		       __func__, __LINE__);
3509 		return -ENOMEM;
3510 	}
3511 
3512 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3513 
3514 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3515 	for (k = 0; k < devs_per_host; k++) {
3516 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3517 		if (!sdbg_devinfo) {
3518 			printk(KERN_ERR "%s: out of memory at line %d\n",
3519 			       __func__, __LINE__);
3520 			error = -ENOMEM;
3521 			goto clean;
3522 		}
3523 	}
3524 
3525 	spin_lock(&sdebug_host_list_lock);
3526 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3527 	spin_unlock(&sdebug_host_list_lock);
3528 
3529 	sdbg_host->dev.bus = &pseudo_lld_bus;
3530 	sdbg_host->dev.parent = pseudo_primary;
3531 	sdbg_host->dev.release = &sdebug_release_adapter;
3532 	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3533 
3534 	error = device_register(&sdbg_host->dev);
3535 
3536 	if (error)
3537 		goto clean;
3538 
3539 	++scsi_debug_add_host;
3540 	return error;
3541 
3542 clean:
3543 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3544 				 dev_list) {
3545 		list_del(&sdbg_devinfo->dev_list);
3546 		kfree(sdbg_devinfo);
3547 	}
3548 
3549 	kfree(sdbg_host);
3550 	return error;
3551 }
3552 
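/* Remove the most recently added simulated adapter. device_unregister()
   drops the reference taken at registration, so sdebug_release_adapter()
   frees the adapter once the last reference is gone.
 */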
3553 static void sdebug_remove_adapter(void)
3554 {
3555 	struct sdebug_host_info *sdbg_host = NULL;
3556 
3557 	spin_lock(&sdebug_host_list_lock);
3558 	if (!list_empty(&sdebug_host_list)) {
3559 		sdbg_host = list_entry(sdebug_host_list.prev,
3560 					struct sdebug_host_info, host_list);
3561 		list_del(&sdbg_host->host_list);
3562 	}
3563 	spin_unlock(&sdebug_host_list_lock);
3564 
3565 	if (!sdbg_host)
3566 		return;
3567 
3568 	device_unregister(&sdbg_host->dev);
3569 	--scsi_debug_add_host;
3570 }
3571 
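/* Command dispatcher: decode the CDB opcode, apply any configured error
   injection, and queue a response via schedule_resp(). The DEF_SCSI_QCMD()
   wrapper below supplies the host-lock-taking entry point that the SCSI
   midlayer actually calls.
 */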
3572 static
3573 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3574 {
3575 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3576 	int len, k;
3577 	unsigned int num;
3578 	unsigned long long lba;
3579 	u32 ei_lba;
3580 	int errsts = 0;
3581 	int target = SCpnt->device->id;
3582 	struct sdebug_dev_info *devip = NULL;
3583 	int inj_recovered = 0;
3584 	int inj_transport = 0;
3585 	int inj_dif = 0;
3586 	int inj_dix = 0;
3587 	int inj_short = 0;
3588 	int delay_override = 0;
3589 	int unmap = 0;
3590 
3591 	scsi_set_resid(SCpnt, 0);
3592 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3593 		printk(KERN_INFO "scsi_debug: cmd ");
3594 		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3595 			printk("%02x ", (int)cmd[k]);
3596 		printk("\n");
3597 	}
3598 
3599 	if (target == SCpnt->device->host->hostt->this_id) {
3600 		printk(KERN_INFO "scsi_debug: initiator's id used as "
3601 		       "target!\n");
3602 		return schedule_resp(SCpnt, NULL, done,
3603 				     DID_NO_CONNECT << 16, 0);
3604 	}
3605 
3606 	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3607 	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3608 		return schedule_resp(SCpnt, NULL, done,
3609 				     DID_NO_CONNECT << 16, 0);
3610 	devip = devInfoReg(SCpnt->device);
3611 	if (NULL == devip)
3612 		return schedule_resp(SCpnt, NULL, done,
3613 				     DID_NO_CONNECT << 16, 0);
3614 
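	/* Optional error injection: every |every_nth| commands, act on the
	   error-injection flags selected in scsi_debug_opts. */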
3615 	if ((scsi_debug_every_nth != 0) &&
3616 	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3617 		scsi_debug_cmnd_count = 0;
3618 		if (scsi_debug_every_nth < -1)
3619 			scsi_debug_every_nth = -1;
3620 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3621 			return 0; /* ignore command causing timeout */
3622 		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3623 			 scsi_medium_access_command(SCpnt))
3624 			return 0; /* time out reads and writes */
3625 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3626 			inj_recovered = 1; /* to reads and writes below */
3627 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3628 			inj_transport = 1; /* to reads and writes below */
3629 		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3630 			inj_dif = 1; /* to reads and writes below */
3631 		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3632 			inj_dix = 1; /* to reads and writes below */
3633 		else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
3634 			inj_short = 1;
3635 	}
3636 
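	/* The report-luns well known LUN only accepts a small set of opcodes. */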
3637 	if (devip->wlun) {
3638 		switch (*cmd) {
3639 		case INQUIRY:
3640 		case REQUEST_SENSE:
3641 		case TEST_UNIT_READY:
3642 		case REPORT_LUNS:
3643 			break;  /* only allowable wlun commands */
3644 		default:
3645 			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3646 				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3647 				       "not supported for wlun\n", *cmd);
3648 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3649 					INVALID_OPCODE, 0);
3650 			errsts = check_condition_result;
3651 			return schedule_resp(SCpnt, devip, done, errsts,
3652 					     0);
3653 		}
3654 	}
3655 
3656 	switch (*cmd) {
3657 	case INQUIRY:     /* mandatory, ignore unit attention */
3658 		delay_override = 1;
3659 		errsts = resp_inquiry(SCpnt, target, devip);
3660 		break;
3661 	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3662 		delay_override = 1;
3663 		errsts = resp_requests(SCpnt, devip);
3664 		break;
3665 	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3666 	case START_STOP:
3667 		errsts = resp_start_stop(SCpnt, devip);
3668 		break;
3669 	case ALLOW_MEDIUM_REMOVAL:
3670 		errsts = check_readiness(SCpnt, 1, devip);
3671 		if (errsts)
3672 			break;
3673 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3674 			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3675 			       cmd[4] ? "inhibited" : "enabled");
3676 		break;
3677 	case SEND_DIAGNOSTIC:     /* mandatory */
3678 		errsts = check_readiness(SCpnt, 1, devip);
3679 		break;
3680 	case TEST_UNIT_READY:     /* mandatory */
3681 		delay_override = 1;
3682 		errsts = check_readiness(SCpnt, 0, devip);
3683 		break;
3684 	case RESERVE:
3685 		errsts = check_readiness(SCpnt, 1, devip);
3686 		break;
3687 	case RESERVE_10:
3688 		errsts = check_readiness(SCpnt, 1, devip);
3689 		break;
3690 	case RELEASE:
3691 		errsts = check_readiness(SCpnt, 1, devip);
3692 		break;
3693 	case RELEASE_10:
3694 		errsts = check_readiness(SCpnt, 1, devip);
3695 		break;
3696 	case READ_CAPACITY:
3697 		errsts = resp_readcap(SCpnt, devip);
3698 		break;
3699 	case SERVICE_ACTION_IN:
3700 		if (cmd[1] == SAI_READ_CAPACITY_16)
3701 			errsts = resp_readcap16(SCpnt, devip);
3702 		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3703 
3704 			if (scsi_debug_lbp() == 0) {
3705 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3706 						INVALID_COMMAND_OPCODE, 0);
3707 				errsts = check_condition_result;
3708 			} else
3709 				errsts = resp_get_lba_status(SCpnt, devip);
3710 		} else {
3711 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3712 					INVALID_OPCODE, 0);
3713 			errsts = check_condition_result;
3714 		}
3715 		break;
3716 	case MAINTENANCE_IN:
3717 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3718 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3719 					INVALID_OPCODE, 0);
3720 			errsts = check_condition_result;
3721 			break;
3722 		}
3723 		errsts = resp_report_tgtpgs(SCpnt, devip);
3724 		break;
3725 	case READ_16:
3726 	case READ_12:
3727 	case READ_10:
3728 		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3729 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3730 		    cmd[1] & 0xe0) {
3731 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3732 					INVALID_COMMAND_OPCODE, 0);
3733 			errsts = check_condition_result;
3734 			break;
3735 		}
3736 
3737 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3738 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3739 		    (cmd[1] & 0xe0) == 0)
3740 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3741 
3742 		/* fall through */
3743 	case READ_6:
3744 read:
3745 		errsts = check_readiness(SCpnt, 0, devip);
3746 		if (errsts)
3747 			break;
3748 		if (scsi_debug_fake_rw)
3749 			break;
3750 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3751 
3752 		if (inj_short)
3753 			num /= 2;
3754 
3755 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3756 		if (inj_recovered && (0 == errsts)) {
3757 			mk_sense_buffer(devip, RECOVERED_ERROR,
3758 					THRESHOLD_EXCEEDED, 0);
3759 			errsts = check_condition_result;
3760 		} else if (inj_transport && (0 == errsts)) {
3761 			mk_sense_buffer(devip, ABORTED_COMMAND,
3762 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3763 			errsts = check_condition_result;
3764 		} else if (inj_dif && (0 == errsts)) {
3765 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3766 			errsts = illegal_condition_result;
3767 		} else if (inj_dix && (0 == errsts)) {
3768 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3769 			errsts = illegal_condition_result;
3770 		}
3771 		break;
3772 	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3773 		delay_override = 1;
3774 		errsts = resp_report_luns(SCpnt, devip);
3775 		break;
3776 	case VERIFY:		/* 10 byte SBC-2 command */
3777 		errsts = check_readiness(SCpnt, 0, devip);
3778 		break;
3779 	case WRITE_16:
3780 	case WRITE_12:
3781 	case WRITE_10:
3782 		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3783 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3784 		    cmd[1] & 0xe0) {
3785 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3786 					INVALID_COMMAND_OPCODE, 0);
3787 			errsts = check_condition_result;
3788 			break;
3789 		}
3790 
3791 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3792 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3793 		    (cmd[1] & 0xe0) == 0)
3794 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3795 
3796 		/* fall through */
3797 	case WRITE_6:
3798 write:
3799 		errsts = check_readiness(SCpnt, 0, devip);
3800 		if (errsts)
3801 			break;
3802 		if (scsi_debug_fake_rw)
3803 			break;
3804 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3805 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3806 		if (inj_recovered && (0 == errsts)) {
3807 			mk_sense_buffer(devip, RECOVERED_ERROR,
3808 					THRESHOLD_EXCEEDED, 0);
3809 			errsts = check_condition_result;
3810 		} else if (inj_dif && (0 == errsts)) {
3811 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3812 			errsts = illegal_condition_result;
3813 		} else if (inj_dix && (0 == errsts)) {
3814 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3815 			errsts = illegal_condition_result;
3816 		}
3817 		break;
3818 	case WRITE_SAME_16:
3819 	case WRITE_SAME:
3820 		if (cmd[1] & 0x8) {
3821 			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3822 			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3823 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3824 						INVALID_FIELD_IN_CDB, 0);
3825 				errsts = check_condition_result;
3826 			} else
3827 				unmap = 1;
3828 		}
3829 		if (errsts)
3830 			break;
3831 		errsts = check_readiness(SCpnt, 0, devip);
3832 		if (errsts)
3833 			break;
3834 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3835 		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3836 		break;
3837 	case UNMAP:
3838 		errsts = check_readiness(SCpnt, 0, devip);
3839 		if (errsts)
3840 			break;
3841 
3842 		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3843 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3844 					INVALID_COMMAND_OPCODE, 0);
3845 			errsts = check_condition_result;
3846 		} else
3847 			errsts = resp_unmap(SCpnt, devip);
3848 		break;
3849 	case MODE_SENSE:
3850 	case MODE_SENSE_10:
3851 		errsts = resp_mode_sense(SCpnt, target, devip);
3852 		break;
3853 	case MODE_SELECT:
3854 		errsts = resp_mode_select(SCpnt, 1, devip);
3855 		break;
3856 	case MODE_SELECT_10:
3857 		errsts = resp_mode_select(SCpnt, 0, devip);
3858 		break;
3859 	case LOG_SENSE:
3860 		errsts = resp_log_sense(SCpnt, devip);
3861 		break;
3862 	case SYNCHRONIZE_CACHE:
3863 		delay_override = 1;
3864 		errsts = check_readiness(SCpnt, 0, devip);
3865 		break;
3866 	case WRITE_BUFFER:
3867 		errsts = check_readiness(SCpnt, 1, devip);
3868 		break;
3869 	case XDWRITEREAD_10:
3870 		if (!scsi_bidi_cmnd(SCpnt)) {
3871 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3872 					INVALID_FIELD_IN_CDB, 0);
3873 			errsts = check_condition_result;
3874 			break;
3875 		}
3876 
3877 		errsts = check_readiness(SCpnt, 0, devip);
3878 		if (errsts)
3879 			break;
3880 		if (scsi_debug_fake_rw)
3881 			break;
3882 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3883 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3884 		if (errsts)
3885 			break;
3886 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3887 		if (errsts)
3888 			break;
3889 		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3890 		break;
3891 	case VARIABLE_LENGTH_CMD:
3892 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3893 
3894 			if ((cmd[10] & 0xe0) == 0)
3895 				printk(KERN_ERR
3896 				       "Unprotected RD/WR to DIF device\n");
3897 
3898 			if (cmd[9] == READ_32) {
3899 				BUG_ON(SCpnt->cmd_len < 32);
3900 				goto read;
3901 			}
3902 
3903 			if (cmd[9] == WRITE_32) {
3904 				BUG_ON(SCpnt->cmd_len < 32);
3905 				goto write;
3906 			}
3907 		}
3908 
3909 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3910 				INVALID_FIELD_IN_CDB, 0);
3911 		errsts = check_condition_result;
3912 		break;
3913 
3914 	default:
3915 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3916 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3917 			       "supported\n", *cmd);
3918 		errsts = check_readiness(SCpnt, 1, devip);
3919 		if (errsts)
3920 			break;	/* Unit attention takes precedence */
3921 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3922 		errsts = check_condition_result;
3923 		break;
3924 	}
3925 	return schedule_resp(SCpnt, devip, done, errsts,
3926 			     (delay_override ? 0 : scsi_debug_delay));
3927 }
3928 
3929 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3930 
3931 static struct scsi_host_template sdebug_driver_template = {
3932 	.show_info =		scsi_debug_show_info,
3933 	.write_info =		scsi_debug_write_info,
3934 	.proc_name =		sdebug_proc_name,
3935 	.name =			"SCSI DEBUG",
3936 	.info =			scsi_debug_info,
3937 	.slave_alloc =		scsi_debug_slave_alloc,
3938 	.slave_configure =	scsi_debug_slave_configure,
3939 	.slave_destroy =	scsi_debug_slave_destroy,
3940 	.ioctl =		scsi_debug_ioctl,
3941 	.queuecommand =		scsi_debug_queuecommand,
3942 	.eh_abort_handler =	scsi_debug_abort,
3943 	.eh_bus_reset_handler = scsi_debug_bus_reset,
3944 	.eh_device_reset_handler = scsi_debug_device_reset,
3945 	.eh_host_reset_handler = scsi_debug_host_reset,
3946 	.bios_param =		scsi_debug_biosparam,
3947 	.can_queue =		SCSI_DEBUG_CANQUEUE,
3948 	.this_id =		7,
3949 	.sg_tablesize =		256,
3950 	.cmd_per_lun =		16,
3951 	.max_sectors =		0xffff,
3952 	.use_clustering = 	DISABLE_CLUSTERING,
3953 	.module =		THIS_MODULE,
3954 };
3955 
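/* Bus probe callback, invoked for each adapter registered by
   sdebug_add_adapter(): allocate a Scsi_Host, configure its DIF/DIX
   protection capabilities, add it and scan for the simulated devices.
 */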
3956 static int sdebug_driver_probe(struct device *dev)
3957 {
3958 	int error = 0;
3959 	struct sdebug_host_info *sdbg_host;
3960 	struct Scsi_Host *hpnt;
3961 	int host_prot;
3962 
3963 	sdbg_host = to_sdebug_host(dev);
3964 
3965 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
3966 	if (scsi_debug_clustering)
3967 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
3968 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3969 	if (NULL == hpnt) {
3970 		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
3971 		error = -ENODEV;
3972 		return error;
3973 	}
3974 
3975 	sdbg_host->shost = hpnt;
3976 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3977 	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3978 		hpnt->max_id = scsi_debug_num_tgts + 1;
3979 	else
3980 		hpnt->max_id = scsi_debug_num_tgts;
3981 	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
3982 
3983 	host_prot = 0;
3984 
3985 	switch (scsi_debug_dif) {
3986 
3987 	case SD_DIF_TYPE1_PROTECTION:
3988 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3989 		if (scsi_debug_dix)
3990 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3991 		break;
3992 
3993 	case SD_DIF_TYPE2_PROTECTION:
3994 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3995 		if (scsi_debug_dix)
3996 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3997 		break;
3998 
3999 	case SD_DIF_TYPE3_PROTECTION:
4000 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
4001 		if (scsi_debug_dix)
4002 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4003 		break;
4004 
4005 	default:
4006 		if (scsi_debug_dix)
4007 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4008 		break;
4009 	}
4010 
4011 	scsi_host_set_prot(hpnt, host_prot);
4012 
4013 	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4014 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4015 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4016 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4017 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4018 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4019 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4020 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4021 
4022 	if (scsi_debug_guard == 1)
4023 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4024 	else
4025 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4026 
4027 	error = scsi_add_host(hpnt, &sdbg_host->dev);
4028 	if (error) {
4029 		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4030 		error = -ENODEV;
4031 		scsi_host_put(hpnt);
4032 	} else
4033 		scsi_scan_host(hpnt);
4034 
4036 	return error;
4037 }
4038 
4039 static int sdebug_driver_remove(struct device *dev)
4040 {
4041 	struct sdebug_host_info *sdbg_host;
4042 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4043 
4044 	sdbg_host = to_sdebug_host(dev);
4045 
4046 	if (!sdbg_host) {
4047 		printk(KERN_ERR "%s: Unable to locate host info\n",
4048 		       __func__);
4049 		return -ENODEV;
4050 	}
4051 
4052 	scsi_remove_host(sdbg_host->shost);
4053 
4054 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4055 				 dev_list) {
4056 		list_del(&sdbg_devinfo->dev_list);
4057 		kfree(sdbg_devinfo);
4058 	}
4059 
4060 	scsi_host_put(sdbg_host->shost);
4061 	return 0;
4062 }
4063 
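/* Every device on the pseudo bus is a scsi_debug adapter, so any
   driver/device pairing matches.
 */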
4064 static int pseudo_lld_bus_match(struct device *dev,
4065 				struct device_driver *dev_driver)
4066 {
4067 	return 1;
4068 }
4069 
4070 static struct bus_type pseudo_lld_bus = {
4071 	.name = "pseudo",
4072 	.match = pseudo_lld_bus_match,
4073 	.probe = sdebug_driver_probe,
4074 	.remove = sdebug_driver_remove,
4075 	.drv_groups = sdebug_drv_groups,
4076 };
4077