xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision 9ab98f57)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
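/*
 * Illustrative usage (a sketch, not exhaustive): parameter names follow the
 * module_param() declarations later in this file; the values below are
 * arbitrary examples.
 *
 *   modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=64
 *
 * or, as kernel boot options when the driver is built in:
 *
 *   scsi_debug.num_tgts=2 scsi_debug.max_luns=4 scsi_debug.dev_size_mb=64
 */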
27 
28 #include <linux/module.h>
29 
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63 
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
79 
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
82 
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
84 
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST   1
87 #define DEF_NUM_TGTS   1
88 #define DEF_MAX_LUNS   1
89 /* With these defaults, this driver will make 1 host with 1 target
90  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91  */
92 #define DEF_DELAY   1
93 #define DEF_DEV_SIZE_MB   8
94 #define DEF_EVERY_NTH   0
95 #define DEF_NUM_PARTS   0
96 #define DEF_OPTS   0
97 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
98 #define DEF_PTYPE   0
99 #define DEF_D_SENSE   0
100 #define DEF_NO_LUN_0   0
101 #define DEF_VIRTUAL_GB   0
102 #define DEF_FAKE_RW	0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
105 #define DEF_DIX 0
106 #define DEF_DIF 0
107 #define DEF_GUARD 0
108 #define DEF_ATO 1
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_OPT_BLKS 64
112 #define DEF_UNMAP_MAX_BLOCKS 0
113 #define DEF_UNMAP_MAX_DESC 0
114 #define DEF_UNMAP_GRANULARITY 0
115 #define DEF_UNMAP_ALIGNMENT 0
116 
117 /* bit mask values for scsi_debug_opts */
118 #define SCSI_DEBUG_OPT_NOISE   1
119 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
120 #define SCSI_DEBUG_OPT_TIMEOUT   4
121 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
122 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
123 #define SCSI_DEBUG_OPT_DIF_ERR   32
124 #define SCSI_DEBUG_OPT_DIX_ERR   64
125 /* When "every_nth" > 0 then modulo "every_nth" commands:
126  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
127  *   - a RECOVERED_ERROR is simulated on successful read and write
128  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
129  *   - a TRANSPORT_ERROR is simulated on successful read and write
130  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
131  *
132  * When "every_nth" < 0 then after "- every_nth" commands:
133  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134  *   - a RECOVERED_ERROR is simulated on successful read and write
135  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136  *   - a TRANSPORT_ERROR is simulated on successful read and write
137  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
138  * This will continue until some other action occurs (e.g. the user
139  * writing a new value (other than -1 or 1) to every_nth via sysfs).
140  */
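/*
 * Runtime example (a sketch; assumes the sysfs attributes created by this
 * driver's pseudo bus, typically /sys/bus/pseudo/drivers/scsi_debug/):
 *
 *   echo 4   > /sys/bus/pseudo/drivers/scsi_debug/opts       (OPT_TIMEOUT)
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *
 * After this, every 100th command gets no response until every_nth is
 * written again, as described above.
 */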
141 
142 /* When the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
143  * error is simulated at this sector on read commands: */
144 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
145 
146 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
147  * or "peripheral device" addressing (value 0) */
148 #define SAM2_LUN_ADDRESS_METHOD 0
149 #define SAM2_WLUN_REPORT_LUNS 0xc101
150 
151 /* Can queue up to this number of commands. Typically commands that
152  * have a non-zero delay are queued. */
153 #define SCSI_DEBUG_CANQUEUE  255
154 
155 static int scsi_debug_add_host = DEF_NUM_HOST;
156 static int scsi_debug_delay = DEF_DELAY;
157 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
158 static int scsi_debug_every_nth = DEF_EVERY_NTH;
159 static int scsi_debug_max_luns = DEF_MAX_LUNS;
160 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
161 static int scsi_debug_num_parts = DEF_NUM_PARTS;
162 static int scsi_debug_no_uld = 0;
163 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
164 static int scsi_debug_opts = DEF_OPTS;
165 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
166 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
167 static int scsi_debug_dsense = DEF_D_SENSE;
168 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
169 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
170 static int scsi_debug_fake_rw = DEF_FAKE_RW;
171 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
172 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
173 static int scsi_debug_dix = DEF_DIX;
174 static int scsi_debug_dif = DEF_DIF;
175 static int scsi_debug_guard = DEF_GUARD;
176 static int scsi_debug_ato = DEF_ATO;
177 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
178 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
179 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
180 static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
181 static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
182 static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
183 static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
184 
185 static int scsi_debug_cmnd_count = 0;
186 
187 #define DEV_READONLY(TGT)      (0)
188 #define DEV_REMOVEABLE(TGT)    (0)
189 
190 static unsigned int sdebug_store_sectors;
191 static sector_t sdebug_capacity;	/* in sectors */
192 
193 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
194    may still need them */
195 static int sdebug_heads;		/* heads per disk */
196 static int sdebug_cylinders_per;	/* cylinders per surface */
197 static int sdebug_sectors_per;		/* sectors per cylinder */
198 
199 #define SDEBUG_MAX_PARTS 4
200 
201 #define SDEBUG_SENSE_LEN 32
202 
203 #define SCSI_DEBUG_MAX_CMD_LEN 32
204 
205 struct sdebug_dev_info {
206 	struct list_head dev_list;
207 	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
208 	unsigned int channel;
209 	unsigned int target;
210 	unsigned int lun;
211 	struct sdebug_host_info *sdbg_host;
212 	unsigned int wlun;
213 	char reset;
214 	char stopped;
215 	char used;
216 };
217 
218 struct sdebug_host_info {
219 	struct list_head host_list;
220 	struct Scsi_Host *shost;
221 	struct device dev;
222 	struct list_head dev_info_list;
223 };
224 
225 #define to_sdebug_host(d)	\
226 	container_of(d, struct sdebug_host_info, dev)
227 
228 static LIST_HEAD(sdebug_host_list);
229 static DEFINE_SPINLOCK(sdebug_host_list_lock);
230 
231 typedef void (* done_funct_t) (struct scsi_cmnd *);
232 
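/*
 * One slot per command whose completion is deferred (scsi_debug_delay > 0):
 * when cmnd_timer fires, the saved done_funct is called on a_cmnd with the
 * saved scsi_result.  Slots are guarded by queued_arr_lock (declared below).
 */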
233 struct sdebug_queued_cmd {
234 	int in_use;
235 	struct timer_list cmnd_timer;
236 	done_funct_t done_funct;
237 	struct scsi_cmnd * a_cmnd;
238 	int scsi_result;
239 };
240 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
241 
242 static unsigned char * fake_storep;	/* ramdisk storage */
243 static unsigned char *dif_storep;	/* protection info */
244 static void *map_storep;		/* provisioning map */
245 
246 static unsigned long map_size;
247 static int num_aborts = 0;
248 static int num_dev_resets = 0;
249 static int num_bus_resets = 0;
250 static int num_host_resets = 0;
251 static int dix_writes;
252 static int dix_reads;
253 static int dif_errors;
254 
255 static DEFINE_SPINLOCK(queued_arr_lock);
256 static DEFINE_RWLOCK(atomic_rw);
257 
258 static char sdebug_proc_name[] = "scsi_debug";
259 
260 static struct bus_type pseudo_lld_bus;
261 
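/* dif_storep holds one 8-byte DIF tuple (struct sd_dif_tuple: guard, app
 * and ref tags) per sector, so the byte offset into it is sector * 8. */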
262 static inline sector_t dif_offset(sector_t sector)
263 {
264 	return sector << 3;
265 }
266 
267 static struct device_driver sdebug_driverfs_driver = {
268 	.name 		= sdebug_proc_name,
269 	.bus		= &pseudo_lld_bus,
270 };
271 
272 static const int check_condition_result =
273 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
274 
275 static const int illegal_condition_result =
276 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
277 
278 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
279 				    0, 0, 0x2, 0x4b};
280 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
281 			           0, 0, 0x0, 0x0};
282 
283 static int sdebug_add_adapter(void);
284 static void sdebug_remove_adapter(void);
285 
286 static void sdebug_max_tgts_luns(void)
287 {
288 	struct sdebug_host_info *sdbg_host;
289 	struct Scsi_Host *hpnt;
290 
291 	spin_lock(&sdebug_host_list_lock);
292 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
293 		hpnt = sdbg_host->shost;
294 		if ((hpnt->this_id >= 0) &&
295 		    (scsi_debug_num_tgts > hpnt->this_id))
296 			hpnt->max_id = scsi_debug_num_tgts + 1;
297 		else
298 			hpnt->max_id = scsi_debug_num_tgts;
299 		/* scsi_debug_max_luns; */
300 		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
301 	}
302 	spin_unlock(&sdebug_host_list_lock);
303 }
304 
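/* Build sense data for this device in devip->sense_buff: descriptor format
 * (response code 0x72) when scsi_debug_dsense is set, otherwise fixed
 * format (0x70).  A later REQUEST SENSE returns this buffer. */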
305 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
306 			    int asc, int asq)
307 {
308 	unsigned char *sbuff;
309 
310 	sbuff = devip->sense_buff;
311 	memset(sbuff, 0, SDEBUG_SENSE_LEN);
312 
313 	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
314 
315 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
316 		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
317 		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
318 }
319 
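/* Extract the starting LBA, transfer length and (for 32-byte CDBs) the
 * expected initial reference tag (ei_lba) from the CDB.  The hand-rolled
 * shifts below are plain big-endian decodes of the relevant CDB bytes,
 * i.e. equivalent to get_unaligned_be64()/get_unaligned_be32() on them. */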
320 static void get_data_transfer_info(unsigned char *cmd,
321 				   unsigned long long *lba, unsigned int *num,
322 				   u32 *ei_lba)
323 {
324 	*ei_lba = 0;
325 
326 	switch (*cmd) {
327 	case VARIABLE_LENGTH_CMD:
328 		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
329 			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
330 			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
331 			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
332 
333 		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
334 			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
335 
336 		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
337 			(u32)cmd[28] << 24;
338 		break;
339 
340 	case WRITE_SAME_16:
341 	case WRITE_16:
342 	case READ_16:
343 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
344 			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
345 			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
346 			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
347 
348 		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
349 			(u32)cmd[10] << 24;
350 		break;
351 	case WRITE_12:
352 	case READ_12:
353 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
354 			(u32)cmd[2] << 24;
355 
356 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
357 			(u32)cmd[6] << 24;
358 		break;
359 	case WRITE_SAME:
360 	case WRITE_10:
361 	case READ_10:
362 	case XDWRITEREAD_10:
363 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
364 			(u32)cmd[2] << 24;
365 
366 		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
367 		break;
368 	case WRITE_6:
369 	case READ_6:
370 		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
371 			(u32)(cmd[1] & 0x1f) << 16;
372 		*num = (0 == cmd[4]) ? 256 : cmd[4];
373 		break;
374 	default:
375 		break;
376 	}
377 }
378 
379 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
380 {
381 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
382 		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
383 	}
384 	return -EINVAL;
385 	/* return -ENOTTY; // correct return but upsets fdisk */
386 }
387 
388 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
389 			   struct sdebug_dev_info * devip)
390 {
391 	if (devip->reset) {
392 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
393 			printk(KERN_INFO "scsi_debug: Reporting Unit "
394 			       "attention: power on reset\n");
395 		devip->reset = 0;
396 		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
397 		return check_condition_result;
398 	}
399 	if ((0 == reset_only) && devip->stopped) {
400 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
401 			printk(KERN_INFO "scsi_debug: Reporting Not "
402 			       "ready: initializing command required\n");
403 		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
404 				0x2);
405 		return check_condition_result;
406 	}
407 	return 0;
408 }
409 
410 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
411 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
412 				int arr_len)
413 {
414 	int act_len;
415 	struct scsi_data_buffer *sdb = scsi_in(scp);
416 
417 	if (!sdb->length)
418 		return 0;
419 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
420 		return (DID_ERROR << 16);
421 
422 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
423 				      arr, arr_len);
424 	if (sdb->resid)
425 		sdb->resid -= act_len;
426 	else
427 		sdb->resid = scsi_bufflen(scp) - act_len;
428 
429 	return 0;
430 }
431 
432 /* Returns number of bytes fetched into 'arr' or -1 if error. */
433 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
434 			       int arr_len)
435 {
436 	if (!scsi_bufflen(scp))
437 		return 0;
438 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
439 		return -1;
440 
441 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
442 }
443 
444 
445 static const char * inq_vendor_id = "Linux   ";
446 static const char * inq_product_id = "scsi_debug      ";
447 static const char * inq_product_rev = "0004";
448 
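/* Build the Device Identification VPD page (0x83) payload: a T10 vendor ID
 * designator plus NAA-5, relative-port and SCSI-name-string designators for
 * the logical unit, its target port, port group and target device.  Returns
 * the payload length; the caller fills in the 4-byte page header. */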
449 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
450 			   int target_dev_id, int dev_id_num,
451 			   const char * dev_id_str,
452 			   int dev_id_str_len)
453 {
454 	int num, port_a;
455 	char b[32];
456 
457 	port_a = target_dev_id + 1;
458 	/* T10 vendor identifier field format (faked) */
459 	arr[0] = 0x2;	/* ASCII */
460 	arr[1] = 0x1;
461 	arr[2] = 0x0;
462 	memcpy(&arr[4], inq_vendor_id, 8);
463 	memcpy(&arr[12], inq_product_id, 16);
464 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
465 	num = 8 + 16 + dev_id_str_len;
466 	arr[3] = num;
467 	num += 4;
468 	if (dev_id_num >= 0) {
469 		/* NAA-5, Logical unit identifier (binary) */
470 		arr[num++] = 0x1;	/* binary (not necessarily sas) */
471 		arr[num++] = 0x3;	/* PIV=0, lu, naa */
472 		arr[num++] = 0x0;
473 		arr[num++] = 0x8;
474 		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
475 		arr[num++] = 0x33;
476 		arr[num++] = 0x33;
477 		arr[num++] = 0x30;
478 		arr[num++] = (dev_id_num >> 24);
479 		arr[num++] = (dev_id_num >> 16) & 0xff;
480 		arr[num++] = (dev_id_num >> 8) & 0xff;
481 		arr[num++] = dev_id_num & 0xff;
482 		/* Target relative port number */
483 		arr[num++] = 0x61;	/* proto=sas, binary */
484 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
485 		arr[num++] = 0x0;	/* reserved */
486 		arr[num++] = 0x4;	/* length */
487 		arr[num++] = 0x0;	/* reserved */
488 		arr[num++] = 0x0;	/* reserved */
489 		arr[num++] = 0x0;
490 		arr[num++] = 0x1;	/* relative port A */
491 	}
492 	/* NAA-5, Target port identifier */
493 	arr[num++] = 0x61;	/* proto=sas, binary */
494 	arr[num++] = 0x93;	/* piv=1, target port, naa */
495 	arr[num++] = 0x0;
496 	arr[num++] = 0x8;
497 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
498 	arr[num++] = 0x22;
499 	arr[num++] = 0x22;
500 	arr[num++] = 0x20;
501 	arr[num++] = (port_a >> 24);
502 	arr[num++] = (port_a >> 16) & 0xff;
503 	arr[num++] = (port_a >> 8) & 0xff;
504 	arr[num++] = port_a & 0xff;
505 	/* NAA-5, Target port group identifier */
506 	arr[num++] = 0x61;	/* proto=sas, binary */
507 	arr[num++] = 0x95;	/* piv=1, target port group id */
508 	arr[num++] = 0x0;
509 	arr[num++] = 0x4;
510 	arr[num++] = 0;
511 	arr[num++] = 0;
512 	arr[num++] = (port_group_id >> 8) & 0xff;
513 	arr[num++] = port_group_id & 0xff;
514 	/* NAA-5, Target device identifier */
515 	arr[num++] = 0x61;	/* proto=sas, binary */
516 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
517 	arr[num++] = 0x0;
518 	arr[num++] = 0x8;
519 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
520 	arr[num++] = 0x22;
521 	arr[num++] = 0x22;
522 	arr[num++] = 0x20;
523 	arr[num++] = (target_dev_id >> 24);
524 	arr[num++] = (target_dev_id >> 16) & 0xff;
525 	arr[num++] = (target_dev_id >> 8) & 0xff;
526 	arr[num++] = target_dev_id & 0xff;
527 	/* SCSI name string: Target device identifier */
528 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
529 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
530 	arr[num++] = 0x0;
531 	arr[num++] = 24;
532 	memcpy(arr + num, "naa.52222220", 12);
533 	num += 12;
534 	snprintf(b, sizeof(b), "%08X", target_dev_id);
535 	memcpy(arr + num, b, 8);
536 	num += 8;
537 	memset(arr + num, 0, 4);
538 	num += 4;
539 	return num;
540 }
541 
542 
543 static unsigned char vpd84_data[] = {
544 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
545     0x22,0x22,0x22,0x0,0xbb,0x1,
546     0x22,0x22,0x22,0x0,0xbb,0x2,
547 };
548 
549 static int inquiry_evpd_84(unsigned char * arr)
550 {
551 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
552 	return sizeof(vpd84_data);
553 }
554 
555 static int inquiry_evpd_85(unsigned char * arr)
556 {
557 	int num = 0;
558 	const char * na1 = "https://www.kernel.org/config";
559 	const char * na2 = "http://www.kernel.org/log";
560 	int plen, olen;
561 
562 	arr[num++] = 0x1;	/* lu, storage config */
563 	arr[num++] = 0x0;	/* reserved */
564 	arr[num++] = 0x0;
565 	olen = strlen(na1);
566 	plen = olen + 1;
567 	if (plen % 4)
568 		plen = ((plen / 4) + 1) * 4;
569 	arr[num++] = plen;	/* length, null terminated, padded */
570 	memcpy(arr + num, na1, olen);
571 	memset(arr + num + olen, 0, plen - olen);
572 	num += plen;
573 
574 	arr[num++] = 0x4;	/* lu, logging */
575 	arr[num++] = 0x0;	/* reserved */
576 	arr[num++] = 0x0;
577 	olen = strlen(na2);
578 	plen = olen + 1;
579 	if (plen % 4)
580 		plen = ((plen / 4) + 1) * 4;
581 	arr[num++] = plen;	/* length, null terminated, padded */
582 	memcpy(arr + num, na2, olen);
583 	memset(arr + num + olen, 0, plen - olen);
584 	num += plen;
585 
586 	return num;
587 }
588 
589 /* SCSI ports VPD page */
590 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
591 {
592 	int num = 0;
593 	int port_a, port_b;
594 
595 	port_a = target_dev_id + 1;
596 	port_b = port_a + 1;
597 	arr[num++] = 0x0;	/* reserved */
598 	arr[num++] = 0x0;	/* reserved */
599 	arr[num++] = 0x0;
600 	arr[num++] = 0x1;	/* relative port 1 (primary) */
601 	memset(arr + num, 0, 6);
602 	num += 6;
603 	arr[num++] = 0x0;
604 	arr[num++] = 12;	/* length tp descriptor */
605 	/* naa-5 target port identifier (A) */
606 	arr[num++] = 0x61;	/* proto=sas, binary */
607 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
608 	arr[num++] = 0x0;	/* reserved */
609 	arr[num++] = 0x8;	/* length */
610 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
611 	arr[num++] = 0x22;
612 	arr[num++] = 0x22;
613 	arr[num++] = 0x20;
614 	arr[num++] = (port_a >> 24);
615 	arr[num++] = (port_a >> 16) & 0xff;
616 	arr[num++] = (port_a >> 8) & 0xff;
617 	arr[num++] = port_a & 0xff;
618 
619 	arr[num++] = 0x0;	/* reserved */
620 	arr[num++] = 0x0;	/* reserved */
621 	arr[num++] = 0x0;
622 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
623 	memset(arr + num, 0, 6);
624 	num += 6;
625 	arr[num++] = 0x0;
626 	arr[num++] = 12;	/* length tp descriptor */
627 	/* naa-5 target port identifier (B) */
628 	arr[num++] = 0x61;	/* proto=sas, binary */
629 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
630 	arr[num++] = 0x0;	/* reserved */
631 	arr[num++] = 0x8;	/* length */
632 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
633 	arr[num++] = 0x22;
634 	arr[num++] = 0x22;
635 	arr[num++] = 0x20;
636 	arr[num++] = (port_b >> 24);
637 	arr[num++] = (port_b >> 16) & 0xff;
638 	arr[num++] = (port_b >> 8) & 0xff;
639 	arr[num++] = port_b & 0xff;
640 
641 	return num;
642 }
643 
644 
645 static unsigned char vpd89_data[] = {
646 /* from 4th byte */ 0,0,0,0,
647 'l','i','n','u','x',' ',' ',' ',
648 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
649 '1','2','3','4',
650 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
651 0xec,0,0,0,
652 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
653 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
654 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
655 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
656 0x53,0x41,
657 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
658 0x20,0x20,
659 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
660 0x10,0x80,
661 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
662 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
663 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
664 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
665 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
666 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
667 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
668 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
669 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
670 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
671 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
672 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
673 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
674 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
675 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
676 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
687 };
688 
689 static int inquiry_evpd_89(unsigned char * arr)
690 {
691 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
692 	return sizeof(vpd89_data);
693 }
694 
695 
696 /* Block limits VPD page (SBC-3) */
697 static unsigned char vpdb0_data[] = {
698 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
699 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 };
703 
704 static int inquiry_evpd_b0(unsigned char * arr)
705 {
706 	unsigned int gran;
707 
708 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
709 
710 	/* Optimal transfer length granularity */
711 	gran = 1 << scsi_debug_physblk_exp;
712 	arr[2] = (gran >> 8) & 0xff;
713 	arr[3] = gran & 0xff;
714 
715 	/* Maximum Transfer Length */
716 	if (sdebug_store_sectors > 0x400) {
717 		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
718 		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
719 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
720 		arr[7] = sdebug_store_sectors & 0xff;
721 	}
722 
723 	/* Optimal Transfer Length */
724 	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
725 
726 	if (scsi_debug_unmap_max_desc) {
727 		unsigned int blocks;
728 
729 		if (scsi_debug_unmap_max_blocks)
730 			blocks = scsi_debug_unmap_max_blocks;
731 		else
732 			blocks = 0xffffffff;
733 
734 		/* Maximum Unmap LBA Count */
735 		put_unaligned_be32(blocks, &arr[16]);
736 
737 		/* Maximum Unmap Block Descriptor Count */
738 		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
739 	}
740 
741 	/* Unmap Granularity Alignment */
742 	if (scsi_debug_unmap_alignment) {
743 		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
744 		arr[28] |= 0x80; /* UGAVALID */
745 	}
746 
747 	/* Optimal Unmap Granularity */
748 	if (scsi_debug_unmap_granularity) {
749 		put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
750 		return 0x3c; /* Mandatory page length for thin provisioning */
751 	}
752 
753 	return sizeof(vpdb0_data);
754 }
755 
756 /* Block device characteristics VPD page (SBC-3) */
757 static int inquiry_evpd_b1(unsigned char *arr)
758 {
759 	memset(arr, 0, 0x3c);
760 	arr[0] = 0;
761 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
762 	arr[2] = 0;
763 	arr[3] = 5;	/* less than 1.8" */
764 
765 	return 0x3c;
766 }
767 
768 #define SDEBUG_LONG_INQ_SZ 96
769 #define SDEBUG_MAX_INQ_ARR_SZ 584
770 
771 static int resp_inquiry(struct scsi_cmnd * scp, int target,
772 			struct sdebug_dev_info * devip)
773 {
774 	unsigned char pq_pdt;
775 	unsigned char * arr;
776 	unsigned char *cmd = (unsigned char *)scp->cmnd;
777 	int alloc_len, n, ret;
778 
779 	alloc_len = (cmd[3] << 8) + cmd[4];
780 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
781 	if (! arr)
782 		return DID_REQUEUE << 16;
783 	if (devip->wlun)
784 		pq_pdt = 0x1e;	/* present, wlun */
785 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
786 		pq_pdt = 0x7f;	/* not present, no device type */
787 	else
788 		pq_pdt = (scsi_debug_ptype & 0x1f);
789 	arr[0] = pq_pdt;
790 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
791 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
792 			       	0);
793 		kfree(arr);
794 		return check_condition_result;
795 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
796 		int lu_id_num, port_group_id, target_dev_id, len;
797 		char lu_id_str[6];
798 		int host_no = devip->sdbg_host->shost->host_no;
799 
800 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
801 		    (devip->channel & 0x7f);
802 		if (0 == scsi_debug_vpd_use_hostno)
803 			host_no = 0;
804 		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
805 			    (devip->target * 1000) + devip->lun);
806 		target_dev_id = ((host_no + 1) * 2000) +
807 				 (devip->target * 1000) - 3;
808 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
809 		if (0 == cmd[2]) { /* supported vital product data pages */
810 			arr[1] = cmd[2];	/*sanity */
811 			n = 4;
812 			arr[n++] = 0x0;   /* this page */
813 			arr[n++] = 0x80;  /* unit serial number */
814 			arr[n++] = 0x83;  /* device identification */
815 			arr[n++] = 0x84;  /* software interface ident. */
816 			arr[n++] = 0x85;  /* management network addresses */
817 			arr[n++] = 0x86;  /* extended inquiry */
818 			arr[n++] = 0x87;  /* mode page policy */
819 			arr[n++] = 0x88;  /* SCSI ports */
820 			arr[n++] = 0x89;  /* ATA information */
821 			arr[n++] = 0xb0;  /* Block limits (SBC) */
822 			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
823 			arr[3] = n - 4;	  /* number of supported VPD pages */
824 		} else if (0x80 == cmd[2]) { /* unit serial number */
825 			arr[1] = cmd[2];	/*sanity */
826 			arr[3] = len;
827 			memcpy(&arr[4], lu_id_str, len);
828 		} else if (0x83 == cmd[2]) { /* device identification */
829 			arr[1] = cmd[2];	/*sanity */
830 			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
831 						 target_dev_id, lu_id_num,
832 						 lu_id_str, len);
833 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
834 			arr[1] = cmd[2];	/*sanity */
835 			arr[3] = inquiry_evpd_84(&arr[4]);
836 		} else if (0x85 == cmd[2]) { /* Management network addresses */
837 			arr[1] = cmd[2];	/*sanity */
838 			arr[3] = inquiry_evpd_85(&arr[4]);
839 		} else if (0x86 == cmd[2]) { /* extended inquiry */
840 			arr[1] = cmd[2];	/*sanity */
841 			arr[3] = 0x3c;	/* number of following entries */
842 			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
843 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
844 			else if (scsi_debug_dif)
845 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
846 			else
847 				arr[4] = 0x0;   /* no protection stuff */
848 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
849 		} else if (0x87 == cmd[2]) { /* mode page policy */
850 			arr[1] = cmd[2];	/*sanity */
851 			arr[3] = 0x8;	/* number of following entries */
852 			arr[4] = 0x2;	/* disconnect-reconnect mp */
853 			arr[6] = 0x80;	/* mlus, shared */
854 			arr[8] = 0x18;	 /* protocol specific lu */
855 			arr[10] = 0x82;	 /* mlus, per initiator port */
856 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
857 			arr[1] = cmd[2];	/*sanity */
858 			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
859 		} else if (0x89 == cmd[2]) { /* ATA information */
860 			arr[1] = cmd[2];        /*sanity */
861 			n = inquiry_evpd_89(&arr[4]);
862 			arr[2] = (n >> 8);
863 			arr[3] = (n & 0xff);
864 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
865 			arr[1] = cmd[2];        /*sanity */
866 			arr[3] = inquiry_evpd_b0(&arr[4]);
867 		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
868 			arr[1] = cmd[2];        /*sanity */
869 			arr[3] = inquiry_evpd_b1(&arr[4]);
870 		} else {
871 			/* Illegal request, invalid field in cdb */
872 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
873 					INVALID_FIELD_IN_CDB, 0);
874 			kfree(arr);
875 			return check_condition_result;
876 		}
877 		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
878 		ret = fill_from_dev_buffer(scp, arr,
879 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
880 		kfree(arr);
881 		return ret;
882 	}
883 	/* drops through here for a standard inquiry */
884 	arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0;	/* Removable disk */
885 	arr[2] = scsi_debug_scsi_level;
886 	arr[3] = 2;    /* response_data_format==2 */
887 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
888 	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
889 	if (0 == scsi_debug_vpd_use_hostno)
890 		arr[5] |= 0x10; /* claim: implicit TPGS */
891 	arr[6] = 0x10; /* claim: MultiP */
892 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
893 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
894 	memcpy(&arr[8], inq_vendor_id, 8);
895 	memcpy(&arr[16], inq_product_id, 16);
896 	memcpy(&arr[32], inq_product_rev, 4);
897 	/* version descriptors (2 bytes each) follow */
898 	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
899 	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
900 	n = 62;
901 	if (scsi_debug_ptype == 0) {
902 		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
903 	} else if (scsi_debug_ptype == 1) {
904 		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
905 	}
906 	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
907 	ret = fill_from_dev_buffer(scp, arr,
908 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
909 	kfree(arr);
910 	return ret;
911 }
912 
913 static int resp_requests(struct scsi_cmnd * scp,
914 			 struct sdebug_dev_info * devip)
915 {
916 	unsigned char * sbuff;
917 	unsigned char *cmd = (unsigned char *)scp->cmnd;
918 	unsigned char arr[SDEBUG_SENSE_LEN];
919 	int want_dsense;
920 	int len = 18;
921 
922 	memset(arr, 0, sizeof(arr));
923 	if (devip->reset == 1)
924 		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
925 	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
926 	sbuff = devip->sense_buff;
927 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
928 		if (want_dsense) {
929 			arr[0] = 0x72;
930 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
931 			arr[2] = THRESHOLD_EXCEEDED;
932 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
933 		} else {
934 			arr[0] = 0x70;
935 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
936 			arr[7] = 0xa;   	/* 18 byte sense buffer */
937 			arr[12] = THRESHOLD_EXCEEDED;
938 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
939 		}
940 	} else {
941 		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
942 		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
943 			/* DESC bit set and sense_buff in fixed format */
944 			memset(arr, 0, sizeof(arr));
945 			arr[0] = 0x72;
946 			arr[1] = sbuff[2];     /* sense key */
947 			arr[2] = sbuff[12];    /* asc */
948 			arr[3] = sbuff[13];    /* ascq */
949 			len = 8;
950 		}
951 	}
952 	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
953 	return fill_from_dev_buffer(scp, arr, len);
954 }
955 
956 static int resp_start_stop(struct scsi_cmnd * scp,
957 			   struct sdebug_dev_info * devip)
958 {
959 	unsigned char *cmd = (unsigned char *)scp->cmnd;
960 	int power_cond, errsts, start;
961 
962 	if ((errsts = check_readiness(scp, 1, devip)))
963 		return errsts;
964 	power_cond = (cmd[4] & 0xf0) >> 4;
965 	if (power_cond) {
966 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
967 			       	0);
968 		return check_condition_result;
969 	}
970 	start = cmd[4] & 1;
971 	if (start == devip->stopped)
972 		devip->stopped = !start;
973 	return 0;
974 }
975 
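/* Reported capacity: virtual_gb, when set, scales the advertised size
 * without allocating more ramdisk; accesses beyond the real store then
 * wrap modulo sdebug_store_sectors (see do_device_access below). */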
976 static sector_t get_sdebug_capacity(void)
977 {
978 	if (scsi_debug_virtual_gb > 0)
979 		return (sector_t)scsi_debug_virtual_gb *
980 			(1073741824 / scsi_debug_sector_size);
981 	else
982 		return sdebug_store_sectors;
983 }
984 
985 #define SDEBUG_READCAP_ARR_SZ 8
986 static int resp_readcap(struct scsi_cmnd * scp,
987 			struct sdebug_dev_info * devip)
988 {
989 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
990 	unsigned int capac;
991 	int errsts;
992 
993 	if ((errsts = check_readiness(scp, 1, devip)))
994 		return errsts;
995 	/* following just in case virtual_gb changed */
996 	sdebug_capacity = get_sdebug_capacity();
997 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
998 	if (sdebug_capacity < 0xffffffff) {
999 		capac = (unsigned int)sdebug_capacity - 1;
1000 		arr[0] = (capac >> 24);
1001 		arr[1] = (capac >> 16) & 0xff;
1002 		arr[2] = (capac >> 8) & 0xff;
1003 		arr[3] = capac & 0xff;
1004 	} else {
1005 		arr[0] = 0xff;
1006 		arr[1] = 0xff;
1007 		arr[2] = 0xff;
1008 		arr[3] = 0xff;
1009 	}
1010 	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1011 	arr[7] = scsi_debug_sector_size & 0xff;
1012 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1013 }
1014 
1015 #define SDEBUG_READCAP16_ARR_SZ 32
1016 static int resp_readcap16(struct scsi_cmnd * scp,
1017 			  struct sdebug_dev_info * devip)
1018 {
1019 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1020 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1021 	unsigned long long capac;
1022 	int errsts, k, alloc_len;
1023 
1024 	if ((errsts = check_readiness(scp, 1, devip)))
1025 		return errsts;
1026 	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1027 		     + cmd[13]);
1028 	/* following just in case virtual_gb changed */
1029 	sdebug_capacity = get_sdebug_capacity();
1030 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1031 	capac = sdebug_capacity - 1;
1032 	for (k = 0; k < 8; ++k, capac >>= 8)
1033 		arr[7 - k] = capac & 0xff;
1034 	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1035 	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1036 	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1037 	arr[11] = scsi_debug_sector_size & 0xff;
1038 	arr[13] = scsi_debug_physblk_exp & 0xf;
1039 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1040 
1041 	if (scsi_debug_unmap_granularity)
1042 		arr[14] |= 0x80; /* TPE */
1043 
1044 	arr[15] = scsi_debug_lowest_aligned & 0xff;
1045 
1046 	if (scsi_debug_dif) {
1047 		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1048 		arr[12] |= 1; /* PROT_EN */
1049 	}
1050 
1051 	return fill_from_dev_buffer(scp, arr,
1052 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1053 }
1054 
1055 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1056 
1057 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1058 			      struct sdebug_dev_info * devip)
1059 {
1060 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1061 	unsigned char * arr;
1062 	int host_no = devip->sdbg_host->shost->host_no;
1063 	int n, ret, alen, rlen;
1064 	int port_group_a, port_group_b, port_a, port_b;
1065 
1066 	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1067 		+ cmd[9]);
1068 
1069 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1070 	if (! arr)
1071 		return DID_REQUEUE << 16;
1072 	/*
1073 	 * EVPD page 0x88 states we have two ports, one
1074 	 * real and a fake port with no device connected.
1075 	 * So we create two port groups with one port each
1076 	 * and set the group with port B to unavailable.
1077 	 */
1078 	port_a = 0x1; /* relative port A */
1079 	port_b = 0x2; /* relative port B */
1080 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1081 	    (devip->channel & 0x7f);
1082 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1083 	    (devip->channel & 0x7f) + 0x80;
1084 
1085 	/*
1086 	 * The asymmetric access state is cycled according to the host_id.
1087 	 */
1088 	n = 4;
1089 	if (0 == scsi_debug_vpd_use_hostno) {
1090 		arr[n++] = host_no % 3; /* Asymm access state */
1091 		arr[n++] = 0x0F; /* claim: all states are supported */
1092 	} else {
1093 		arr[n++] = 0x0; /* Active/Optimized path */
1094 		arr[n++] = 0x01; /* claim: only support active/optimized paths */
1095 	}
1096 	arr[n++] = (port_group_a >> 8) & 0xff;
1097 	arr[n++] = port_group_a & 0xff;
1098 	arr[n++] = 0;    /* Reserved */
1099 	arr[n++] = 0;    /* Status code */
1100 	arr[n++] = 0;    /* Vendor unique */
1101 	arr[n++] = 0x1;  /* One port per group */
1102 	arr[n++] = 0;    /* Reserved */
1103 	arr[n++] = 0;    /* Reserved */
1104 	arr[n++] = (port_a >> 8) & 0xff;
1105 	arr[n++] = port_a & 0xff;
1106 	arr[n++] = 3;    /* Port unavailable */
1107 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1108 	arr[n++] = (port_group_b >> 8) & 0xff;
1109 	arr[n++] = port_group_b & 0xff;
1110 	arr[n++] = 0;    /* Reserved */
1111 	arr[n++] = 0;    /* Status code */
1112 	arr[n++] = 0;    /* Vendor unique */
1113 	arr[n++] = 0x1;  /* One port per group */
1114 	arr[n++] = 0;    /* Reserved */
1115 	arr[n++] = 0;    /* Reserved */
1116 	arr[n++] = (port_b >> 8) & 0xff;
1117 	arr[n++] = port_b & 0xff;
1118 
1119 	rlen = n - 4;
1120 	arr[0] = (rlen >> 24) & 0xff;
1121 	arr[1] = (rlen >> 16) & 0xff;
1122 	arr[2] = (rlen >> 8) & 0xff;
1123 	arr[3] = rlen & 0xff;
1124 
1125 	/*
1126 	 * Return the smallest of:
1127 	 * - the allocation length from the CDB
1128 	 * - the constructed response length
1129 	 * - the maximum array size
1130 	 */
1131 	rlen = min(alen, n);
1132 	ret = fill_from_dev_buffer(scp, arr,
1133 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1134 	kfree(arr);
1135 	return ret;
1136 }
1137 
1138 /* <<Following mode page info copied from ST318451LW>> */
1139 
1140 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1141 {	/* Read-Write Error Recovery page for mode_sense */
1142 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1143 					5, 0, 0xff, 0xff};
1144 
1145 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1146 	if (1 == pcontrol)
1147 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1148 	return sizeof(err_recov_pg);
1149 }
1150 
1151 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1152 { 	/* Disconnect-Reconnect page for mode_sense */
1153 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1154 					 0, 0, 0, 0, 0, 0, 0, 0};
1155 
1156 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1157 	if (1 == pcontrol)
1158 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1159 	return sizeof(disconnect_pg);
1160 }
1161 
1162 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1163 {       /* Format device page for mode_sense */
1164 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1165 				     0, 0, 0, 0, 0, 0, 0, 0,
1166 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1167 
1168 	memcpy(p, format_pg, sizeof(format_pg));
1169 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1170 	p[11] = sdebug_sectors_per & 0xff;
1171 	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1172 	p[13] = scsi_debug_sector_size & 0xff;
1173 	if (DEV_REMOVEABLE(target))
1174 		p[20] |= 0x20; /* should agree with INQUIRY */
1175 	if (1 == pcontrol)
1176 		memset(p + 2, 0, sizeof(format_pg) - 2);
1177 	return sizeof(format_pg);
1178 }
1179 
1180 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1181 { 	/* Caching page for mode_sense */
1182 	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1183 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1184 
1185 	memcpy(p, caching_pg, sizeof(caching_pg));
1186 	if (1 == pcontrol)
1187 		memset(p + 2, 0, sizeof(caching_pg) - 2);
1188 	return sizeof(caching_pg);
1189 }
1190 
1191 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1192 { 	/* Control mode page for mode_sense */
1193 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1194 				        0, 0, 0, 0};
1195 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1196 				     0, 0, 0x2, 0x4b};
1197 
1198 	if (scsi_debug_dsense)
1199 		ctrl_m_pg[2] |= 0x4;
1200 	else
1201 		ctrl_m_pg[2] &= ~0x4;
1202 
1203 	if (scsi_debug_ato)
1204 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1205 
1206 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1207 	if (1 == pcontrol)
1208 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1209 	else if (2 == pcontrol)
1210 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1211 	return sizeof(ctrl_m_pg);
1212 }
1213 
1214 
1215 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1216 {	/* Informational Exceptions control mode page for mode_sense */
1217 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1218 				       0, 0, 0x0, 0x0};
1219 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1220 				      0, 0, 0x0, 0x0};
1221 
1222 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1223 	if (1 == pcontrol)
1224 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1225 	else if (2 == pcontrol)
1226 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1227 	return sizeof(iec_m_pg);
1228 }
1229 
1230 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1231 {	/* SAS SSP mode page - short format for mode_sense */
1232 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1233 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1234 
1235 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1236 	if (1 == pcontrol)
1237 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1238 	return sizeof(sas_sf_m_pg);
1239 }
1240 
1241 
1242 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1243 			      int target_dev_id)
1244 {	/* SAS phy control and discover mode page for mode_sense */
1245 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1246 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1247 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1248 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1249 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1250 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1251 		    0, 0, 0, 0, 0, 0, 0, 0,
1252 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1253 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1254 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1255 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1256 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1257 		    0, 0, 0, 0, 0, 0, 0, 0,
1258 		};
1259 	int port_a, port_b;
1260 
1261 	port_a = target_dev_id + 1;
1262 	port_b = port_a + 1;
1263 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1264 	p[20] = (port_a >> 24);
1265 	p[21] = (port_a >> 16) & 0xff;
1266 	p[22] = (port_a >> 8) & 0xff;
1267 	p[23] = port_a & 0xff;
1268 	p[48 + 20] = (port_b >> 24);
1269 	p[48 + 21] = (port_b >> 16) & 0xff;
1270 	p[48 + 22] = (port_b >> 8) & 0xff;
1271 	p[48 + 23] = port_b & 0xff;
1272 	if (1 == pcontrol)
1273 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1274 	return sizeof(sas_pcd_m_pg);
1275 }
1276 
1277 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1278 {	/* SAS SSP shared protocol specific port mode subpage */
1279 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1280 		    0, 0, 0, 0, 0, 0, 0, 0,
1281 		};
1282 
1283 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1284 	if (1 == pcontrol)
1285 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1286 	return sizeof(sas_sha_m_pg);
1287 }
1288 
1289 #define SDEBUG_MAX_MSENSE_SZ 256
1290 
1291 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1292 			   struct sdebug_dev_info * devip)
1293 {
1294 	unsigned char dbd, llbaa;
1295 	int pcontrol, pcode, subpcode, bd_len;
1296 	unsigned char dev_spec;
1297 	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1298 	unsigned char * ap;
1299 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1300 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1301 
1302 	if ((errsts = check_readiness(scp, 1, devip)))
1303 		return errsts;
1304 	dbd = !!(cmd[1] & 0x8);
1305 	pcontrol = (cmd[2] & 0xc0) >> 6;
1306 	pcode = cmd[2] & 0x3f;
1307 	subpcode = cmd[3];
1308 	msense_6 = (MODE_SENSE == cmd[0]);
1309 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1310 	if ((0 == scsi_debug_ptype) && (0 == dbd))
1311 		bd_len = llbaa ? 16 : 8;
1312 	else
1313 		bd_len = 0;
1314 	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1315 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1316 	if (0x3 == pcontrol) {  /* Saving values not supported */
1317 		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1318 			       	0);
1319 		return check_condition_result;
1320 	}
1321 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1322 			(devip->target * 1000) - 3;
1323 	/* set DPOFUA bit for disks */
1324 	if (0 == scsi_debug_ptype)
1325 		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1326 	else
1327 		dev_spec = 0x0;
1328 	if (msense_6) {
1329 		arr[2] = dev_spec;
1330 		arr[3] = bd_len;
1331 		offset = 4;
1332 	} else {
1333 		arr[3] = dev_spec;
1334 		if (16 == bd_len)
1335 			arr[4] = 0x1;	/* set LONGLBA bit */
1336 		arr[7] = bd_len;	/* assume 255 or less */
1337 		offset = 8;
1338 	}
1339 	ap = arr + offset;
1340 	if ((bd_len > 0) && (!sdebug_capacity))
1341 		sdebug_capacity = get_sdebug_capacity();
1342 
1343 	if (8 == bd_len) {
1344 		if (sdebug_capacity > 0xfffffffe) {
1345 			ap[0] = 0xff;
1346 			ap[1] = 0xff;
1347 			ap[2] = 0xff;
1348 			ap[3] = 0xff;
1349 		} else {
1350 			ap[0] = (sdebug_capacity >> 24) & 0xff;
1351 			ap[1] = (sdebug_capacity >> 16) & 0xff;
1352 			ap[2] = (sdebug_capacity >> 8) & 0xff;
1353 			ap[3] = sdebug_capacity & 0xff;
1354 		}
1355 		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1356 		ap[7] = scsi_debug_sector_size & 0xff;
1357 		offset += bd_len;
1358 		ap = arr + offset;
1359 	} else if (16 == bd_len) {
1360 		unsigned long long capac = sdebug_capacity;
1361 
1362 		for (k = 0; k < 8; ++k, capac >>= 8)
1363 			ap[7 - k] = capac & 0xff;
1364 		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1365 		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1366 		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1367 		ap[15] = scsi_debug_sector_size & 0xff;
1368 		offset += bd_len;
1369 		ap = arr + offset;
1370 	}
1371 
1372 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1373 		/* TODO: Control Extension page */
1374 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1375 			       	0);
1376 		return check_condition_result;
1377 	}
1378 	switch (pcode) {
1379 	case 0x1:	/* Read-Write error recovery page, direct access */
1380 		len = resp_err_recov_pg(ap, pcontrol, target);
1381 		offset += len;
1382 		break;
1383 	case 0x2:	/* Disconnect-Reconnect page, all devices */
1384 		len = resp_disconnect_pg(ap, pcontrol, target);
1385 		offset += len;
1386 		break;
1387 	case 0x3:	/* Format device page, direct access */
1388 		len = resp_format_pg(ap, pcontrol, target);
1389 		offset += len;
1390 		break;
1391 	case 0x8:	/* Caching page, direct access */
1392 		len = resp_caching_pg(ap, pcontrol, target);
1393 		offset += len;
1394 		break;
1395 	case 0xa:	/* Control Mode page, all devices */
1396 		len = resp_ctrl_m_pg(ap, pcontrol, target);
1397 		offset += len;
1398 		break;
1399 	case 0x19:	/* if spc==1 then sas phy, control+discover */
1400 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1401 		        mk_sense_buffer(devip, ILLEGAL_REQUEST,
1402 					INVALID_FIELD_IN_CDB, 0);
1403 			return check_condition_result;
1404 	        }
1405 		len = 0;
1406 		if ((0x0 == subpcode) || (0xff == subpcode))
1407 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1408 		if ((0x1 == subpcode) || (0xff == subpcode))
1409 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1410 						  target_dev_id);
1411 		if ((0x2 == subpcode) || (0xff == subpcode))
1412 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1413 		offset += len;
1414 		break;
1415 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1416 		len = resp_iec_m_pg(ap, pcontrol, target);
1417 		offset += len;
1418 		break;
1419 	case 0x3f:	/* Read all Mode pages */
1420 		if ((0 == subpcode) || (0xff == subpcode)) {
1421 			len = resp_err_recov_pg(ap, pcontrol, target);
1422 			len += resp_disconnect_pg(ap + len, pcontrol, target);
1423 			len += resp_format_pg(ap + len, pcontrol, target);
1424 			len += resp_caching_pg(ap + len, pcontrol, target);
1425 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1426 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1427 			if (0xff == subpcode) {
1428 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1429 						  target, target_dev_id);
1430 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1431 			}
1432 			len += resp_iec_m_pg(ap + len, pcontrol, target);
1433 		} else {
1434 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1435 					INVALID_FIELD_IN_CDB, 0);
1436 			return check_condition_result;
1437                 }
1438 		offset += len;
1439 		break;
1440 	default:
1441 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1442 			       	0);
1443 		return check_condition_result;
1444 	}
1445 	if (msense_6)
1446 		arr[0] = offset - 1;
1447 	else {
1448 		arr[0] = ((offset - 2) >> 8) & 0xff;
1449 		arr[1] = (offset - 2) & 0xff;
1450 	}
1451 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1452 }
1453 
1454 #define SDEBUG_MAX_MSELECT_SZ 512
1455 
1456 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1457 			    struct sdebug_dev_info * devip)
1458 {
1459 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1460 	int param_len, res, errsts, mpage;
1461 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1462 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1463 
1464 	if ((errsts = check_readiness(scp, 1, devip)))
1465 		return errsts;
1466 	memset(arr, 0, sizeof(arr));
1467 	pf = cmd[1] & 0x10;
1468 	sp = cmd[1] & 0x1;
1469 	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1470 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1471 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1472 				INVALID_FIELD_IN_CDB, 0);
1473 		return check_condition_result;
1474 	}
1475 	res = fetch_to_dev_buffer(scp, arr, param_len);
1476 	if (-1 == res)
1477 		return (DID_ERROR << 16);
1478 	else if ((res < param_len) &&
1479 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1480 		printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1481 		       " IO sent=%d bytes\n", param_len, res);
1482 	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1483 	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1484 	if (md_len > 2) {
1485 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1486 				INVALID_FIELD_IN_PARAM_LIST, 0);
1487 		return check_condition_result;
1488 	}
1489 	off = bd_len + (mselect6 ? 4 : 8);
1490 	mpage = arr[off] & 0x3f;
1491 	ps = !!(arr[off] & 0x80);
1492 	if (ps) {
1493 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1494 				INVALID_FIELD_IN_PARAM_LIST, 0);
1495 		return check_condition_result;
1496 	}
1497 	spf = !!(arr[off] & 0x40);
1498 	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1499 		       (arr[off + 1] + 2);
1500 	if ((pg_len + off) > param_len) {
1501 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1502 				PARAMETER_LIST_LENGTH_ERR, 0);
1503 		return check_condition_result;
1504 	}
1505 	switch (mpage) {
1506 	case 0xa:      /* Control Mode page */
1507 		if (ctrl_m_pg[1] == arr[off + 1]) {
1508 			memcpy(ctrl_m_pg + 2, arr + off + 2,
1509 			       sizeof(ctrl_m_pg) - 2);
1510 			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1511 			return 0;
1512 		}
1513 		break;
1514 	case 0x1c:      /* Informational Exceptions Mode page */
1515 		if (iec_m_pg[1] == arr[off + 1]) {
1516 			memcpy(iec_m_pg + 2, arr + off + 2,
1517 			       sizeof(iec_m_pg) - 2);
1518 			return 0;
1519 		}
1520 		break;
1521 	default:
1522 		break;
1523 	}
1524 	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1525 			INVALID_FIELD_IN_PARAM_LIST, 0);
1526 	return check_condition_result;
1527 }
1528 
1529 static int resp_temp_l_pg(unsigned char * arr)
1530 {
1531 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1532 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1533 		};
1534 
1535         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1536         return sizeof(temp_l_pg);
1537 }
1538 
1539 static int resp_ie_l_pg(unsigned char * arr)
1540 {
1541 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1542 		};
1543 
1544         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1545 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1546 		arr[4] = THRESHOLD_EXCEEDED;
1547 		arr[5] = 0xff;
1548 	}
1549         return sizeof(ie_l_pg);
1550 }
1551 
1552 #define SDEBUG_MAX_LSENSE_SZ 512
1553 
1554 static int resp_log_sense(struct scsi_cmnd * scp,
1555                           struct sdebug_dev_info * devip)
1556 {
1557 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1558 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1559 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1560 
1561 	if ((errsts = check_readiness(scp, 1, devip)))
1562 		return errsts;
1563 	memset(arr, 0, sizeof(arr));
1564 	ppc = cmd[1] & 0x2;
1565 	sp = cmd[1] & 0x1;
1566 	if (ppc || sp) {
1567 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1568 				INVALID_FIELD_IN_CDB, 0);
1569 		return check_condition_result;
1570 	}
1571 	pcontrol = (cmd[2] & 0xc0) >> 6;
1572 	pcode = cmd[2] & 0x3f;
1573 	subpcode = cmd[3] & 0xff;
1574 	alloc_len = (cmd[7] << 8) + cmd[8];
1575 	arr[0] = pcode;
1576 	if (0 == subpcode) {
1577 		switch (pcode) {
1578 		case 0x0:	/* Supported log pages log page */
1579 			n = 4;
1580 			arr[n++] = 0x0;		/* this page */
1581 			arr[n++] = 0xd;		/* Temperature */
1582 			arr[n++] = 0x2f;	/* Informational exceptions */
1583 			arr[3] = n - 4;
1584 			break;
1585 		case 0xd:	/* Temperature log page */
1586 			arr[3] = resp_temp_l_pg(arr + 4);
1587 			break;
1588 		case 0x2f:	/* Informational exceptions log page */
1589 			arr[3] = resp_ie_l_pg(arr + 4);
1590 			break;
1591 		default:
1592 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1593 					INVALID_FIELD_IN_CDB, 0);
1594 			return check_condition_result;
1595 		}
1596 	} else if (0xff == subpcode) {
1597 		arr[0] |= 0x40;
1598 		arr[1] = subpcode;
1599 		switch (pcode) {
1600 		case 0x0:	/* Supported log pages and subpages log page */
1601 			n = 4;
1602 			arr[n++] = 0x0;
1603 			arr[n++] = 0x0;		/* 0,0 page */
1604 			arr[n++] = 0x0;
1605 			arr[n++] = 0xff;	/* this page */
1606 			arr[n++] = 0xd;
1607 			arr[n++] = 0x0;		/* Temperature */
1608 			arr[n++] = 0x2f;
1609 			arr[n++] = 0x0;	/* Informational exceptions */
1610 			arr[3] = n - 4;
1611 			break;
1612 		case 0xd:	/* Temperature subpages */
1613 			n = 4;
1614 			arr[n++] = 0xd;
1615 			arr[n++] = 0x0;		/* Temperature */
1616 			arr[3] = n - 4;
1617 			break;
1618 		case 0x2f:	/* Informational exceptions subpages */
1619 			n = 4;
1620 			arr[n++] = 0x2f;
1621 			arr[n++] = 0x0;		/* Informational exceptions */
1622 			arr[3] = n - 4;
1623 			break;
1624 		default:
1625 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1626 					INVALID_FIELD_IN_CDB, 0);
1627 			return check_condition_result;
1628 		}
1629 	} else {
1630 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1631 				INVALID_FIELD_IN_CDB, 0);
1632 		return check_condition_result;
1633 	}
1634 	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1635 	return fill_from_dev_buffer(scp, arr,
1636 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1637 }
1638 
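/* Check that the LBA range of a media access command lies within the
 * simulated capacity and that the transfer length does not exceed the
 * backing store. Returns 0 or a CHECK CONDITION result.
 */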
1639 static int check_device_access_params(struct sdebug_dev_info *devi,
1640 				      unsigned long long lba, unsigned int num)
1641 {
1642 	if (lba + num > sdebug_capacity) {
1643 		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1644 		return check_condition_result;
1645 	}
1646 	/* transfer length excessive (tie in to block limits VPD page) */
1647 	if (num > sdebug_store_sectors) {
1648 		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1649 		return check_condition_result;
1650 	}
1651 	return 0;
1652 }
1653 
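/* Copy 'num' blocks between the command's data buffer and the shared
 * ramdisk (fake_storep), reading or writing according to 'write'. The
 * LBA is reduced modulo sdebug_store_sectors so that all simulated LUNs
 * can share a store smaller than the advertised capacity.
 */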
1654 static int do_device_access(struct scsi_cmnd *scmd,
1655 			    struct sdebug_dev_info *devi,
1656 			    unsigned long long lba, unsigned int num, int write)
1657 {
1658 	int ret;
1659 	unsigned int block, rest = 0;
1660 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
1661 
1662 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1663 
1664 	block = do_div(lba, sdebug_store_sectors);
1665 	if (block + num > sdebug_store_sectors)
1666 		rest = block + num - sdebug_store_sectors;
1667 
1668 	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1669 		   (num - rest) * scsi_debug_sector_size);
1670 	if (!ret && rest)
1671 		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1672 
1673 	return ret;
1674 }
1675 
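/* DIF verification for READ when DIX is enabled: recompute each sector's
 * guard tag (CRC or IP checksum, per the guard parameter), check the
 * reference tag for type 1/2 protection, then copy the stored protection
 * tuples into the command's protection scatter-gather list. Returns 0 on
 * success, 0x01 on a guard error or 0x03 on a reference tag error.
 */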
1676 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1677 			    unsigned int sectors, u32 ei_lba)
1678 {
1679 	unsigned int i, resid;
1680 	struct scatterlist *psgl;
1681 	struct sd_dif_tuple *sdt;
1682 	sector_t sector;
1683 	sector_t tmp_sec = start_sec;
1684 	void *paddr;
1685 
1686 	start_sec = do_div(tmp_sec, sdebug_store_sectors);
1687 
1688 	sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1689 
1690 	for (i = 0 ; i < sectors ; i++) {
1691 		u16 csum;
1692 
1693 		if (sdt[i].app_tag == 0xffff)
1694 			continue;
1695 
1696 		sector = start_sec + i;
1697 
1698 		switch (scsi_debug_guard) {
1699 		case 1:
1700 			csum = ip_compute_csum(fake_storep +
1701 					       sector * scsi_debug_sector_size,
1702 					       scsi_debug_sector_size);
1703 			break;
1704 		case 0:
1705 			csum = crc_t10dif(fake_storep +
1706 					  sector * scsi_debug_sector_size,
1707 					  scsi_debug_sector_size);
1708 			csum = cpu_to_be16(csum);
1709 			break;
1710 		default:
1711 			BUG();
1712 		}
1713 
1714 		if (sdt[i].guard_tag != csum) {
1715 			printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1716 			       " rcvd 0x%04x, data 0x%04x\n", __func__,
1717 			       (unsigned long)sector,
1718 			       be16_to_cpu(sdt[i].guard_tag),
1719 			       be16_to_cpu(csum));
1720 			dif_errors++;
1721 			return 0x01;
1722 		}
1723 
1724 		if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1725 		    be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1726 			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1727 			       __func__, (unsigned long)sector);
1728 			dif_errors++;
1729 			return 0x03;
1730 		}
1731 
1732 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1733 		    be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1734 			printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1735 			       __func__, (unsigned long)sector);
1736 			dif_errors++;
1737 			return 0x03;
1738 		}
1739 
1740 		ei_lba++;
1741 	}
1742 
1743 	resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1744 	sector = start_sec;
1745 
1746 	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1747 		int len = min(psgl->length, resid);
1748 
1749 		paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1750 		memcpy(paddr, dif_storep + dif_offset(sector), len);
1751 
1752 		sector += len >> 3;
1753 		if (sector >= sdebug_store_sectors) {
1754 			/* Force wrap */
1755 			tmp_sec = sector;
1756 			sector = do_div(tmp_sec, sdebug_store_sectors);
1757 		}
1758 		resid -= len;
1759 		kunmap_atomic(paddr, KM_IRQ0);
1760 	}
1761 
1762 	dix_reads++;
1763 
1764 	return 0;
1765 }
1766 
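/* READ handler. If the medium error option is set and the range covers
 * OPT_MEDIUM_ERR_ADDR, an unrecovered read error is reported; otherwise
 * optional DIF checking is done and data is copied from the ramdisk
 * under the shared read lock.
 */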
1767 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1768 		     unsigned int num, struct sdebug_dev_info *devip,
1769 		     u32 ei_lba)
1770 {
1771 	unsigned long iflags;
1772 	int ret;
1773 
1774 	ret = check_device_access_params(devip, lba, num);
1775 	if (ret)
1776 		return ret;
1777 
1778 	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1779 	    (lba <= OPT_MEDIUM_ERR_ADDR) &&
1780 	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1781 		/* claim unrecoverable read error */
1782 		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
1783 				0);
1784 		/* set info field and valid bit for fixed descriptor */
1785 		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1786 			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1787 			ret = OPT_MEDIUM_ERR_ADDR;
1788 			devip->sense_buff[3] = (ret >> 24) & 0xff;
1789 			devip->sense_buff[4] = (ret >> 16) & 0xff;
1790 			devip->sense_buff[5] = (ret >> 8) & 0xff;
1791 			devip->sense_buff[6] = ret & 0xff;
1792 		}
1793 		return check_condition_result;
1794 	}
1795 
1796 	/* DIX + T10 DIF */
1797 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1798 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1799 
1800 		if (prot_ret) {
1801 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1802 			return illegal_condition_result;
1803 		}
1804 	}
1805 
1806 	read_lock_irqsave(&atomic_rw, iflags);
1807 	ret = do_device_access(SCpnt, devip, lba, num, 0);
1808 	read_unlock_irqrestore(&atomic_rw, iflags);
1809 	return ret;
1810 }
1811 
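/* Hex/ASCII dump of one sector, used by prot_verify_write() when a
 * protection check fails.
 */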
1812 static void dump_sector(unsigned char *buf, int len)
1813 {
1814 	int i, j;
1815 
1816 	printk(KERN_ERR ">>> Sector Dump <<<\n");
1817 
1818 	for (i = 0 ; i < len ; i += 16) {
1819 		printk(KERN_ERR "%04d: ", i);
1820 
1821 		for (j = 0 ; j < 16 ; j++) {
1822 			unsigned char c = buf[i+j];
1823 			if (c >= 0x20 && c < 0x7e)
1824 				printk(" %c ", buf[i+j]);
1825 			else
1826 				printk("%02x ", buf[i+j]);
1827 		}
1828 
1829 		printk("\n");
1830 	}
1831 }
1832 
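/* DIF verification for WRITE when DIX is enabled: walk the data and
 * protection scatter-gather lists in step, verify the guard and (for
 * type 1/2) reference tags of every sector, and store the verified
 * 8-byte tuples into dif_storep. Return values match prot_verify_read().
 */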
1833 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1834 			     unsigned int sectors, u32 ei_lba)
1835 {
1836 	int i, j, ret;
1837 	struct sd_dif_tuple *sdt;
1838 	struct scatterlist *dsgl = scsi_sglist(SCpnt);
1839 	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1840 	void *daddr, *paddr;
1841 	sector_t tmp_sec = start_sec;
1842 	sector_t sector;
1843 	int ppage_offset;
1844 	unsigned short csum;
1845 
1846 	sector = do_div(tmp_sec, sdebug_store_sectors);
1847 
1848 	BUG_ON(scsi_sg_count(SCpnt) == 0);
1849 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1850 
1851 	paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1852 	ppage_offset = 0;
1853 
1854 	/* For each data page */
1855 	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1856 		daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1857 
1858 		/* For each sector-sized chunk in data page */
1859 		for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1860 
1861 			/* If we're at the end of the current
1862 			 * protection page advance to the next one
1863 			 */
1864 			if (ppage_offset >= psgl->length) {
1865 				kunmap_atomic(paddr, KM_IRQ1);
1866 				psgl = sg_next(psgl);
1867 				BUG_ON(psgl == NULL);
1868 				paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1869 					+ psgl->offset;
1870 				ppage_offset = 0;
1871 			}
1872 
1873 			sdt = paddr + ppage_offset;
1874 
1875 			switch (scsi_debug_guard) {
1876 			case 1:
1877 				csum = ip_compute_csum(daddr,
1878 						       scsi_debug_sector_size);
1879 				break;
1880 			case 0:
1881 				csum = cpu_to_be16(crc_t10dif(daddr,
1882 						      scsi_debug_sector_size));
1883 				break;
1884 			default:
1885 				BUG();
1886 				ret = 0;
1887 				goto out;
1888 			}
1889 
1890 			if (sdt->guard_tag != csum) {
1891 				printk(KERN_ERR
1892 				       "%s: GUARD check failed on sector %lu " \
1893 				       "rcvd 0x%04x, calculated 0x%04x\n",
1894 				       __func__, (unsigned long)sector,
1895 				       be16_to_cpu(sdt->guard_tag),
1896 				       be16_to_cpu(csum));
1897 				ret = 0x01;
1898 				dump_sector(daddr, scsi_debug_sector_size);
1899 				goto out;
1900 			}
1901 
1902 			if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1903 			    be32_to_cpu(sdt->ref_tag)
1904 			    != (start_sec & 0xffffffff)) {
1905 				printk(KERN_ERR
1906 				       "%s: REF check failed on sector %lu\n",
1907 				       __func__, (unsigned long)sector);
1908 				ret = 0x03;
1909 				dump_sector(daddr, scsi_debug_sector_size);
1910 				goto out;
1911 			}
1912 
1913 			if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1914 			    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1915 				printk(KERN_ERR
1916 				       "%s: REF check failed on sector %lu\n",
1917 				       __func__, (unsigned long)sector);
1918 				ret = 0x03;
1919 				dump_sector(daddr, scsi_debug_sector_size);
1920 				goto out;
1921 			}
1922 
1923 			/* Would be great to copy this in bigger
1924 			 * chunks.  However, for the sake of
1925 			 * correctness we need to verify each sector
1926 			 * before writing it to "stable" storage
1927 			 */
1928 			memcpy(dif_storep + dif_offset(sector), sdt, 8);
1929 
1930 			sector++;
1931 
1932 			if (sector == sdebug_store_sectors)
1933 				sector = 0;	/* Force wrap */
1934 
1935 			start_sec++;
1936 			ei_lba++;
1937 			daddr += scsi_debug_sector_size;
1938 			ppage_offset += sizeof(struct sd_dif_tuple);
1939 		}
1940 
1941 		kunmap_atomic(daddr, KM_IRQ0);
1942 	}
1943 
1944 	kunmap_atomic(paddr, KM_IRQ1);
1945 
1946 	dix_writes++;
1947 
1948 	return 0;
1949 
1950 out:
1951 	dif_errors++;
1952 	kunmap_atomic(daddr, KM_IRQ0);
1953 	kunmap_atomic(paddr, KM_IRQ1);
1954 	return ret;
1955 }
1956 
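/* Thin provisioning helpers. map_storep holds one bit per unmap
 * granularity block: map_state() reports whether the block containing
 * 'lba' is mapped and how many following LBAs share that state, while
 * map_region()/unmap_region() set and clear the bits for an LBA range.
 */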
1957 static unsigned int map_state(sector_t lba, unsigned int *num)
1958 {
1959 	unsigned int granularity, alignment, mapped;
1960 	sector_t block, next, end;
1961 
1962 	granularity = scsi_debug_unmap_granularity;
1963 	alignment = granularity - scsi_debug_unmap_alignment;
1964 	block = lba + alignment;
1965 	do_div(block, granularity);
1966 
1967 	mapped = test_bit(block, map_storep);
1968 
1969 	if (mapped)
1970 		next = find_next_zero_bit(map_storep, map_size, block);
1971 	else
1972 		next = find_next_bit(map_storep, map_size, block);
1973 
1974 	end = next * granularity - scsi_debug_unmap_alignment;
1975 	*num = end - lba;
1976 
1977 	return mapped;
1978 }
1979 
1980 static void map_region(sector_t lba, unsigned int len)
1981 {
1982 	unsigned int granularity, alignment;
1983 	sector_t end = lba + len;
1984 
1985 	granularity = scsi_debug_unmap_granularity;
1986 	alignment = granularity - scsi_debug_unmap_alignment;
1987 
1988 	while (lba < end) {
1989 		sector_t block, rem;
1990 
1991 		block = lba + alignment;
1992 		rem = do_div(block, granularity);
1993 
1994 		if (block < map_size)
1995 			set_bit(block, map_storep);
1996 
1997 		lba += granularity - rem;
1998 	}
1999 }
2000 
2001 static void unmap_region(sector_t lba, unsigned int len)
2002 {
2003 	unsigned int granularity, alignment;
2004 	sector_t end = lba + len;
2005 
2006 	granularity = scsi_debug_unmap_granularity;
2007 	alignment = granularity - scsi_debug_unmap_alignment;
2008 
2009 	while (lba < end) {
2010 		sector_t block, rem;
2011 
2012 		block = lba + alignment;
2013 		rem = do_div(block, granularity);
2014 
2015 		if (rem == 0 && lba + granularity <= end &&
2016 		    block < map_size)
2017 			clear_bit(block, map_storep);
2018 
2019 		lba += granularity - rem;
2020 	}
2021 }
2022 
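/* WRITE handler: validate the range, do optional DIF checking, copy the
 * data into the ramdisk under the write lock and mark the range as
 * mapped when thin provisioning is enabled.
 */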
2023 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2024 		      unsigned int num, struct sdebug_dev_info *devip,
2025 		      u32 ei_lba)
2026 {
2027 	unsigned long iflags;
2028 	int ret;
2029 
2030 	ret = check_device_access_params(devip, lba, num);
2031 	if (ret)
2032 		return ret;
2033 
2034 	/* DIX + T10 DIF */
2035 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2036 		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2037 
2038 		if (prot_ret) {
2039 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2040 			return illegal_condition_result;
2041 		}
2042 	}
2043 
2044 	write_lock_irqsave(&atomic_rw, iflags);
2045 	ret = do_device_access(SCpnt, devip, lba, num, 1);
2046 	if (scsi_debug_unmap_granularity)
2047 		map_region(lba, num);
2048 	write_unlock_irqrestore(&atomic_rw, iflags);
2049 	if (-1 == ret)
2050 		return (DID_ERROR << 16);
2051 	else if ((ret < (num * scsi_debug_sector_size)) &&
2052 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2053 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2054 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2055 
2056 	return 0;
2057 }
2058 
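/* WRITE SAME handler. With the UNMAP bit set the range is simply
 * unmapped; otherwise one logical block is fetched from the initiator
 * and replicated across the remaining blocks of the range.
 */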
2059 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2060 		      unsigned int num, struct sdebug_dev_info *devip,
2061 			   u32 ei_lba, unsigned int unmap)
2062 {
2063 	unsigned long iflags;
2064 	unsigned long long i;
2065 	int ret;
2066 
2067 	ret = check_device_access_params(devip, lba, num);
2068 	if (ret)
2069 		return ret;
2070 
2071 	write_lock_irqsave(&atomic_rw, iflags);
2072 
2073 	if (unmap && scsi_debug_unmap_granularity) {
2074 		unmap_region(lba, num);
2075 		goto out;
2076 	}
2077 
2078 	/* Else fetch one logical block */
2079 	ret = fetch_to_dev_buffer(scmd,
2080 				  fake_storep + (lba * scsi_debug_sector_size),
2081 				  scsi_debug_sector_size);
2082 
2083 	if (-1 == ret) {
2084 		write_unlock_irqrestore(&atomic_rw, iflags);
2085 		return (DID_ERROR << 16);
2086 	} else if ((ret < (num * scsi_debug_sector_size)) &&
2087 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2088 		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2089 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2090 
2091 	/* Copy first sector to remaining blocks */
2092 	for (i = 1 ; i < num ; i++)
2093 		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2094 		       fake_storep + (lba * scsi_debug_sector_size),
2095 		       scsi_debug_sector_size);
2096 
2097 	if (scsi_debug_unmap_granularity)
2098 		map_region(lba, num);
2099 out:
2100 	write_unlock_irqrestore(&atomic_rw, iflags);
2101 
2102 	return 0;
2103 }
2104 
2105 struct unmap_block_desc {
2106 	__be64	lba;
2107 	__be32	blocks;
2108 	__be32	__reserved;
2109 };
2110 
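/* UNMAP handler. The parameter list is an 8-byte header followed by
 * 16-byte block descriptors (struct unmap_block_desc above); each
 * descriptor's range is validated and then cleared from the
 * provisioning map.
 */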
2111 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2112 {
2113 	unsigned char *buf;
2114 	struct unmap_block_desc *desc;
2115 	unsigned int i, payload_len, descriptors;
2116 	int ret;
2117 
2118 	ret = check_readiness(scmd, 1, devip);
2119 	if (ret)
2120 		return ret;
2121 
2122 	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2123 	BUG_ON(scsi_bufflen(scmd) != payload_len);
2124 
2125 	descriptors = (payload_len - 8) / 16;
2126 
2127 	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2128 	if (!buf)
2129 		return check_condition_result;
2130 
2131 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2132 
2133 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2134 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2135 
2136 	desc = (void *)&buf[8];
2137 
2138 	for (i = 0 ; i < descriptors ; i++) {
2139 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2140 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2141 
2142 		ret = check_device_access_params(devip, lba, num);
2143 		if (ret)
2144 			goto out;
2145 
2146 		unmap_region(lba, num);
2147 	}
2148 
2149 	ret = 0;
2150 
2151 out:
2152 	kfree(buf);
2153 
2154 	return ret;
2155 }
2156 
2157 #define SDEBUG_GET_LBA_STATUS_LEN 32
2158 
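/* GET LBA STATUS handler: returns a single descriptor indicating whether
 * the requested LBA is currently mapped and the number of blocks that
 * share that state.
 */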
2159 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2160 			       struct sdebug_dev_info * devip)
2161 {
2162 	unsigned long long lba;
2163 	unsigned int alloc_len, mapped, num;
2164 	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2165 	int ret;
2166 
2167 	ret = check_readiness(scmd, 1, devip);
2168 	if (ret)
2169 		return ret;
2170 
2171 	lba = get_unaligned_be64(&scmd->cmnd[2]);
2172 	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2173 
2174 	if (alloc_len < 24)
2175 		return 0;
2176 
2177 	ret = check_device_access_params(devip, lba, 1);
2178 	if (ret)
2179 		return ret;
2180 
2181 	mapped = map_state(lba, &num);
2182 
2183 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2184 	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
2185 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2186 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2187 	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2188 
2189 	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2190 }
2191 
2192 #define SDEBUG_RLUN_ARR_SZ 256
2193 
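/* REPORT LUNS handler: reports up to max_luns LUNs per target (optionally
 * skipping LUN 0) plus the REPORT LUNS well known LUN when the SELECT
 * REPORT field asks for it.
 */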
2194 static int resp_report_luns(struct scsi_cmnd * scp,
2195 			    struct sdebug_dev_info * devip)
2196 {
2197 	unsigned int alloc_len;
2198 	int lun_cnt, i, upper, num, n, wlun, lun;
2199 	unsigned char *cmd = (unsigned char *)scp->cmnd;
2200 	int select_report = (int)cmd[2];
2201 	struct scsi_lun *one_lun;
2202 	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2203 	unsigned char * max_addr;
2204 
2205 	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2206 	if ((alloc_len < 4) || (select_report > 2)) {
2207 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2208 			       	0);
2209 		return check_condition_result;
2210 	}
2211 	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2212 	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2213 	lun_cnt = scsi_debug_max_luns;
2214 	if (1 == select_report)
2215 		lun_cnt = 0;
2216 	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2217 		--lun_cnt;
2218 	wlun = (select_report > 0) ? 1 : 0;
2219 	num = lun_cnt + wlun;
2220 	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2221 	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2222 	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2223 			    sizeof(struct scsi_lun)), num);
2224 	if (n < num) {
2225 		wlun = 0;
2226 		lun_cnt = n;
2227 	}
2228 	one_lun = (struct scsi_lun *) &arr[8];
2229 	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2230 	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2231              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2232 	     i++, lun++) {
2233 		upper = (lun >> 8) & 0x3f;
2234 		if (upper)
2235 			one_lun[i].scsi_lun[0] =
2236 			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2237 		one_lun[i].scsi_lun[1] = lun & 0xff;
2238 	}
2239 	if (wlun) {
2240 		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2241 		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2242 		i++;
2243 	}
2244 	alloc_len = (unsigned char *)(one_lun + i) - arr;
2245 	return fill_from_dev_buffer(scp, arr,
2246 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2247 }
2248 
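/* XDWRITEREAD support: XOR the data sent by the initiator into the
 * command's bidirectional in-buffer, leaving the XOR of the two data
 * streams to be returned to the initiator.
 */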
2249 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2250 			    unsigned int num, struct sdebug_dev_info *devip)
2251 {
2252 	int i, j, ret = -1;
2253 	unsigned char *kaddr, *buf;
2254 	unsigned int offset;
2255 	struct scatterlist *sg;
2256 	struct scsi_data_buffer *sdb = scsi_in(scp);
2257 
2258 	/* it would be better to avoid this temporary buffer */
2259 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2260 	if (!buf)
2261 		return ret;
2262 
2263 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2264 
2265 	offset = 0;
2266 	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2267 		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2268 		if (!kaddr)
2269 			goto out;
2270 
2271 		for (j = 0; j < sg->length; j++)
2272 			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
2273 
2274 		offset += sg->length;
2275 		kunmap_atomic(kaddr, KM_USER0);
2276 	}
2277 	ret = 0;
2278 out:
2279 	kfree(buf);
2280 
2281 	return ret;
2282 }
2283 
2284 /* When timer goes off this function is called. */
2285 static void timer_intr_handler(unsigned long indx)
2286 {
2287 	struct sdebug_queued_cmd * sqcp;
2288 	unsigned long iflags;
2289 
2290 	if (indx >= scsi_debug_max_queue) {
2291 		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2292 		       "large\n");
2293 		return;
2294 	}
2295 	spin_lock_irqsave(&queued_arr_lock, iflags);
2296 	sqcp = &queued_arr[(int)indx];
2297 	if (! sqcp->in_use) {
2298 		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2299 		       "interrupt\n");
2300 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2301 		return;
2302 	}
2303 	sqcp->in_use = 0;
2304 	if (sqcp->done_funct) {
2305 		sqcp->a_cmnd->result = sqcp->scsi_result;
2306 		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2307 	}
2308 	sqcp->done_funct = NULL;
2309 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2310 }
2311 
2312 
2313 static struct sdebug_dev_info *
2314 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2315 {
2316 	struct sdebug_dev_info *devip;
2317 
2318 	devip = kzalloc(sizeof(*devip), flags);
2319 	if (devip) {
2320 		devip->sdbg_host = sdbg_host;
2321 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2322 	}
2323 	return devip;
2324 }
2325 
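/* Find the sdebug_dev_info for a scsi_device, reusing a free slot or
 * allocating a new one if this <channel, target, lun> has not been seen
 * before, and initialise its sense buffer.
 */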
2326 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2327 {
2328 	struct sdebug_host_info * sdbg_host;
2329 	struct sdebug_dev_info * open_devip = NULL;
2330 	struct sdebug_dev_info * devip =
2331 			(struct sdebug_dev_info *)sdev->hostdata;
2332 
2333 	if (devip)
2334 		return devip;
2335 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2336 	if (!sdbg_host) {
2337                 printk(KERN_ERR "Host info NULL\n");
2338 		return NULL;
2339         }
2340 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2341 		if ((devip->used) && (devip->channel == sdev->channel) &&
2342                     (devip->target == sdev->id) &&
2343                     (devip->lun == sdev->lun))
2344                         return devip;
2345 		else {
2346 			if ((!devip->used) && (!open_devip))
2347 				open_devip = devip;
2348 		}
2349 	}
2350 	if (!open_devip) { /* try and make a new one */
2351 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2352 		if (!open_devip) {
2353 			printk(KERN_ERR "%s: out of memory at line %d\n",
2354 				__func__, __LINE__);
2355 			return NULL;
2356 		}
2357 	}
2358 
2359 	open_devip->channel = sdev->channel;
2360 	open_devip->target = sdev->id;
2361 	open_devip->lun = sdev->lun;
2362 	open_devip->sdbg_host = sdbg_host;
2363 	open_devip->reset = 1;
2364 	open_devip->used = 1;
2365 	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2366 	if (scsi_debug_dsense)
2367 		open_devip->sense_buff[0] = 0x72;
2368 	else {
2369 		open_devip->sense_buff[0] = 0x70;
2370 		open_devip->sense_buff[7] = 0xa;
2371 	}
2372 	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2373 		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2374 
2375 	return open_devip;
2376 }
2377 
2378 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2379 {
2380 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2381 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2382 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2383 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2384 	return 0;
2385 }
2386 
2387 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2388 {
2389 	struct sdebug_dev_info *devip;
2390 
2391 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2392 		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2393 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2394 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2395 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2396 	devip = devInfoReg(sdp);
2397 	if (NULL == devip)
2398 		return 1;	/* no resources, will be marked offline */
2399 	sdp->hostdata = devip;
2400 	if (sdp->host->cmd_per_lun)
2401 		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2402 					sdp->host->cmd_per_lun);
2403 	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2404 	if (scsi_debug_no_uld)
2405 		sdp->no_uld_attach = 1;
2406 	return 0;
2407 }
2408 
2409 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2410 {
2411 	struct sdebug_dev_info *devip =
2412 		(struct sdebug_dev_info *)sdp->hostdata;
2413 
2414 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2415 		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2416 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2417 	if (devip) {
2418 		/* make this slot available for re-use */
2419 		devip->used = 0;
2420 		sdp->hostdata = NULL;
2421 	}
2422 }
2423 
2424 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2425 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2426 {
2427 	unsigned long iflags;
2428 	int k;
2429 	struct sdebug_queued_cmd *sqcp;
2430 
2431 	spin_lock_irqsave(&queued_arr_lock, iflags);
2432 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2433 		sqcp = &queued_arr[k];
2434 		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2435 			del_timer_sync(&sqcp->cmnd_timer);
2436 			sqcp->in_use = 0;
2437 			sqcp->a_cmnd = NULL;
2438 			break;
2439 		}
2440 	}
2441 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2442 	return (k < scsi_debug_max_queue) ? 1 : 0;
2443 }
2444 
2445 /* Deletes (stops) timers of all queued commands */
2446 static void stop_all_queued(void)
2447 {
2448 	unsigned long iflags;
2449 	int k;
2450 	struct sdebug_queued_cmd *sqcp;
2451 
2452 	spin_lock_irqsave(&queued_arr_lock, iflags);
2453 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2454 		sqcp = &queued_arr[k];
2455 		if (sqcp->in_use && sqcp->a_cmnd) {
2456 			del_timer_sync(&sqcp->cmnd_timer);
2457 			sqcp->in_use = 0;
2458 			sqcp->a_cmnd = NULL;
2459 		}
2460 	}
2461 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2462 }
2463 
2464 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2465 {
2466 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2467 		printk(KERN_INFO "scsi_debug: abort\n");
2468 	++num_aborts;
2469 	stop_queued_cmnd(SCpnt);
2470 	return SUCCESS;
2471 }
2472 
2473 static int scsi_debug_biosparam(struct scsi_device *sdev,
2474 		struct block_device * bdev, sector_t capacity, int *info)
2475 {
2476 	int res;
2477 	unsigned char *buf;
2478 
2479 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2480 		printk(KERN_INFO "scsi_debug: biosparam\n");
2481 	buf = scsi_bios_ptable(bdev);
2482 	if (buf) {
2483 		res = scsi_partsize(buf, capacity,
2484 				    &info[2], &info[0], &info[1]);
2485 		kfree(buf);
2486 		if (! res)
2487 			return res;
2488 	}
2489 	info[0] = sdebug_heads;
2490 	info[1] = sdebug_sectors_per;
2491 	info[2] = sdebug_cylinders_per;
2492 	return 0;
2493 }
2494 
2495 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2496 {
2497 	struct sdebug_dev_info * devip;
2498 
2499 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2500 		printk(KERN_INFO "scsi_debug: device_reset\n");
2501 	++num_dev_resets;
2502 	if (SCpnt) {
2503 		devip = devInfoReg(SCpnt->device);
2504 		if (devip)
2505 			devip->reset = 1;
2506 	}
2507 	return SUCCESS;
2508 }
2509 
2510 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2511 {
2512 	struct sdebug_host_info *sdbg_host;
2513         struct sdebug_dev_info * dev_info;
2514         struct scsi_device * sdp;
2515         struct Scsi_Host * hp;
2516 
2517 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2518 		printk(KERN_INFO "scsi_debug: bus_reset\n");
2519 	++num_bus_resets;
2520 	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2521 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2522 		if (sdbg_host) {
2523 			list_for_each_entry(dev_info,
2524                                             &sdbg_host->dev_info_list,
2525                                             dev_list)
2526 				dev_info->reset = 1;
2527 		}
2528 	}
2529 	return SUCCESS;
2530 }
2531 
2532 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2533 {
2534 	struct sdebug_host_info * sdbg_host;
2535         struct sdebug_dev_info * dev_info;
2536 
2537 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2538 		printk(KERN_INFO "scsi_debug: host_reset\n");
2539 	++num_host_resets;
2540         spin_lock(&sdebug_host_list_lock);
2541         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2542                 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2543                                     dev_list)
2544                         dev_info->reset = 1;
2545         }
2546         spin_unlock(&sdebug_host_list_lock);
2547 	stop_all_queued();
2548 	return SUCCESS;
2549 }
2550 
2551 /* Initializes timers in queued array */
2552 static void __init init_all_queued(void)
2553 {
2554 	unsigned long iflags;
2555 	int k;
2556 	struct sdebug_queued_cmd * sqcp;
2557 
2558 	spin_lock_irqsave(&queued_arr_lock, iflags);
2559 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2560 		sqcp = &queued_arr[k];
2561 		init_timer(&sqcp->cmnd_timer);
2562 		sqcp->in_use = 0;
2563 		sqcp->a_cmnd = NULL;
2564 	}
2565 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2566 }
2567 
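/* Write a DOS partition table into the ramdisk, dividing the store into
 * scsi_debug_num_parts roughly equal, cylinder aligned Linux (0x83)
 * partitions.
 */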
2568 static void __init sdebug_build_parts(unsigned char *ramp,
2569 				      unsigned long store_size)
2570 {
2571 	struct partition * pp;
2572 	int starts[SDEBUG_MAX_PARTS + 2];
2573 	int sectors_per_part, num_sectors, k;
2574 	int heads_by_sects, start_sec, end_sec;
2575 
2576 	/* assume partition table already zeroed */
2577 	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2578 		return;
2579 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2580 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2581 		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2582 				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2583 	}
2584 	num_sectors = (int)sdebug_store_sectors;
2585 	sectors_per_part = (num_sectors - sdebug_sectors_per)
2586 			   / scsi_debug_num_parts;
2587 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2588         starts[0] = sdebug_sectors_per;
2589 	for (k = 1; k < scsi_debug_num_parts; ++k)
2590 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2591 			    * heads_by_sects;
2592 	starts[scsi_debug_num_parts] = num_sectors;
2593 	starts[scsi_debug_num_parts + 1] = 0;
2594 
2595 	ramp[510] = 0x55;	/* magic partition markings */
2596 	ramp[511] = 0xAA;
2597 	pp = (struct partition *)(ramp + 0x1be);
2598 	for (k = 0; starts[k + 1]; ++k, ++pp) {
2599 		start_sec = starts[k];
2600 		end_sec = starts[k + 1] - 1;
2601 		pp->boot_ind = 0;
2602 
2603 		pp->cyl = start_sec / heads_by_sects;
2604 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2605 			   / sdebug_sectors_per;
2606 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2607 
2608 		pp->end_cyl = end_sec / heads_by_sects;
2609 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2610 			       / sdebug_sectors_per;
2611 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2612 
2613 		pp->start_sect = start_sec;
2614 		pp->nr_sects = end_sec - start_sec + 1;
2615 		pp->sys_ind = 0x83;	/* plain Linux partition */
2616 	}
2617 }
2618 
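/* Complete a command either immediately (delta_jiff <= 0) by calling the
 * mid level's done() callback, or later by parking it in queued_arr[]
 * and arming its timer. Returns 1 (busy) when all max_queue slots are in
 * use, else 0.
 */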
2619 static int schedule_resp(struct scsi_cmnd * cmnd,
2620 			 struct sdebug_dev_info * devip,
2621 			 done_funct_t done, int scsi_result, int delta_jiff)
2622 {
2623 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2624 		if (scsi_result) {
2625 			struct scsi_device * sdp = cmnd->device;
2626 
2627 			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2628 			       "non-zero result=0x%x\n", sdp->host->host_no,
2629 			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2630 		}
2631 	}
2632 	if (cmnd && devip) {
2633 		/* simulate autosense by this driver */
2634 		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2635 			memcpy(cmnd->sense_buffer, devip->sense_buff,
2636 			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2637 			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2638 	}
2639 	if (delta_jiff <= 0) {
2640 		if (cmnd)
2641 			cmnd->result = scsi_result;
2642 		if (done)
2643 			done(cmnd);
2644 		return 0;
2645 	} else {
2646 		unsigned long iflags;
2647 		int k;
2648 		struct sdebug_queued_cmd * sqcp = NULL;
2649 
2650 		spin_lock_irqsave(&queued_arr_lock, iflags);
2651 		for (k = 0; k < scsi_debug_max_queue; ++k) {
2652 			sqcp = &queued_arr[k];
2653 			if (! sqcp->in_use)
2654 				break;
2655 		}
2656 		if (k >= scsi_debug_max_queue) {
2657 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2658 			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2659 			return 1;	/* report busy to mid level */
2660 		}
2661 		sqcp->in_use = 1;
2662 		sqcp->a_cmnd = cmnd;
2663 		sqcp->scsi_result = scsi_result;
2664 		sqcp->done_funct = done;
2665 		sqcp->cmnd_timer.function = timer_intr_handler;
2666 		sqcp->cmnd_timer.data = k;
2667 		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2668 		add_timer(&sqcp->cmnd_timer);
2669 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2670 		if (cmnd)
2671 			cmnd->result = 0;
2672 		return 0;
2673 	}
2674 }
2675 /* Note: The following macros create attribute files in the
2676    /sys/module/scsi_debug/parameters directory. Unfortunately this
2677    driver is not notified when one of those files changes, so it cannot
2678    trigger the auxiliary actions that writing the corresponding attribute
2679    in the /sys/bus/pseudo/drivers/scsi_debug directory does.
2680  */
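/* For example, writing "0x1" to /sys/bus/pseudo/drivers/scsi_debug/opts
 * (see sdebug_opts_store() below) updates scsi_debug_opts and also resets
 * the command counter used by every_nth, whereas writing the same value
 * to /sys/module/scsi_debug/parameters/opts only changes the variable.
 */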
2681 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2682 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2683 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2684 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2685 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2686 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2687 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2688 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2689 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2690 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2691 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2692 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2693 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2694 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2695 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2696 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2697 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2698 		   S_IRUGO | S_IWUSR);
2699 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2700 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2701 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2702 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2703 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2704 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2705 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2706 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2707 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2708 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2709 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2710 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2711 
2712 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2713 MODULE_DESCRIPTION("SCSI debug adapter driver");
2714 MODULE_LICENSE("GPL");
2715 MODULE_VERSION(SCSI_DEBUG_VERSION);
2716 
2717 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2718 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2719 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2720 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2721 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2722 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2723 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2724 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2725 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2726 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2727 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2728 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2729 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2730 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2731 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2732 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2733 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2734 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2735 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2736 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2737 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2738 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2739 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2740 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2741 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2742 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0)");
2743 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
2744 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
2745 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2746 
2747 static char sdebug_info[256];
2748 
2749 static const char * scsi_debug_info(struct Scsi_Host * shp)
2750 {
2751 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2752 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2753 		scsi_debug_version_date, scsi_debug_dev_size_mb,
2754 		scsi_debug_opts);
2755 	return sdebug_info;
2756 }
2757 
2758 /* scsi_debug_proc_info
2759  * Handles reads and writes of this driver's entry under /proc/scsi
2760  */
2761 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2762 				int length, int inout)
2763 {
2764 	int len, pos, begin;
2765 	int orig_length;
2766 
2767 	orig_length = length;
2768 
2769 	if (inout == 1) {
2770 		char arr[16];
2771 		int minLen = length > 15 ? 15 : length;
2772 
2773 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2774 			return -EACCES;
2775 		memcpy(arr, buffer, minLen);
2776 		arr[minLen] = '\0';
2777 		if (1 != sscanf(arr, "%d", &pos))
2778 			return -EINVAL;
2779 		scsi_debug_opts = pos;
2780 		if (scsi_debug_every_nth != 0)
2781                         scsi_debug_cmnd_count = 0;
2782 		return length;
2783 	}
2784 	begin = 0;
2785 	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2786 	    "%s [%s]\n"
2787 	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2788 	    "every_nth=%d(curr:%d)\n"
2789 	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2790 	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2791 	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2792 	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2793 	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2794 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2795 	    scsi_debug_cmnd_count, scsi_debug_delay,
2796 	    scsi_debug_max_luns, scsi_debug_scsi_level,
2797 	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2798 	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2799 	    num_host_resets, dix_reads, dix_writes, dif_errors);
2800 	if (pos < offset) {
2801 		len = 0;
2802 		begin = pos;
2803 	}
2804 	*start = buffer + (offset - begin);	/* Start of wanted data */
2805 	len -= (offset - begin);
2806 	if (len > length)
2807 		len = length;
2808 	return len;
2809 }
2810 
2811 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2812 {
2813         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2814 }
2815 
2816 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2817 				  const char * buf, size_t count)
2818 {
2819         int delay;
2820 	char work[20];
2821 
2822         if (1 == sscanf(buf, "%10s", work)) {
2823 		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2824 			scsi_debug_delay = delay;
2825 			return count;
2826 		}
2827 	}
2828 	return -EINVAL;
2829 }
2830 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2831 	    sdebug_delay_store);
2832 
2833 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2834 {
2835         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2836 }
2837 
2838 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2839 				 const char * buf, size_t count)
2840 {
2841         int opts;
2842 	char work[20];
2843 
2844         if (1 == sscanf(buf, "%10s", work)) {
2845 		if (0 == strnicmp(work,"0x", 2)) {
2846 			if (1 == sscanf(&work[2], "%x", &opts))
2847 				goto opts_done;
2848 		} else {
2849 			if (1 == sscanf(work, "%d", &opts))
2850 				goto opts_done;
2851 		}
2852 	}
2853 	return -EINVAL;
2854 opts_done:
2855 	scsi_debug_opts = opts;
2856 	scsi_debug_cmnd_count = 0;
2857 	return count;
2858 }
2859 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2860 	    sdebug_opts_store);
2861 
2862 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2863 {
2864         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2865 }
2866 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2867 				  const char * buf, size_t count)
2868 {
2869         int n;
2870 
2871 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2872 		scsi_debug_ptype = n;
2873 		return count;
2874 	}
2875 	return -EINVAL;
2876 }
2877 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2878 
2879 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2880 {
2881         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2882 }
2883 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2884 				  const char * buf, size_t count)
2885 {
2886         int n;
2887 
2888 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2889 		scsi_debug_dsense = n;
2890 		return count;
2891 	}
2892 	return -EINVAL;
2893 }
2894 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2895 	    sdebug_dsense_store);
2896 
2897 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2898 {
2899         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2900 }
2901 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2902 				    const char * buf, size_t count)
2903 {
2904         int n;
2905 
2906 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2907 		scsi_debug_fake_rw = n;
2908 		return count;
2909 	}
2910 	return -EINVAL;
2911 }
2912 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2913 	    sdebug_fake_rw_store);
2914 
2915 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2916 {
2917         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2918 }
2919 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2920 				     const char * buf, size_t count)
2921 {
2922         int n;
2923 
2924 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2925 		scsi_debug_no_lun_0 = n;
2926 		return count;
2927 	}
2928 	return -EINVAL;
2929 }
2930 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2931 	    sdebug_no_lun_0_store);
2932 
2933 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2934 {
2935         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2936 }
2937 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2938 				     const char * buf, size_t count)
2939 {
2940         int n;
2941 
2942 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2943 		scsi_debug_num_tgts = n;
2944 		sdebug_max_tgts_luns();
2945 		return count;
2946 	}
2947 	return -EINVAL;
2948 }
2949 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2950 	    sdebug_num_tgts_store);
2951 
2952 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2953 {
2954         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2955 }
2956 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2957 
2958 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2959 {
2960         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2961 }
2962 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
2963 
2964 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2965 {
2966         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2967 }
2968 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2969 				      const char * buf, size_t count)
2970 {
2971         int nth;
2972 
2973 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2974 		scsi_debug_every_nth = nth;
2975 		scsi_debug_cmnd_count = 0;
2976 		return count;
2977 	}
2978 	return -EINVAL;
2979 }
2980 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
2981 	    sdebug_every_nth_store);
2982 
2983 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
2984 {
2985         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
2986 }
2987 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
2988 				     const char * buf, size_t count)
2989 {
2990         int n;
2991 
2992 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2993 		scsi_debug_max_luns = n;
2994 		sdebug_max_tgts_luns();
2995 		return count;
2996 	}
2997 	return -EINVAL;
2998 }
2999 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3000 	    sdebug_max_luns_store);
3001 
3002 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3003 {
3004         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3005 }
3006 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3007 				      const char * buf, size_t count)
3008 {
3009         int n;
3010 
3011 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3012 	    (n <= SCSI_DEBUG_CANQUEUE)) {
3013 		scsi_debug_max_queue = n;
3014 		return count;
3015 	}
3016 	return -EINVAL;
3017 }
3018 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3019 	    sdebug_max_queue_store);
3020 
3021 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3022 {
3023         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3024 }
3025 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3026 
3027 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3028 {
3029         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3030 }
3031 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3032 
3033 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3034 {
3035         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3036 }
3037 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3038 				       const char * buf, size_t count)
3039 {
3040         int n;
3041 
3042 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3043 		scsi_debug_virtual_gb = n;
3044 
3045 		sdebug_capacity = get_sdebug_capacity();
3046 
3047 		return count;
3048 	}
3049 	return -EINVAL;
3050 }
3051 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3052 	    sdebug_virtual_gb_store);
3053 
3054 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3055 {
3056         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3057 }
3058 
3059 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3060 				     const char * buf, size_t count)
3061 {
3062 	int delta_hosts;
3063 
3064 	if (sscanf(buf, "%d", &delta_hosts) != 1)
3065 		return -EINVAL;
3066 	if (delta_hosts > 0) {
3067 		do {
3068 			sdebug_add_adapter();
3069 		} while (--delta_hosts);
3070 	} else if (delta_hosts < 0) {
3071 		do {
3072 			sdebug_remove_adapter();
3073 		} while (++delta_hosts);
3074 	}
3075 	return count;
3076 }
3077 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3078 	    sdebug_add_host_store);
3079 
3080 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3081 					  char * buf)
3082 {
3083 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3084 }
3085 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3086 					   const char * buf, size_t count)
3087 {
3088 	int n;
3089 
3090 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3091 		scsi_debug_vpd_use_hostno = n;
3092 		return count;
3093 	}
3094 	return -EINVAL;
3095 }
3096 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3097 	    sdebug_vpd_use_hostno_store);
3098 
3099 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3100 {
3101 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3102 }
3103 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3104 
3105 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3106 {
3107 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3108 }
3109 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3110 
3111 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3112 {
3113 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3114 }
3115 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3116 
3117 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3118 {
3119 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3120 }
3121 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3122 
3123 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3124 {
3125 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3126 }
3127 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3128 
3129 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3130 {
3131 	ssize_t count;
3132 
3133 	if (scsi_debug_unmap_granularity == 0)
3134 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3135 				 sdebug_store_sectors);
3136 
3137 	count = bitmap_scnlistprintf(buf, PAGE_SIZE - 2, map_storep, map_size);
3138 
3139 	buf[count++] = '\n';
3140 	buf[count++] = 0;
3141 
3142 	return count;
3143 }
3144 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3145 
3146 
3147 /* Note: The following function creates attribute files in the
3148    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3149    files (over those found in the /sys/module/scsi_debug/parameters
3150    directory) is that auxiliary actions can be triggered when an attribute
3151    is changed. For example see: sdebug_add_host_store() above.
3152  */
3153 static int do_create_driverfs_files(void)
3154 {
3155 	int ret;
3156 
3157 	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3158 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3159 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3160 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3161 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3162 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3163 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3164 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3165 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3166 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3167 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3168 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3169 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3170 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3171 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3172 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3173 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3174 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3175 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3176 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3177 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3178 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3179 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3180 	return ret;
3181 }
3182 
3183 static void do_remove_driverfs_files(void)
3184 {
3185 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3186 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3187 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3188 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3189 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3190 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3191 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3192 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3193 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3194 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3195 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3196 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3197 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3198 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3199 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3200 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3201 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3202 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3203 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3204 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3205 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3206 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3207 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3208 }
3209 
3210 static void pseudo_0_release(struct device *dev)
3211 {
3212 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3213 		printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
3214 }
3215 
3216 static struct device pseudo_primary = {
3217 	.init_name	= "pseudo_0",
3218 	.release	= pseudo_0_release,
3219 };
3220 
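/* Module initialisation: validate the module parameters, size the
 * ramdisk and its fake geometry, allocate the data, DIF and provisioning
 * stores as needed, then register the pseudo parent device, the pseudo
 * bus and this driver.
 */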
3221 static int __init scsi_debug_init(void)
3222 {
3223 	unsigned long sz;
3224 	int host_to_add;
3225 	int k;
3226 	int ret;
3227 
3228 	switch (scsi_debug_sector_size) {
3229 	case  512:
3230 	case 1024:
3231 	case 2048:
3232 	case 4096:
3233 		break;
3234 	default:
3235 		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3236 		       scsi_debug_sector_size);
3237 		return -EINVAL;
3238 	}
3239 
3240 	switch (scsi_debug_dif) {
3241 
3242 	case SD_DIF_TYPE0_PROTECTION:
3243 	case SD_DIF_TYPE1_PROTECTION:
3244 	case SD_DIF_TYPE2_PROTECTION:
3245 	case SD_DIF_TYPE3_PROTECTION:
3246 		break;
3247 
3248 	default:
3249 		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3250 		return -EINVAL;
3251 	}
3252 
3253 	if (scsi_debug_guard > 1) {
3254 		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3255 		return -EINVAL;
3256 	}
3257 
3258 	if (scsi_debug_ato > 1) {
3259 		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3260 		return -EINVAL;
3261 	}
3262 
3263 	if (scsi_debug_physblk_exp > 15) {
3264 		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3265 		       scsi_debug_physblk_exp);
3266 		return -EINVAL;
3267 	}
3268 
3269 	if (scsi_debug_lowest_aligned > 0x3fff) {
3270 		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3271 		       scsi_debug_lowest_aligned);
3272 		return -EINVAL;
3273 	}
3274 
3275 	if (scsi_debug_dev_size_mb < 1)
3276 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3277 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3278 	sdebug_store_sectors = sz / scsi_debug_sector_size;
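	/* For example, an 8 MB store with 512-byte logical blocks gives
	 * 8 * 1048576 / 512 = 16384 backing-store sectors. */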
3279 	sdebug_capacity = get_sdebug_capacity();
3280 
3281 	/* play around with geometry, don't waste too much on track 0 */
3282 	sdebug_heads = 8;
3283 	sdebug_sectors_per = 32;
3284 	if (scsi_debug_dev_size_mb >= 256)
3285 		sdebug_heads = 64;
3286 	else if (scsi_debug_dev_size_mb >= 16)
3287 		sdebug_heads = 32;
3288 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3289 			       (sdebug_sectors_per * sdebug_heads);
3290 	if (sdebug_cylinders_per >= 1024) {
3291 		/* other LLDs do this; implies >= 1GB ram disk ... */
3292 		sdebug_heads = 255;
3293 		sdebug_sectors_per = 63;
3294 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3295 			       (sdebug_sectors_per * sdebug_heads);
3296 	}
3297 
3298 	fake_storep = vmalloc(sz);
3299 	if (NULL == fake_storep) {
3300 		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3301 		return -ENOMEM;
3302 	}
3303 	memset(fake_storep, 0, sz);
3304 	if (scsi_debug_num_parts > 0)
3305 		sdebug_build_parts(fake_storep, sz);
3306 
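	/*
	 * With DIF emulation enabled, keep a parallel store holding one
	 * 8-byte protection tuple per backing-store sector.  It is seeded
	 * with 0xff bytes, i.e. the tuples look "unwritten" until a
	 * protected write fills them in (an all-ones application tag is the
	 * conventional escape that tells checkers to skip the tuple).
	 */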
3307 	if (scsi_debug_dif) {
3308 		int dif_size;
3309 
3310 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3311 		dif_storep = vmalloc(dif_size);
3312 
3313 		printk(KERN_INFO "scsi_debug_init: dif_storep %u bytes @ %p\n",
3314 		       dif_size, dif_storep);
3315 
3316 		if (dif_storep == NULL) {
3317 			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3318 			ret = -ENOMEM;
3319 			goto free_vm;
3320 		}
3321 
3322 		memset(dif_storep, 0xff, dif_size);
3323 	}
3324 
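	/*
	 * Thin provisioning: map_storep is a bitmap with one bit per
	 * "provisioning block" (unmap_granularity sectors), recording which
	 * blocks are currently mapped (written).
	 */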
3325 	if (scsi_debug_unmap_granularity) {
3326 		unsigned int map_bytes;
3327 
3328 		if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3329 			printk(KERN_ERR
3330 			       "%s: ERR: unmap_granularity < unmap_alignment\n",
3331 			       __func__);
3332 			ret = -EINVAL;
			goto free_vm;	/* fake_storep (and possibly dif_storep) were already allocated */
3333 		}
3334 
3335 		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3336 		map_bytes = map_size >> 3;
3337 		map_storep = vmalloc(map_bytes);
3338 
3339 		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3340 		       map_size);
3341 
3342 		if (map_storep == NULL) {
3343 			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3344 			ret = -ENOMEM;
3345 			goto free_vm;
3346 		}
3347 
3348 		memset(map_storep, 0x0, map_bytes);
3349 
3350 		/* Map first 1KB for partition table */
3351 		if (scsi_debug_num_parts)
3352 			map_region(0, 2);
3353 	}
3354 
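	/*
	 * Register the pseudo root device, then the bus, then the driver
	 * and its sysfs attributes.  The labels at the bottom of this
	 * function unwind these steps in reverse order on failure.
	 */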
3355 	ret = device_register(&pseudo_primary);
3356 	if (ret < 0) {
3357 		printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
3358 			ret);
3359 		goto free_vm;
3360 	}
3361 	ret = bus_register(&pseudo_lld_bus);
3362 	if (ret < 0) {
3363 		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3364 			ret);
3365 		goto dev_unreg;
3366 	}
3367 	ret = driver_register(&sdebug_driverfs_driver);
3368 	if (ret < 0) {
3369 		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3370 			ret);
3371 		goto bus_unreg;
3372 	}
3373 	ret = do_create_driverfs_files();
3374 	if (ret < 0) {
3375 		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3376 			ret);
3377 		goto del_files;
3378 	}
3379 
3380 	init_all_queued();
3381 
3382 	host_to_add = scsi_debug_add_host;
3383 	scsi_debug_add_host = 0;
3384 
3385 	for (k = 0; k < host_to_add; k++) {
3386 		if (sdebug_add_adapter()) {
3387 			printk(KERN_ERR "scsi_debug_init: "
3388 			       "sdebug_add_adapter failed k=%d\n", k);
3389 			break;
3390 		}
3391 	}
3392 
3393 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3394 		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3395 		       scsi_debug_add_host);
3396 	}
3397 	return 0;
3398 
3399 del_files:
3400 	do_remove_driverfs_files();
3401 	driver_unregister(&sdebug_driverfs_driver);
3402 bus_unreg:
3403 	bus_unregister(&pseudo_lld_bus);
3404 dev_unreg:
3405 	device_unregister(&pseudo_primary);
3406 free_vm:
3407 	if (map_storep)
3408 		vfree(map_storep);
3409 	if (dif_storep)
3410 		vfree(dif_storep);
3411 	vfree(fake_storep);
3412 
3413 	return ret;
3414 }
3415 
3416 static void __exit scsi_debug_exit(void)
3417 {
3418 	int k = scsi_debug_add_host;
3419 
3420 	stop_all_queued();
3421 	for (; k; k--)
3422 		sdebug_remove_adapter();
3423 	do_remove_driverfs_files();
3424 	driver_unregister(&sdebug_driverfs_driver);
3425 	bus_unregister(&pseudo_lld_bus);
3426 	device_unregister(&pseudo_primary);
3427 
	if (map_storep)
		vfree(map_storep);

3428 	if (dif_storep)
3429 		vfree(dif_storep);
3430 
3431 	vfree(fake_storep);
3432 }
3433 
3434 device_initcall(scsi_debug_init);
3435 module_exit(scsi_debug_exit);
3436 
3437 static void sdebug_release_adapter(struct device * dev)
3438 {
3439 	struct sdebug_host_info *sdbg_host;
3440 
3441 	sdbg_host = to_sdebug_host(dev);
3442 	kfree(sdbg_host);
3443 }
3444 
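/*
 * Create one simulated adapter: allocate its host structure, pre-create
 * num_tgts * max_luns device-info records, then register the adapter as a
 * child of pseudo_primary on the pseudo bus, which in turn invokes
 * sdebug_driver_probe() to build the Scsi_Host.
 */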
3445 static int sdebug_add_adapter(void)
3446 {
3447 	int k, devs_per_host;
3448 	int error = 0;
3449 	struct sdebug_host_info *sdbg_host;
3450 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3451 
3452 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
3453 	if (NULL == sdbg_host) {
3454 		printk(KERN_ERR "%s: out of memory at line %d\n",
3455 		       __func__, __LINE__);
3456 		return -ENOMEM;
3457 	}
3458 
3459 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3460 
3461 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3462 	for (k = 0; k < devs_per_host; k++) {
3463 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3464 		if (!sdbg_devinfo) {
3465 			printk(KERN_ERR "%s: out of memory at line %d\n",
3466 			       __func__, __LINE__);
3467 			error = -ENOMEM;
3468 			goto clean;
3469 		}
3470 	}
3471 
3472 	spin_lock(&sdebug_host_list_lock);
3473 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3474 	spin_unlock(&sdebug_host_list_lock);
3475 
3476 	sdbg_host->dev.bus = &pseudo_lld_bus;
3477 	sdbg_host->dev.parent = &pseudo_primary;
3478 	sdbg_host->dev.release = &sdebug_release_adapter;
3479 	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3480 
3481 	error = device_register(&sdbg_host->dev);
3482 
3483 	if (error)
3484 		goto clean;
3485 
3486 	++scsi_debug_add_host;
3487 	return error;
3488 
3489 clean:
3490 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3491 				 dev_list) {
3492 		list_del(&sdbg_devinfo->dev_list);
3493 		kfree(sdbg_devinfo);
3494 	}
3495 
3496 	kfree(sdbg_host);
3497 	return error;
3498 }
3499 
3500 static void sdebug_remove_adapter(void)
3501 {
3502 	struct sdebug_host_info *sdbg_host = NULL;
3503 
3504 	spin_lock(&sdebug_host_list_lock);
3505 	if (!list_empty(&sdebug_host_list)) {
3506 		sdbg_host = list_entry(sdebug_host_list.prev,
3507 				       struct sdebug_host_info, host_list);
3508 		list_del(&sdbg_host->host_list);
3509 	}
3510 	spin_unlock(&sdebug_host_list_lock);
3511 
3512 	if (!sdbg_host)
3513 		return;
3514 
3515 	device_unregister(&sdbg_host->dev);
3516 	--scsi_debug_add_host;
3517 }
3518 
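/*
 * Command entry point: decode the CDB opcode, validate the target and LUN,
 * optionally arm fake-error injection, then build a response for the opcode
 * and hand it to schedule_resp(), which completes the command immediately or
 * after scsi_debug_delay.
 */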
3519 static
3520 int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3521 {
3522 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3523 	int len, k;
3524 	unsigned int num;
3525 	unsigned long long lba;
3526 	u32 ei_lba;
3527 	int errsts = 0;
3528 	int target = SCpnt->device->id;
3529 	struct sdebug_dev_info *devip = NULL;
3530 	int inj_recovered = 0;
3531 	int inj_transport = 0;
3532 	int inj_dif = 0;
3533 	int inj_dix = 0;
3534 	int delay_override = 0;
3535 	int unmap = 0;
3536 
3537 	scsi_set_resid(SCpnt, 0);
3538 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3539 		printk(KERN_INFO "scsi_debug: cmd ");
3540 		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3541 			printk("%02x ", (int)cmd[k]);
3542 		printk("\n");
3543 	}
3544 
3545 	if (target == SCpnt->device->host->hostt->this_id) {
3546 		printk(KERN_INFO "scsi_debug: initiator's id used as "
3547 		       "target!\n");
3548 		return schedule_resp(SCpnt, NULL, done,
3549 				     DID_NO_CONNECT << 16, 0);
3550 	}
3551 
3552 	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3553 	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3554 		return schedule_resp(SCpnt, NULL, done,
3555 				     DID_NO_CONNECT << 16, 0);
3556 	devip = devInfoReg(SCpnt->device);
3557 	if (NULL == devip)
3558 		return schedule_resp(SCpnt, NULL, done,
3559 				     DID_NO_CONNECT << 16, 0);
3560 
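	/* Periodic error injection: every |every_nth|-th command is either
	 * dropped (to provoke a timeout) or flagged so that the read/write
	 * handlers below return an injected error. */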
3561 	if ((scsi_debug_every_nth != 0) &&
3562 	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3563 		scsi_debug_cmnd_count = 0;
3564 		if (scsi_debug_every_nth < -1)
3565 			scsi_debug_every_nth = -1;
3566 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3567 			return 0; /* ignore command causing timeout */
3568 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3569 			inj_recovered = 1; /* to reads and writes below */
3570 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3571 			inj_transport = 1; /* to reads and writes below */
3572 		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3573 			inj_dif = 1; /* to reads and writes below */
3574 		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3575 			inj_dix = 1; /* to reads and writes below */
3576 	}
3577 
3578 	if (devip->wlun) {
3579 		switch (*cmd) {
3580 		case INQUIRY:
3581 		case REQUEST_SENSE:
3582 		case TEST_UNIT_READY:
3583 		case REPORT_LUNS:
3584 			break;  /* only allowable wlun commands */
3585 		default:
3586 			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3587 				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3588 				       "not supported for wlun\n", *cmd);
3589 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3590 					INVALID_OPCODE, 0);
3591 			errsts = check_condition_result;
3592 			return schedule_resp(SCpnt, devip, done, errsts,
3593 					     0);
3594 		}
3595 	}
3596 
3597 	switch (*cmd) {
3598 	case INQUIRY:     /* mandatory, ignore unit attention */
3599 		delay_override = 1;
3600 		errsts = resp_inquiry(SCpnt, target, devip);
3601 		break;
3602 	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3603 		delay_override = 1;
3604 		errsts = resp_requests(SCpnt, devip);
3605 		break;
3606 	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3607 	case START_STOP:
3608 		errsts = resp_start_stop(SCpnt, devip);
3609 		break;
3610 	case ALLOW_MEDIUM_REMOVAL:
3611 		errsts = check_readiness(SCpnt, 1, devip);
3612 		if (errsts)
3613 			break;
3614 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3615 			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3616 			       cmd[4] ? "inhibited" : "enabled");
3617 		break;
3618 	case SEND_DIAGNOSTIC:     /* mandatory */
3619 		errsts = check_readiness(SCpnt, 1, devip);
3620 		break;
3621 	case TEST_UNIT_READY:     /* mandatory */
3622 		delay_override = 1;
3623 		errsts = check_readiness(SCpnt, 0, devip);
3624 		break;
3625 	case RESERVE:
3626 		errsts = check_readiness(SCpnt, 1, devip);
3627 		break;
3628 	case RESERVE_10:
3629 		errsts = check_readiness(SCpnt, 1, devip);
3630 		break;
3631 	case RELEASE:
3632 		errsts = check_readiness(SCpnt, 1, devip);
3633 		break;
3634 	case RELEASE_10:
3635 		errsts = check_readiness(SCpnt, 1, devip);
3636 		break;
3637 	case READ_CAPACITY:
3638 		errsts = resp_readcap(SCpnt, devip);
3639 		break;
3640 	case SERVICE_ACTION_IN:
3641 		if (cmd[1] == SAI_READ_CAPACITY_16)
3642 			errsts = resp_readcap16(SCpnt, devip);
3643 		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3644 
3645 			if (scsi_debug_unmap_max_desc == 0) {
3646 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3647 						INVALID_COMMAND_OPCODE, 0);
3648 				errsts = check_condition_result;
3649 			} else
3650 				errsts = resp_get_lba_status(SCpnt, devip);
3651 		} else {
3652 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3653 					INVALID_OPCODE, 0);
3654 			errsts = check_condition_result;
3655 		}
3656 		break;
3657 	case MAINTENANCE_IN:
3658 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3659 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3660 					INVALID_OPCODE, 0);
3661 			errsts = check_condition_result;
3662 			break;
3663 		}
3664 		errsts = resp_report_tgtpgs(SCpnt, devip);
3665 		break;
3666 	case READ_16:
3667 	case READ_12:
3668 	case READ_10:
3669 		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3670 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3671 		    cmd[1] & 0xe0) {
3672 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3673 					INVALID_COMMAND_OPCODE, 0);
3674 			errsts = check_condition_result;
3675 			break;
3676 		}
3677 
3678 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3679 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3680 		    (cmd[1] & 0xe0) == 0)
3681 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3682 
3683 		/* fall through */
3684 	case READ_6:
3685 read:
3686 		errsts = check_readiness(SCpnt, 0, devip);
3687 		if (errsts)
3688 			break;
3689 		if (scsi_debug_fake_rw)
3690 			break;
3691 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3692 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3693 		if (inj_recovered && (0 == errsts)) {
3694 			mk_sense_buffer(devip, RECOVERED_ERROR,
3695 					THRESHOLD_EXCEEDED, 0);
3696 			errsts = check_condition_result;
3697 		} else if (inj_transport && (0 == errsts)) {
3698 			mk_sense_buffer(devip, ABORTED_COMMAND,
3699 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3700 			errsts = check_condition_result;
3701 		} else if (inj_dif && (0 == errsts)) {
3702 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3703 			errsts = illegal_condition_result;
3704 		} else if (inj_dix && (0 == errsts)) {
3705 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3706 			errsts = illegal_condition_result;
3707 		}
3708 		break;
3709 	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3710 		delay_override = 1;
3711 		errsts = resp_report_luns(SCpnt, devip);
3712 		break;
3713 	case VERIFY:		/* 10 byte SBC-2 command */
3714 		errsts = check_readiness(SCpnt, 0, devip);
3715 		break;
3716 	case WRITE_16:
3717 	case WRITE_12:
3718 	case WRITE_10:
3719 		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3720 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3721 		    cmd[1] & 0xe0) {
3722 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3723 					INVALID_COMMAND_OPCODE, 0);
3724 			errsts = check_condition_result;
3725 			break;
3726 		}
3727 
3728 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3729 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3730 		    (cmd[1] & 0xe0) == 0)
3731 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3732 
3733 		/* fall through */
3734 	case WRITE_6:
3735 write:
3736 		errsts = check_readiness(SCpnt, 0, devip);
3737 		if (errsts)
3738 			break;
3739 		if (scsi_debug_fake_rw)
3740 			break;
3741 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3742 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3743 		if (inj_recovered && (0 == errsts)) {
3744 			mk_sense_buffer(devip, RECOVERED_ERROR,
3745 					THRESHOLD_EXCEEDED, 0);
3746 			errsts = check_condition_result;
3747 		} else if (inj_dif && (0 == errsts)) {
3748 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3749 			errsts = illegal_condition_result;
3750 		} else if (inj_dix && (0 == errsts)) {
3751 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3752 			errsts = illegal_condition_result;
3753 		}
3754 		break;
3755 	case WRITE_SAME_16:
3756 		if (cmd[1] & 0x8)
3757 			unmap = 1;
3758 		/* fall through */
3759 	case WRITE_SAME:
3760 		errsts = check_readiness(SCpnt, 0, devip);
3761 		if (errsts)
3762 			break;
3763 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3764 		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3765 		break;
3766 	case UNMAP:
3767 		errsts = check_readiness(SCpnt, 0, devip);
3768 		if (errsts)
3769 			break;
3770 
3771 		if (scsi_debug_unmap_max_desc == 0) {
3772 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3773 					INVALID_COMMAND_OPCODE, 0);
3774 			errsts = check_condition_result;
3775 		} else
3776 			errsts = resp_unmap(SCpnt, devip);
3777 		break;
3778 	case MODE_SENSE:
3779 	case MODE_SENSE_10:
3780 		errsts = resp_mode_sense(SCpnt, target, devip);
3781 		break;
3782 	case MODE_SELECT:
3783 		errsts = resp_mode_select(SCpnt, 1, devip);
3784 		break;
3785 	case MODE_SELECT_10:
3786 		errsts = resp_mode_select(SCpnt, 0, devip);
3787 		break;
3788 	case LOG_SENSE:
3789 		errsts = resp_log_sense(SCpnt, devip);
3790 		break;
3791 	case SYNCHRONIZE_CACHE:
3792 		delay_override = 1;
3793 		errsts = check_readiness(SCpnt, 0, devip);
3794 		break;
3795 	case WRITE_BUFFER:
3796 		errsts = check_readiness(SCpnt, 1, devip);
3797 		break;
3798 	case XDWRITEREAD_10:
3799 		if (!scsi_bidi_cmnd(SCpnt)) {
3800 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3801 					INVALID_FIELD_IN_CDB, 0);
3802 			errsts = check_condition_result;
3803 			break;
3804 		}
3805 
3806 		errsts = check_readiness(SCpnt, 0, devip);
3807 		if (errsts)
3808 			break;
3809 		if (scsi_debug_fake_rw)
3810 			break;
3811 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3812 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3813 		if (errsts)
3814 			break;
3815 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3816 		if (errsts)
3817 			break;
3818 		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3819 		break;
3820 	case VARIABLE_LENGTH_CMD:
3821 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3822 
3823 			if ((cmd[10] & 0xe0) == 0)
3824 				printk(KERN_ERR
3825 				       "Unprotected RD/WR to DIF device\n");
3826 
3827 			if (cmd[9] == READ_32) {
3828 				BUG_ON(SCpnt->cmd_len < 32);
3829 				goto read;
3830 			}
3831 
3832 			if (cmd[9] == WRITE_32) {
3833 				BUG_ON(SCpnt->cmd_len < 32);
3834 				goto write;
3835 			}
3836 		}
3837 
3838 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3839 				INVALID_FIELD_IN_CDB, 0);
3840 		errsts = check_condition_result;
3841 		break;
3842 
3843 	default:
3844 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3845 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3846 			       "supported\n", *cmd);
3847 		errsts = check_readiness(SCpnt, 1, devip);
3848 		if (errsts)
3849 			break;	/* Unit attention takes precedence */
3850 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3851 		errsts = check_condition_result;
3852 		break;
3853 	}
3854 	return schedule_resp(SCpnt, devip, done, errsts,
3855 			     (delay_override ? 0 : scsi_debug_delay));
3856 }
3857 
3858 static struct scsi_host_template sdebug_driver_template = {
3859 	.proc_info =		scsi_debug_proc_info,
3860 	.proc_name =		sdebug_proc_name,
3861 	.name =			"SCSI DEBUG",
3862 	.info =			scsi_debug_info,
3863 	.slave_alloc =		scsi_debug_slave_alloc,
3864 	.slave_configure =	scsi_debug_slave_configure,
3865 	.slave_destroy =	scsi_debug_slave_destroy,
3866 	.ioctl =		scsi_debug_ioctl,
3867 	.queuecommand =		scsi_debug_queuecommand,
3868 	.eh_abort_handler =	scsi_debug_abort,
3869 	.eh_bus_reset_handler = scsi_debug_bus_reset,
3870 	.eh_device_reset_handler = scsi_debug_device_reset,
3871 	.eh_host_reset_handler = scsi_debug_host_reset,
3872 	.bios_param =		scsi_debug_biosparam,
3873 	.can_queue =		SCSI_DEBUG_CANQUEUE,
3874 	.this_id =		7,
3875 	.sg_tablesize =		256,
3876 	.cmd_per_lun =		16,
3877 	.max_sectors =		0xffff,
3878 	.use_clustering = 	DISABLE_CLUSTERING,
3879 	.module =		THIS_MODULE,
3880 };
3881 
3882 static int sdebug_driver_probe(struct device * dev)
3883 {
3884 	int error = 0;
3885 	struct sdebug_host_info *sdbg_host;
3886 	struct Scsi_Host *hpnt;
3887 	int host_prot;
3888 
3889 	sdbg_host = to_sdebug_host(dev);
3890 
3891 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
3892 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3893 	if (NULL == hpnt) {
3894 		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
3895 		error = -ENODEV;
3896 		return error;
3897 	}
3898 
3899 	sdbg_host->shost = hpnt;
3900 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3901 	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3902 		hpnt->max_id = scsi_debug_num_tgts + 1;
3903 	else
3904 		hpnt->max_id = scsi_debug_num_tgts;
3905 	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
3906 
3907 	host_prot = 0;
3908 
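	/*
	 * Advertise protection capability: the dif parameter selects the
	 * target-side DIF type, and dix additionally enables the matching
	 * host-side DIX capability (DIX type 0 when dif is 0).
	 */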
3909 	switch (scsi_debug_dif) {
3910 
3911 	case SD_DIF_TYPE1_PROTECTION:
3912 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3913 		if (scsi_debug_dix)
3914 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3915 		break;
3916 
3917 	case SD_DIF_TYPE2_PROTECTION:
3918 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3919 		if (scsi_debug_dix)
3920 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3921 		break;
3922 
3923 	case SD_DIF_TYPE3_PROTECTION:
3924 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
3925 		if (scsi_debug_dix)
3926 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3927 		break;
3928 
3929 	default:
3930 		if (scsi_debug_dix)
3931 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3932 		break;
3933 	}
3934 
3935 	scsi_host_set_prot(hpnt, host_prot);
3936 
3937 	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3938 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3939 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3940 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3941 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3942 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3943 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3944 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
3945 
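	/* guard=1 selects an IP-checksum guard tag; otherwise the standard
	 * T10 CRC guard is used. */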
3946 	if (scsi_debug_guard == 1)
3947 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3948 	else
3949 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3950 
3951 	error = scsi_add_host(hpnt, &sdbg_host->dev);
3952 	if (error) {
3953 		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3954 		error = -ENODEV;
3955 		scsi_host_put(hpnt);
3956 	} else
3957 		scsi_scan_host(hpnt);
3958 
3960 	return error;
3961 }
3962 
3963 static int sdebug_driver_remove(struct device * dev)
3964 {
3965 	struct sdebug_host_info *sdbg_host;
3966 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3967 
3968 	sdbg_host = to_sdebug_host(dev);
3969 
3970 	if (!sdbg_host) {
3971 		printk(KERN_ERR "%s: Unable to locate host info\n",
3972 		       __func__);
3973 		return -ENODEV;
3974 	}
3975 
3976 	scsi_remove_host(sdbg_host->shost);
3977 
3978 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3979 				 dev_list) {
3980 		list_del(&sdbg_devinfo->dev_list);
3981 		kfree(sdbg_devinfo);
3982 	}
3983 
3984 	scsi_host_put(sdbg_host->shost);
3985 	return 0;
3986 }
3987 
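/*
 * Every device on the pseudo bus matches this driver, so each adapter
 * registered by sdebug_add_adapter() gets probed by sdebug_driver_probe().
 */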
3988 static int pseudo_lld_bus_match(struct device *dev,
3989 				struct device_driver *dev_driver)
3990 {
3991 	return 1;
3992 }
3993 
3994 static struct bus_type pseudo_lld_bus = {
3995 	.name = "pseudo",
3996 	.match = pseudo_lld_bus_match,
3997 	.probe = sdebug_driver_probe,
3998 	.remove = sdebug_driver_remove,
3999 };
4000