xref: /openbmc/linux/drivers/scsi/scsi_debug.c (revision b34e08d5)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more; inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
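/*
 * Illustrative usage (not from this file): the option styles described above
 * look like "scsi_debug.dev_size_mb=256 scsi_debug.num_tgts=2" on the kernel
 * command line, or "modprobe scsi_debug dev_size_mb=256 num_tgts=2" when
 * loading the module.
 */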
27 
28 #include <linux/module.h>
29 
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 
46 #include <net/checksum.h>
47 
48 #include <asm/unaligned.h>
49 
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
57 
58 #include "sd.h"
59 #include "scsi_logging.h"
60 
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
63 
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define ADDR_OUT_OF_RANGE 0x21
72 #define INVALID_COMMAND_OPCODE 0x20
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define POWERON_RESET 0x29
76 #define SAVING_PARAMS_UNSUP 0x39
77 #define TRANSPORT_PROBLEM 0x4b
78 #define THRESHOLD_EXCEEDED 0x5d
79 #define LOW_POWER_COND_ON 0x5e
80 
81 /* Additional Sense Code Qualifier (ASCQ) */
82 #define ACK_NAK_TO 0x3
83 
84 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 
86 /* Default values for driver parameters */
87 #define DEF_NUM_HOST   1
88 #define DEF_NUM_TGTS   1
89 #define DEF_MAX_LUNS   1
90 /* With these defaults, this driver will make 1 host with 1 target
91  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
92  */
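/* Illustrative: add_host=2 num_tgts=2 max_luns=2 would instead present
 * 2*2*2 = 8 logical units, all backed by the same shared ram store. */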
93 #define DEF_ATO 1
94 #define DEF_DELAY   1
95 #define DEF_DEV_SIZE_MB   8
96 #define DEF_DIF 0
97 #define DEF_DIX 0
98 #define DEF_D_SENSE   0
99 #define DEF_EVERY_NTH   0
100 #define DEF_FAKE_RW	0
101 #define DEF_GUARD 0
102 #define DEF_LBPU 0
103 #define DEF_LBPWS 0
104 #define DEF_LBPWS10 0
105 #define DEF_LBPRZ 1
106 #define DEF_LOWEST_ALIGNED 0
107 #define DEF_NO_LUN_0   0
108 #define DEF_NUM_PARTS   0
109 #define DEF_OPTS   0
110 #define DEF_OPT_BLKS 64
111 #define DEF_PHYSBLK_EXP 0
112 #define DEF_PTYPE   0
113 #define DEF_REMOVABLE false
114 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
115 #define DEF_SECTOR_SIZE 512
116 #define DEF_UNMAP_ALIGNMENT 0
117 #define DEF_UNMAP_GRANULARITY 1
118 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
119 #define DEF_UNMAP_MAX_DESC 256
120 #define DEF_VIRTUAL_GB   0
121 #define DEF_VPD_USE_HOSTNO 1
122 #define DEF_WRITESAME_LENGTH 0xFFFF
123 
124 /* bit mask values for scsi_debug_opts */
125 #define SCSI_DEBUG_OPT_NOISE   1
126 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
127 #define SCSI_DEBUG_OPT_TIMEOUT   4
128 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
129 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
130 #define SCSI_DEBUG_OPT_DIF_ERR   32
131 #define SCSI_DEBUG_OPT_DIX_ERR   64
132 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
133 /* When "every_nth" > 0 then once every "every_nth" commands:
134  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
135  *   - a RECOVERED_ERROR is simulated on successful read and write
136  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
137  *   - a TRANSPORT_ERROR is simulated on successful read and write
138  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139  *
140  * When "every_nth" < 0 then after "- every_nth" commands:
141  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
142  *   - a RECOVERED_ERROR is simulated on successful read and write
143  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
144  *   - a TRANSPORT_ERROR is simulated on successful read and write
145  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
146  * This will continue until some other action occurs (e.g. the user
147  * writing a new value (other than -1 or 1) to every_nth via sysfs).
148  */
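/* Illustrative: every_nth=100 with opts=4 (SCSI_DEBUG_OPT_TIMEOUT) makes
 * roughly every 100th command go unanswered, while every_nth=-1 with opts=8
 * (SCSI_DEBUG_OPT_RECOVERED_ERR) starts injecting RECOVERED_ERRORs after the
 * first command and keeps doing so until every_nth is rewritten via sysfs. */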
149 
150 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
151  * is simulated at this sector on read commands: */
152 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
153 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
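/* Illustrative: with opts=2 set, reads that touch sectors 0x1234..0x123d
 * (4660..4669) complete with a MEDIUM ERROR sense. */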
154 
155 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
156  * or "peripheral device" addressing (value 0) */
157 #define SAM2_LUN_ADDRESS_METHOD 0
158 #define SAM2_WLUN_REPORT_LUNS 0xc101
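/* Illustrative: LUN 5 is reported as 0x0005 with peripheral device
 * addressing (method 0) and as 0x4005 with flat space addressing (method 1);
 * 0xc101 above is the SAM-2 well known LUN for REPORT LUNS. */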
159 
160 /* Can queue up to this number of commands. Typically commands that
161  * have a non-zero delay are queued. */
162 #define SCSI_DEBUG_CANQUEUE  255
163 
164 static int scsi_debug_add_host = DEF_NUM_HOST;
165 static int scsi_debug_ato = DEF_ATO;
166 static int scsi_debug_delay = DEF_DELAY;
167 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
168 static int scsi_debug_dif = DEF_DIF;
169 static int scsi_debug_dix = DEF_DIX;
170 static int scsi_debug_dsense = DEF_D_SENSE;
171 static int scsi_debug_every_nth = DEF_EVERY_NTH;
172 static int scsi_debug_fake_rw = DEF_FAKE_RW;
173 static unsigned int scsi_debug_guard = DEF_GUARD;
174 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
175 static int scsi_debug_max_luns = DEF_MAX_LUNS;
176 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
177 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
178 static int scsi_debug_no_uld = 0;
179 static int scsi_debug_num_parts = DEF_NUM_PARTS;
180 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
181 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
182 static int scsi_debug_opts = DEF_OPTS;
183 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
184 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
185 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
186 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
187 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
188 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
189 static unsigned int scsi_debug_lbpu = DEF_LBPU;
190 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
191 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
192 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
193 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
194 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
195 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
196 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
197 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
198 static bool scsi_debug_removable = DEF_REMOVABLE;
199 static bool scsi_debug_clustering;
200 
201 static int scsi_debug_cmnd_count = 0;
202 
203 #define DEV_READONLY(TGT)      (0)
204 
205 static unsigned int sdebug_store_sectors;
206 static sector_t sdebug_capacity;	/* in sectors */
207 
208 /* old BIOS stuff; the kernel may get rid of these, but some mode sense pages
209    may still need them */
210 static int sdebug_heads;		/* heads per disk */
211 static int sdebug_cylinders_per;	/* cylinders per surface */
212 static int sdebug_sectors_per;		/* sectors per cylinder */
213 
214 #define SDEBUG_MAX_PARTS 4
215 
216 #define SDEBUG_SENSE_LEN 32
217 
218 #define SCSI_DEBUG_MAX_CMD_LEN 32
219 
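/* A non-zero return from scsi_debug_lbp() below means some form of logical
 * block provisioning (UNMAP and/or WRITE SAME based) is enabled; it gates the
 * 0xb2 VPD page and the LBPME bit in the READ CAPACITY(16) response. */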
220 static unsigned int scsi_debug_lbp(void)
221 {
222 	return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
223 }
224 
225 struct sdebug_dev_info {
226 	struct list_head dev_list;
227 	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
228 	unsigned int channel;
229 	unsigned int target;
230 	unsigned int lun;
231 	struct sdebug_host_info *sdbg_host;
232 	unsigned int wlun;
233 	char reset;
234 	char stopped;
235 	char used;
236 };
237 
238 struct sdebug_host_info {
239 	struct list_head host_list;
240 	struct Scsi_Host *shost;
241 	struct device dev;
242 	struct list_head dev_info_list;
243 };
244 
245 #define to_sdebug_host(d)	\
246 	container_of(d, struct sdebug_host_info, dev)
247 
248 static LIST_HEAD(sdebug_host_list);
249 static DEFINE_SPINLOCK(sdebug_host_list_lock);
250 
251 typedef void (* done_funct_t) (struct scsi_cmnd *);
252 
253 struct sdebug_queued_cmd {
254 	int in_use;
255 	struct timer_list cmnd_timer;
256 	done_funct_t done_funct;
257 	struct scsi_cmnd * a_cmnd;
258 	int scsi_result;
259 };
260 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
261 
262 static unsigned char * fake_storep;	/* ramdisk storage */
263 static struct sd_dif_tuple *dif_storep;	/* protection info */
264 static void *map_storep;		/* provisioning map */
265 
266 static unsigned long map_size;
267 static int num_aborts = 0;
268 static int num_dev_resets = 0;
269 static int num_bus_resets = 0;
270 static int num_host_resets = 0;
271 static int dix_writes;
272 static int dix_reads;
273 static int dif_errors;
274 
275 static DEFINE_SPINLOCK(queued_arr_lock);
276 static DEFINE_RWLOCK(atomic_rw);
277 
278 static char sdebug_proc_name[] = "scsi_debug";
279 
280 static struct bus_type pseudo_lld_bus;
281 
282 static struct device_driver sdebug_driverfs_driver = {
283 	.name 		= sdebug_proc_name,
284 	.bus		= &pseudo_lld_bus,
285 };
286 
287 static const int check_condition_result =
288 		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
289 
290 static const int illegal_condition_result =
291 	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
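/* scp->result packs driver, host, message and status bytes; the two values
 * above combine DRIVER_SENSE in the driver byte with a SAM CHECK CONDITION
 * status (plus DID_ABORT in the host byte for the "illegal" variant). */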
292 
293 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
294 				    0, 0, 0x2, 0x4b};
295 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
296 			           0, 0, 0x0, 0x0};
297 
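/* The two helpers below map an LBA onto the (possibly smaller) ram store:
 * do_div() returns the remainder, which is assigned back, so addresses beyond
 * sdebug_store_sectors simply wrap around. This is what lets virtual_gb
 * advertise more capacity than the memory actually backing it. */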
298 static void *fake_store(unsigned long long lba)
299 {
300 	lba = do_div(lba, sdebug_store_sectors);
301 
302 	return fake_storep + lba * scsi_debug_sector_size;
303 }
304 
305 static struct sd_dif_tuple *dif_store(sector_t sector)
306 {
307 	sector = do_div(sector, sdebug_store_sectors);
308 
309 	return dif_storep + sector;
310 }
311 
312 static int sdebug_add_adapter(void);
313 static void sdebug_remove_adapter(void);
314 
315 static void sdebug_max_tgts_luns(void)
316 {
317 	struct sdebug_host_info *sdbg_host;
318 	struct Scsi_Host *hpnt;
319 
320 	spin_lock(&sdebug_host_list_lock);
321 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
322 		hpnt = sdbg_host->shost;
323 		if ((hpnt->this_id >= 0) &&
324 		    (scsi_debug_num_tgts > hpnt->this_id))
325 			hpnt->max_id = scsi_debug_num_tgts + 1;
326 		else
327 			hpnt->max_id = scsi_debug_num_tgts;
328 		/* scsi_debug_max_luns; */
329 		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
330 	}
331 	spin_unlock(&sdebug_host_list_lock);
332 }
333 
334 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
335 			    int asc, int asq)
336 {
337 	unsigned char *sbuff;
338 
339 	sbuff = devip->sense_buff;
340 	memset(sbuff, 0, SDEBUG_SENSE_LEN);
341 
342 	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
343 
344 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
345 		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
346 		      "[0x%x,0x%x,0x%x]\n", key, asc, asq);
347 }
348 
349 static void get_data_transfer_info(unsigned char *cmd,
350 				   unsigned long long *lba, unsigned int *num,
351 				   u32 *ei_lba)
352 {
353 	*ei_lba = 0;
354 
355 	switch (*cmd) {
356 	case VARIABLE_LENGTH_CMD:
357 		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
358 			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
359 			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
360 			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;
361 
362 		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
363 			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;
364 
365 		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
366 			(u32)cmd[28] << 24;
367 		break;
368 
369 	case WRITE_SAME_16:
370 	case WRITE_16:
371 	case READ_16:
372 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
373 			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
374 			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
375 			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;
376 
377 		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
378 			(u32)cmd[10] << 24;
379 		break;
380 	case WRITE_12:
381 	case READ_12:
382 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
383 			(u32)cmd[2] << 24;
384 
385 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
386 			(u32)cmd[6] << 24;
387 		break;
388 	case WRITE_SAME:
389 	case WRITE_10:
390 	case READ_10:
391 	case XDWRITEREAD_10:
392 		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 |	(u32)cmd[3] << 16 |
393 			(u32)cmd[2] << 24;
394 
395 		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
396 		break;
397 	case WRITE_6:
398 	case READ_6:
399 		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
400 			(u32)(cmd[1] & 0x1f) << 16;
401 		*num = (0 == cmd[4]) ? 256 : cmd[4];
402 		break;
403 	default:
404 		break;
405 	}
406 }
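/* Illustrative: a READ(10) CDB of 28 00 00 00 12 34 00 00 08 00 decodes
 * above to *lba = 0x1234 and *num = 8; *ei_lba only becomes non-zero for the
 * 32 byte VARIABLE_LENGTH_CMD case. */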
407 
408 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
409 {
410 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
411 		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
412 	}
413 	return -EINVAL;
414 	/* return -ENOTTY; // correct return but upsets fdisk */
415 }
416 
417 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
418 			   struct sdebug_dev_info * devip)
419 {
420 	if (devip->reset) {
421 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
422 			printk(KERN_INFO "scsi_debug: Reporting Unit "
423 			       "attention: power on reset\n");
424 		devip->reset = 0;
425 		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
426 		return check_condition_result;
427 	}
428 	if ((0 == reset_only) && devip->stopped) {
429 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
430 			printk(KERN_INFO "scsi_debug: Reporting Not "
431 			       "ready: initializing command required\n");
432 		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
433 				0x2);
434 		return check_condition_result;
435 	}
436 	return 0;
437 }
438 
439 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
440 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
441 				int arr_len)
442 {
443 	int act_len;
444 	struct scsi_data_buffer *sdb = scsi_in(scp);
445 
446 	if (!sdb->length)
447 		return 0;
448 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
449 		return (DID_ERROR << 16);
450 
451 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
452 				      arr, arr_len);
453 	sdb->resid = scsi_bufflen(scp) - act_len;
454 
455 	return 0;
456 }
457 
458 /* Returns number of bytes fetched into 'arr' or -1 if error. */
459 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
460 			       int arr_len)
461 {
462 	if (!scsi_bufflen(scp))
463 		return 0;
464 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
465 		return -1;
466 
467 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
468 }
469 
470 
471 static const char * inq_vendor_id = "Linux   ";
472 static const char * inq_product_id = "scsi_debug      ";
473 static const char * inq_product_rev = "0004";
474 
475 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
476 			   int target_dev_id, int dev_id_num,
477 			   const char * dev_id_str,
478 			   int dev_id_str_len)
479 {
480 	int num, port_a;
481 	char b[32];
482 
483 	port_a = target_dev_id + 1;
484 	/* T10 vendor identifier field format (faked) */
485 	arr[0] = 0x2;	/* ASCII */
486 	arr[1] = 0x1;
487 	arr[2] = 0x0;
488 	memcpy(&arr[4], inq_vendor_id, 8);
489 	memcpy(&arr[12], inq_product_id, 16);
490 	memcpy(&arr[28], dev_id_str, dev_id_str_len);
491 	num = 8 + 16 + dev_id_str_len;
492 	arr[3] = num;
493 	num += 4;
494 	if (dev_id_num >= 0) {
495 		/* NAA-5, Logical unit identifier (binary) */
496 		arr[num++] = 0x1;	/* binary (not necessarily sas) */
497 		arr[num++] = 0x3;	/* PIV=0, lu, naa */
498 		arr[num++] = 0x0;
499 		arr[num++] = 0x8;
500 		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
501 		arr[num++] = 0x33;
502 		arr[num++] = 0x33;
503 		arr[num++] = 0x30;
504 		arr[num++] = (dev_id_num >> 24);
505 		arr[num++] = (dev_id_num >> 16) & 0xff;
506 		arr[num++] = (dev_id_num >> 8) & 0xff;
507 		arr[num++] = dev_id_num & 0xff;
508 		/* Target relative port number */
509 		arr[num++] = 0x61;	/* proto=sas, binary */
510 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
511 		arr[num++] = 0x0;	/* reserved */
512 		arr[num++] = 0x4;	/* length */
513 		arr[num++] = 0x0;	/* reserved */
514 		arr[num++] = 0x0;	/* reserved */
515 		arr[num++] = 0x0;
516 		arr[num++] = 0x1;	/* relative port A */
517 	}
518 	/* NAA-5, Target port identifier */
519 	arr[num++] = 0x61;	/* proto=sas, binary */
520 	arr[num++] = 0x93;	/* piv=1, target port, naa */
521 	arr[num++] = 0x0;
522 	arr[num++] = 0x8;
523 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
524 	arr[num++] = 0x22;
525 	arr[num++] = 0x22;
526 	arr[num++] = 0x20;
527 	arr[num++] = (port_a >> 24);
528 	arr[num++] = (port_a >> 16) & 0xff;
529 	arr[num++] = (port_a >> 8) & 0xff;
530 	arr[num++] = port_a & 0xff;
531 	/* NAA-5, Target port group identifier */
532 	arr[num++] = 0x61;	/* proto=sas, binary */
533 	arr[num++] = 0x95;	/* piv=1, target port group id */
534 	arr[num++] = 0x0;
535 	arr[num++] = 0x4;
536 	arr[num++] = 0;
537 	arr[num++] = 0;
538 	arr[num++] = (port_group_id >> 8) & 0xff;
539 	arr[num++] = port_group_id & 0xff;
540 	/* NAA-5, Target device identifier */
541 	arr[num++] = 0x61;	/* proto=sas, binary */
542 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
543 	arr[num++] = 0x0;
544 	arr[num++] = 0x8;
545 	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
546 	arr[num++] = 0x22;
547 	arr[num++] = 0x22;
548 	arr[num++] = 0x20;
549 	arr[num++] = (target_dev_id >> 24);
550 	arr[num++] = (target_dev_id >> 16) & 0xff;
551 	arr[num++] = (target_dev_id >> 8) & 0xff;
552 	arr[num++] = target_dev_id & 0xff;
553 	/* SCSI name string: Target device identifier */
554 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
555 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
556 	arr[num++] = 0x0;
557 	arr[num++] = 24;
558 	memcpy(arr + num, "naa.52222220", 12);
559 	num += 12;
560 	snprintf(b, sizeof(b), "%08X", target_dev_id);
561 	memcpy(arr + num, b, 8);
562 	num += 8;
563 	memset(arr + num, 0, 4);
564 	num += 4;
565 	return num;
566 }
567 
568 
569 static unsigned char vpd84_data[] = {
570 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
571     0x22,0x22,0x22,0x0,0xbb,0x1,
572     0x22,0x22,0x22,0x0,0xbb,0x2,
573 };
574 
575 static int inquiry_evpd_84(unsigned char * arr)
576 {
577 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
578 	return sizeof(vpd84_data);
579 }
580 
581 static int inquiry_evpd_85(unsigned char * arr)
582 {
583 	int num = 0;
584 	const char * na1 = "https://www.kernel.org/config";
585 	const char * na2 = "http://www.kernel.org/log";
586 	int plen, olen;
587 
588 	arr[num++] = 0x1;	/* lu, storage config */
589 	arr[num++] = 0x0;	/* reserved */
590 	arr[num++] = 0x0;
591 	olen = strlen(na1);
592 	plen = olen + 1;
593 	if (plen % 4)
594 		plen = ((plen / 4) + 1) * 4;
595 	arr[num++] = plen;	/* length, null terminated, padded */
596 	memcpy(arr + num, na1, olen);
597 	memset(arr + num + olen, 0, plen - olen);
598 	num += plen;
599 
600 	arr[num++] = 0x4;	/* lu, logging */
601 	arr[num++] = 0x0;	/* reserved */
602 	arr[num++] = 0x0;
603 	olen = strlen(na2);
604 	plen = olen + 1;
605 	if (plen % 4)
606 		plen = ((plen / 4) + 1) * 4;
607 	arr[num++] = plen;	/* length, null terminated, padded */
608 	memcpy(arr + num, na2, olen);
609 	memset(arr + num + olen, 0, plen - olen);
610 	num += plen;
611 
612 	return num;
613 }
614 
615 /* SCSI ports VPD page */
616 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
617 {
618 	int num = 0;
619 	int port_a, port_b;
620 
621 	port_a = target_dev_id + 1;
622 	port_b = port_a + 1;
623 	arr[num++] = 0x0;	/* reserved */
624 	arr[num++] = 0x0;	/* reserved */
625 	arr[num++] = 0x0;
626 	arr[num++] = 0x1;	/* relative port 1 (primary) */
627 	memset(arr + num, 0, 6);
628 	num += 6;
629 	arr[num++] = 0x0;
630 	arr[num++] = 12;	/* target port descriptor length */
631 	/* naa-5 target port identifier (A) */
632 	arr[num++] = 0x61;	/* proto=sas, binary */
633 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
634 	arr[num++] = 0x0;	/* reserved */
635 	arr[num++] = 0x8;	/* length */
636 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
637 	arr[num++] = 0x22;
638 	arr[num++] = 0x22;
639 	arr[num++] = 0x20;
640 	arr[num++] = (port_a >> 24);
641 	arr[num++] = (port_a >> 16) & 0xff;
642 	arr[num++] = (port_a >> 8) & 0xff;
643 	arr[num++] = port_a & 0xff;
644 
645 	arr[num++] = 0x0;	/* reserved */
646 	arr[num++] = 0x0;	/* reserved */
647 	arr[num++] = 0x0;
648 	arr[num++] = 0x2;	/* relative port 2 (secondary) */
649 	memset(arr + num, 0, 6);
650 	num += 6;
651 	arr[num++] = 0x0;
652 	arr[num++] = 12;	/* target port descriptor length */
653 	/* naa-5 target port identifier (B) */
654 	arr[num++] = 0x61;	/* proto=sas, binary */
655 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
656 	arr[num++] = 0x0;	/* reserved */
657 	arr[num++] = 0x8;	/* length */
658 	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
659 	arr[num++] = 0x22;
660 	arr[num++] = 0x22;
661 	arr[num++] = 0x20;
662 	arr[num++] = (port_b >> 24);
663 	arr[num++] = (port_b >> 16) & 0xff;
664 	arr[num++] = (port_b >> 8) & 0xff;
665 	arr[num++] = port_b & 0xff;
666 
667 	return num;
668 }
669 
670 
671 static unsigned char vpd89_data[] = {
672 /* from 4th byte */ 0,0,0,0,
673 'l','i','n','u','x',' ',' ',' ',
674 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
675 '1','2','3','4',
676 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
677 0xec,0,0,0,
678 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
679 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
680 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
681 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
682 0x53,0x41,
683 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
684 0x20,0x20,
685 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
686 0x10,0x80,
687 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
688 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
689 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
691 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
692 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
693 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
698 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
699 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
700 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
712 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
713 };
714 
715 static int inquiry_evpd_89(unsigned char * arr)
716 {
717 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
718 	return sizeof(vpd89_data);
719 }
720 
721 
722 /* Block limits VPD page (SBC-3) */
723 static unsigned char vpdb0_data[] = {
724 	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
725 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
726 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
727 	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
728 };
729 
730 static int inquiry_evpd_b0(unsigned char * arr)
731 {
732 	unsigned int gran;
733 
734 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
735 
736 	/* Optimal transfer length granularity */
737 	gran = 1 << scsi_debug_physblk_exp;
738 	arr[2] = (gran >> 8) & 0xff;
739 	arr[3] = gran & 0xff;
740 
741 	/* Maximum Transfer Length */
742 	if (sdebug_store_sectors > 0x400) {
743 		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
744 		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
745 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
746 		arr[7] = sdebug_store_sectors & 0xff;
747 	}
748 
749 	/* Optimal Transfer Length */
750 	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
751 
752 	if (scsi_debug_lbpu) {
753 		/* Maximum Unmap LBA Count */
754 		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
755 
756 		/* Maximum Unmap Block Descriptor Count */
757 		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
758 	}
759 
760 	/* Unmap Granularity Alignment */
761 	if (scsi_debug_unmap_alignment) {
762 		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
763 		arr[28] |= 0x80; /* UGAVALID */
764 	}
765 
766 	/* Optimal Unmap Granularity */
767 	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
768 
769 	/* Maximum WRITE SAME Length */
770 	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
771 
772 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
773 
774 	/* unreachable: return sizeof(vpdb0_data); */
775 }
776 
777 /* Block device characteristics VPD page (SBC-3) */
778 static int inquiry_evpd_b1(unsigned char *arr)
779 {
780 	memset(arr, 0, 0x3c);
781 	arr[0] = 0;
782 	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
783 	arr[2] = 0;
784 	arr[3] = 5;	/* less than 1.8" */
785 
786 	return 0x3c;
787 }
788 
789 /* Logical block provisioning VPD page (SBC-3) */
790 static int inquiry_evpd_b2(unsigned char *arr)
791 {
792 	memset(arr, 0, 0x4);
793 	arr[0] = 0;			/* threshold exponent */
794 
795 	if (scsi_debug_lbpu)
796 		arr[1] = 1 << 7;
797 
798 	if (scsi_debug_lbpws)
799 		arr[1] |= 1 << 6;
800 
801 	if (scsi_debug_lbpws10)
802 		arr[1] |= 1 << 5;
803 
804 	if (scsi_debug_lbprz)
805 		arr[1] |= 1 << 2;
806 
807 	return 0x4;
808 }
809 
810 #define SDEBUG_LONG_INQ_SZ 96
811 #define SDEBUG_MAX_INQ_ARR_SZ 584
812 
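/* Illustrative: a standard INQUIRY gets the 96 byte response built at the end
 * of resp_inquiry(); setting the EVPD bit with page code 0x83 (e.g. sg_inq
 * --page=0x83 /dev/sdX from sg3_utils) exercises inquiry_evpd_83() instead. */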
813 static int resp_inquiry(struct scsi_cmnd * scp, int target,
814 			struct sdebug_dev_info * devip)
815 {
816 	unsigned char pq_pdt;
817 	unsigned char * arr;
818 	unsigned char *cmd = (unsigned char *)scp->cmnd;
819 	int alloc_len, n, ret;
820 
821 	alloc_len = (cmd[3] << 8) + cmd[4];
822 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
823 	if (! arr)
824 		return DID_REQUEUE << 16;
825 	if (devip->wlun)
826 		pq_pdt = 0x1e;	/* present, wlun */
827 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
828 		pq_pdt = 0x7f;	/* not present, no device type */
829 	else
830 		pq_pdt = (scsi_debug_ptype & 0x1f);
831 	arr[0] = pq_pdt;
832 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
833 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
834 			       	0);
835 		kfree(arr);
836 		return check_condition_result;
837 	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
838 		int lu_id_num, port_group_id, target_dev_id, len;
839 		char lu_id_str[6];
840 		int host_no = devip->sdbg_host->shost->host_no;
841 
842 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
843 		    (devip->channel & 0x7f);
844 		if (0 == scsi_debug_vpd_use_hostno)
845 			host_no = 0;
846 		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
847 			    (devip->target * 1000) + devip->lun);
848 		target_dev_id = ((host_no + 1) * 2000) +
849 				 (devip->target * 1000) - 3;
850 		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
851 		if (0 == cmd[2]) { /* supported vital product data pages */
852 			arr[1] = cmd[2];	/*sanity */
853 			n = 4;
854 			arr[n++] = 0x0;   /* this page */
855 			arr[n++] = 0x80;  /* unit serial number */
856 			arr[n++] = 0x83;  /* device identification */
857 			arr[n++] = 0x84;  /* software interface ident. */
858 			arr[n++] = 0x85;  /* management network addresses */
859 			arr[n++] = 0x86;  /* extended inquiry */
860 			arr[n++] = 0x87;  /* mode page policy */
861 			arr[n++] = 0x88;  /* SCSI ports */
862 			arr[n++] = 0x89;  /* ATA information */
863 			arr[n++] = 0xb0;  /* Block limits (SBC) */
864 			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
865 			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
866 				arr[n++] = 0xb2;
867 			arr[3] = n - 4;	  /* number of supported VPD pages */
868 		} else if (0x80 == cmd[2]) { /* unit serial number */
869 			arr[1] = cmd[2];	/*sanity */
870 			arr[3] = len;
871 			memcpy(&arr[4], lu_id_str, len);
872 		} else if (0x83 == cmd[2]) { /* device identification */
873 			arr[1] = cmd[2];	/*sanity */
874 			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
875 						 target_dev_id, lu_id_num,
876 						 lu_id_str, len);
877 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
878 			arr[1] = cmd[2];	/*sanity */
879 			arr[3] = inquiry_evpd_84(&arr[4]);
880 		} else if (0x85 == cmd[2]) { /* Management network addresses */
881 			arr[1] = cmd[2];	/*sanity */
882 			arr[3] = inquiry_evpd_85(&arr[4]);
883 		} else if (0x86 == cmd[2]) { /* extended inquiry */
884 			arr[1] = cmd[2];	/*sanity */
885 			arr[3] = 0x3c;	/* number of following entries */
886 			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
887 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
888 			else if (scsi_debug_dif)
889 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
890 			else
891 				arr[4] = 0x0;   /* no protection stuff */
892 			arr[5] = 0x7;   /* head of q, ordered + simple q's */
893 		} else if (0x87 == cmd[2]) { /* mode page policy */
894 			arr[1] = cmd[2];	/*sanity */
895 			arr[3] = 0x8;	/* number of following entries */
896 			arr[4] = 0x2;	/* disconnect-reconnect mp */
897 			arr[6] = 0x80;	/* mlus, shared */
898 			arr[8] = 0x18;	 /* protocol specific lu */
899 			arr[10] = 0x82;	 /* mlus, per initiator port */
900 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
901 			arr[1] = cmd[2];	/*sanity */
902 			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
903 		} else if (0x89 == cmd[2]) { /* ATA information */
904 			arr[1] = cmd[2];        /*sanity */
905 			n = inquiry_evpd_89(&arr[4]);
906 			arr[2] = (n >> 8);
907 			arr[3] = (n & 0xff);
908 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
909 			arr[1] = cmd[2];        /*sanity */
910 			arr[3] = inquiry_evpd_b0(&arr[4]);
911 		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
912 			arr[1] = cmd[2];        /*sanity */
913 			arr[3] = inquiry_evpd_b1(&arr[4]);
914 		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
915 			arr[1] = cmd[2];        /*sanity */
916 			arr[3] = inquiry_evpd_b2(&arr[4]);
917 		} else {
918 			/* Illegal request, invalid field in cdb */
919 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
920 					INVALID_FIELD_IN_CDB, 0);
921 			kfree(arr);
922 			return check_condition_result;
923 		}
924 		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
925 		ret = fill_from_dev_buffer(scp, arr,
926 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
927 		kfree(arr);
928 		return ret;
929 	}
930 	/* drops through here for a standard inquiry */
931 	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
932 	arr[2] = scsi_debug_scsi_level;
933 	arr[3] = 2;    /* response_data_format==2 */
934 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
935 	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
936 	if (0 == scsi_debug_vpd_use_hostno)
937 		arr[5] = 0x10; /* claim: implicit TGPS */
938 	arr[6] = 0x10; /* claim: MultiP */
939 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
940 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
941 	memcpy(&arr[8], inq_vendor_id, 8);
942 	memcpy(&arr[16], inq_product_id, 16);
943 	memcpy(&arr[32], inq_product_rev, 4);
944 	/* version descriptors (2 bytes each) follow */
945 	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
946 	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
947 	n = 62;
948 	if (scsi_debug_ptype == 0) {
949 		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
950 	} else if (scsi_debug_ptype == 1) {
951 		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
952 	}
953 	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
954 	ret = fill_from_dev_buffer(scp, arr,
955 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
956 	kfree(arr);
957 	return ret;
958 }
959 
960 static int resp_requests(struct scsi_cmnd * scp,
961 			 struct sdebug_dev_info * devip)
962 {
963 	unsigned char * sbuff;
964 	unsigned char *cmd = (unsigned char *)scp->cmnd;
965 	unsigned char arr[SDEBUG_SENSE_LEN];
966 	int want_dsense;
967 	int len = 18;
968 
969 	memset(arr, 0, sizeof(arr));
970 	if (devip->reset == 1)
971 		mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
972 	want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
973 	sbuff = devip->sense_buff;
974 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
975 		if (want_dsense) {
976 			arr[0] = 0x72;
977 			arr[1] = 0x0;		/* NO_SENSE in sense_key */
978 			arr[2] = THRESHOLD_EXCEEDED;
979 			arr[3] = 0xff;		/* TEST set and MRIE==6 */
980 		} else {
981 			arr[0] = 0x70;
982 			arr[2] = 0x0;		/* NO_SENSE in sense_key */
983 			arr[7] = 0xa;   	/* 18 byte sense buffer */
984 			arr[12] = THRESHOLD_EXCEEDED;
985 			arr[13] = 0xff;		/* TEST set and MRIE==6 */
986 		}
987 	} else {
988 		memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
989 		if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
990 			/* DESC bit set and sense_buff in fixed format */
991 			memset(arr, 0, sizeof(arr));
992 			arr[0] = 0x72;
993 			arr[1] = sbuff[2];     /* sense key */
994 			arr[2] = sbuff[12];    /* asc */
995 			arr[3] = sbuff[13];    /* ascq */
996 			len = 8;
997 		}
998 	}
999 	mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
1000 	return fill_from_dev_buffer(scp, arr, len);
1001 }
1002 
1003 static int resp_start_stop(struct scsi_cmnd * scp,
1004 			   struct sdebug_dev_info * devip)
1005 {
1006 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1007 	int power_cond, errsts, start;
1008 
1009 	if ((errsts = check_readiness(scp, 1, devip)))
1010 		return errsts;
1011 	power_cond = (cmd[4] & 0xf0) >> 4;
1012 	if (power_cond) {
1013 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1014 			       	0);
1015 		return check_condition_result;
1016 	}
1017 	start = cmd[4] & 1;
1018 	if (start == devip->stopped)
1019 		devip->stopped = !start;
1020 	return 0;
1021 }
1022 
1023 static sector_t get_sdebug_capacity(void)
1024 {
1025 	if (scsi_debug_virtual_gb > 0)
1026 		return (sector_t)scsi_debug_virtual_gb *
1027 			(1073741824 / scsi_debug_sector_size);
1028 	else
1029 		return sdebug_store_sectors;
1030 }
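/* Illustrative: with the default 512 byte sectors, virtual_gb=1 reports
 * 1073741824/512 = 2097152 sectors regardless of dev_size_mb; blocks beyond
 * the real store then wrap as described above fake_store(). */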
1031 
1032 #define SDEBUG_READCAP_ARR_SZ 8
1033 static int resp_readcap(struct scsi_cmnd * scp,
1034 			struct sdebug_dev_info * devip)
1035 {
1036 	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1037 	unsigned int capac;
1038 	int errsts;
1039 
1040 	if ((errsts = check_readiness(scp, 1, devip)))
1041 		return errsts;
1042 	/* following just in case virtual_gb changed */
1043 	sdebug_capacity = get_sdebug_capacity();
1044 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1045 	if (sdebug_capacity < 0xffffffff) {
1046 		capac = (unsigned int)sdebug_capacity - 1;
1047 		arr[0] = (capac >> 24);
1048 		arr[1] = (capac >> 16) & 0xff;
1049 		arr[2] = (capac >> 8) & 0xff;
1050 		arr[3] = capac & 0xff;
1051 	} else {
1052 		arr[0] = 0xff;
1053 		arr[1] = 0xff;
1054 		arr[2] = 0xff;
1055 		arr[3] = 0xff;
1056 	}
1057 	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1058 	arr[7] = scsi_debug_sector_size & 0xff;
1059 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1060 }
1061 
1062 #define SDEBUG_READCAP16_ARR_SZ 32
1063 static int resp_readcap16(struct scsi_cmnd * scp,
1064 			  struct sdebug_dev_info * devip)
1065 {
1066 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1067 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1068 	unsigned long long capac;
1069 	int errsts, k, alloc_len;
1070 
1071 	if ((errsts = check_readiness(scp, 1, devip)))
1072 		return errsts;
1073 	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1074 		     + cmd[13]);
1075 	/* following just in case virtual_gb changed */
1076 	sdebug_capacity = get_sdebug_capacity();
1077 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1078 	capac = sdebug_capacity - 1;
1079 	for (k = 0; k < 8; ++k, capac >>= 8)
1080 		arr[7 - k] = capac & 0xff;
1081 	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1082 	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1083 	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1084 	arr[11] = scsi_debug_sector_size & 0xff;
1085 	arr[13] = scsi_debug_physblk_exp & 0xf;
1086 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1087 
1088 	if (scsi_debug_lbp()) {
1089 		arr[14] |= 0x80; /* LBPME */
1090 		if (scsi_debug_lbprz)
1091 			arr[14] |= 0x40; /* LBPRZ */
1092 	}
1093 
1094 	arr[15] = scsi_debug_lowest_aligned & 0xff;
1095 
1096 	if (scsi_debug_dif) {
1097 		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1098 		arr[12] |= 1; /* PROT_EN */
1099 	}
1100 
1101 	return fill_from_dev_buffer(scp, arr,
1102 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1103 }
1104 
1105 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1106 
1107 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1108 			      struct sdebug_dev_info * devip)
1109 {
1110 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1111 	unsigned char * arr;
1112 	int host_no = devip->sdbg_host->shost->host_no;
1113 	int n, ret, alen, rlen;
1114 	int port_group_a, port_group_b, port_a, port_b;
1115 
1116 	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1117 		+ cmd[9]);
1118 
1119 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1120 	if (! arr)
1121 		return DID_REQUEUE << 16;
1122 	/*
1123 	 * EVPD page 0x88 states we have two ports, one
1124 	 * real and a fake port with no device connected.
1125 	 * So we create two port groups with one port each
1126 	 * and set the group with port B to unavailable.
1127 	 */
1128 	port_a = 0x1; /* relative port A */
1129 	port_b = 0x2; /* relative port B */
1130 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
1131 	    (devip->channel & 0x7f);
1132 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
1133 	    (devip->channel & 0x7f) + 0x80;
1134 
1135 	/*
1136 	 * The asymmetric access state is cycled according to the host_id.
1137 	 */
1138 	n = 4;
1139 	if (0 == scsi_debug_vpd_use_hostno) {
1140 		arr[n++] = host_no % 3; /* Asymm access state */
1141 		arr[n++] = 0x0F; /* claim: all states are supported */
1142 	} else {
1143 		arr[n++] = 0x0; /* Active/Optimized path */
1144 		arr[n++] = 0x01; /* claim: only support active/optimized paths */
1145 	}
1146 	arr[n++] = (port_group_a >> 8) & 0xff;
1147 	arr[n++] = port_group_a & 0xff;
1148 	arr[n++] = 0;    /* Reserved */
1149 	arr[n++] = 0;    /* Status code */
1150 	arr[n++] = 0;    /* Vendor unique */
1151 	arr[n++] = 0x1;  /* One port per group */
1152 	arr[n++] = 0;    /* Reserved */
1153 	arr[n++] = 0;    /* Reserved */
1154 	arr[n++] = (port_a >> 8) & 0xff;
1155 	arr[n++] = port_a & 0xff;
1156 	arr[n++] = 3;    /* Port unavailable */
1157 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1158 	arr[n++] = (port_group_b >> 8) & 0xff;
1159 	arr[n++] = port_group_b & 0xff;
1160 	arr[n++] = 0;    /* Reserved */
1161 	arr[n++] = 0;    /* Status code */
1162 	arr[n++] = 0;    /* Vendor unique */
1163 	arr[n++] = 0x1;  /* One port per group */
1164 	arr[n++] = 0;    /* Reserved */
1165 	arr[n++] = 0;    /* Reserved */
1166 	arr[n++] = (port_b >> 8) & 0xff;
1167 	arr[n++] = port_b & 0xff;
1168 
1169 	rlen = n - 4;
1170 	arr[0] = (rlen >> 24) & 0xff;
1171 	arr[1] = (rlen >> 16) & 0xff;
1172 	arr[2] = (rlen >> 8) & 0xff;
1173 	arr[3] = rlen & 0xff;
1174 
1175 	/*
1176 	 * Return the smallest of:
1177 	 * - the allocation length from the CDB
1178 	 * - the constructed response length
1179 	 * - the maximum array size
1180 	 */
1181 	rlen = min(alen, n);
1182 	ret = fill_from_dev_buffer(scp, arr,
1183 				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1184 	kfree(arr);
1185 	return ret;
1186 }
1187 
1188 /* <<Following mode page info copied from ST318451LW>> */
1189 
1190 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1191 {	/* Read-Write Error Recovery page for mode_sense */
1192 	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1193 					5, 0, 0xff, 0xff};
1194 
1195 	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1196 	if (1 == pcontrol)
1197 		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1198 	return sizeof(err_recov_pg);
1199 }
1200 
1201 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1202 { 	/* Disconnect-Reconnect page for mode_sense */
1203 	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1204 					 0, 0, 0, 0, 0, 0, 0, 0};
1205 
1206 	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1207 	if (1 == pcontrol)
1208 		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1209 	return sizeof(disconnect_pg);
1210 }
1211 
1212 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1213 {       /* Format device page for mode_sense */
1214 	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1215 				     0, 0, 0, 0, 0, 0, 0, 0,
1216 				     0, 0, 0, 0, 0x40, 0, 0, 0};
1217 
1218 	memcpy(p, format_pg, sizeof(format_pg));
1219 	p[10] = (sdebug_sectors_per >> 8) & 0xff;
1220 	p[11] = sdebug_sectors_per & 0xff;
1221 	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1222 	p[13] = scsi_debug_sector_size & 0xff;
1223 	if (scsi_debug_removable)
1224 		p[20] |= 0x20; /* should agree with INQUIRY */
1225 	if (1 == pcontrol)
1226 		memset(p + 2, 0, sizeof(format_pg) - 2);
1227 	return sizeof(format_pg);
1228 }
1229 
1230 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1231 { 	/* Caching page for mode_sense */
1232 	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1233 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1234 
1235 	memcpy(p, caching_pg, sizeof(caching_pg));
1236 	if (1 == pcontrol)
1237 		memset(p + 2, 0, sizeof(caching_pg) - 2);
1238 	return sizeof(caching_pg);
1239 }
1240 
1241 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1242 { 	/* Control mode page for mode_sense */
1243 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1244 				        0, 0, 0, 0};
1245 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1246 				     0, 0, 0x2, 0x4b};
1247 
1248 	if (scsi_debug_dsense)
1249 		ctrl_m_pg[2] |= 0x4;
1250 	else
1251 		ctrl_m_pg[2] &= ~0x4;
1252 
1253 	if (scsi_debug_ato)
1254 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1255 
1256 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1257 	if (1 == pcontrol)
1258 		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1259 	else if (2 == pcontrol)
1260 		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1261 	return sizeof(ctrl_m_pg);
1262 }
1263 
1264 
1265 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1266 {	/* Informational Exceptions control mode page for mode_sense */
1267 	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1268 				       0, 0, 0x0, 0x0};
1269 	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1270 				      0, 0, 0x0, 0x0};
1271 
1272 	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1273 	if (1 == pcontrol)
1274 		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1275 	else if (2 == pcontrol)
1276 		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1277 	return sizeof(iec_m_pg);
1278 }
1279 
1280 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1281 {	/* SAS SSP mode page - short format for mode_sense */
1282 	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1283 		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1284 
1285 	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1286 	if (1 == pcontrol)
1287 		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1288 	return sizeof(sas_sf_m_pg);
1289 }
1290 
1291 
1292 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1293 			      int target_dev_id)
1294 {	/* SAS phy control and discover mode page for mode_sense */
1295 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1296 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1297 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1298 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1299 		    0x2, 0, 0, 0, 0, 0, 0, 0,
1300 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1301 		    0, 0, 0, 0, 0, 0, 0, 0,
1302 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1303 		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1304 		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1305 		    0x3, 0, 0, 0, 0, 0, 0, 0,
1306 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
1307 		    0, 0, 0, 0, 0, 0, 0, 0,
1308 		};
1309 	int port_a, port_b;
1310 
1311 	port_a = target_dev_id + 1;
1312 	port_b = port_a + 1;
1313 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1314 	p[20] = (port_a >> 24);
1315 	p[21] = (port_a >> 16) & 0xff;
1316 	p[22] = (port_a >> 8) & 0xff;
1317 	p[23] = port_a & 0xff;
1318 	p[48 + 20] = (port_b >> 24);
1319 	p[48 + 21] = (port_b >> 16) & 0xff;
1320 	p[48 + 22] = (port_b >> 8) & 0xff;
1321 	p[48 + 23] = port_b & 0xff;
1322 	if (1 == pcontrol)
1323 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1324 	return sizeof(sas_pcd_m_pg);
1325 }
1326 
1327 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1328 {	/* SAS SSP shared protocol specific port mode subpage */
1329 	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1330 		    0, 0, 0, 0, 0, 0, 0, 0,
1331 		};
1332 
1333 	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1334 	if (1 == pcontrol)
1335 		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1336 	return sizeof(sas_sha_m_pg);
1337 }
1338 
1339 #define SDEBUG_MAX_MSENSE_SZ 256
1340 
1341 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1342 			   struct sdebug_dev_info * devip)
1343 {
1344 	unsigned char dbd, llbaa;
1345 	int pcontrol, pcode, subpcode, bd_len;
1346 	unsigned char dev_spec;
1347 	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1348 	unsigned char * ap;
1349 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1350 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1351 
1352 	if ((errsts = check_readiness(scp, 1, devip)))
1353 		return errsts;
1354 	dbd = !!(cmd[1] & 0x8);
1355 	pcontrol = (cmd[2] & 0xc0) >> 6;
1356 	pcode = cmd[2] & 0x3f;
1357 	subpcode = cmd[3];
1358 	msense_6 = (MODE_SENSE == cmd[0]);
1359 	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1360 	if ((0 == scsi_debug_ptype) && (0 == dbd))
1361 		bd_len = llbaa ? 16 : 8;
1362 	else
1363 		bd_len = 0;
1364 	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1365 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1366 	if (0x3 == pcontrol) {  /* Saving values not supported */
1367 		mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1368 			       	0);
1369 		return check_condition_result;
1370 	}
1371 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1372 			(devip->target * 1000) - 3;
1373 	/* set DPOFUA bit for disks */
1374 	if (0 == scsi_debug_ptype)
1375 		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1376 	else
1377 		dev_spec = 0x0;
1378 	if (msense_6) {
1379 		arr[2] = dev_spec;
1380 		arr[3] = bd_len;
1381 		offset = 4;
1382 	} else {
1383 		arr[3] = dev_spec;
1384 		if (16 == bd_len)
1385 			arr[4] = 0x1;	/* set LONGLBA bit */
1386 		arr[7] = bd_len;	/* assume 255 or less */
1387 		offset = 8;
1388 	}
1389 	ap = arr + offset;
1390 	if ((bd_len > 0) && (!sdebug_capacity))
1391 		sdebug_capacity = get_sdebug_capacity();
1392 
1393 	if (8 == bd_len) {
1394 		if (sdebug_capacity > 0xfffffffe) {
1395 			ap[0] = 0xff;
1396 			ap[1] = 0xff;
1397 			ap[2] = 0xff;
1398 			ap[3] = 0xff;
1399 		} else {
1400 			ap[0] = (sdebug_capacity >> 24) & 0xff;
1401 			ap[1] = (sdebug_capacity >> 16) & 0xff;
1402 			ap[2] = (sdebug_capacity >> 8) & 0xff;
1403 			ap[3] = sdebug_capacity & 0xff;
1404 		}
1405 		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1406 		ap[7] = scsi_debug_sector_size & 0xff;
1407 		offset += bd_len;
1408 		ap = arr + offset;
1409 	} else if (16 == bd_len) {
1410 		unsigned long long capac = sdebug_capacity;
1411 
1412 		for (k = 0; k < 8; ++k, capac >>= 8)
1413 			ap[7 - k] = capac & 0xff;
1414 		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1415 		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1416 		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1417 		ap[15] = scsi_debug_sector_size & 0xff;
1418 		offset += bd_len;
1419 		ap = arr + offset;
1420 	}
1421 
1422 	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1423 		/* TODO: Control Extension page */
1424 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1425 			       	0);
1426 		return check_condition_result;
1427 	}
1428 	switch (pcode) {
1429 	case 0x1:	/* Read-Write error recovery page, direct access */
1430 		len = resp_err_recov_pg(ap, pcontrol, target);
1431 		offset += len;
1432 		break;
1433 	case 0x2:	/* Disconnect-Reconnect page, all devices */
1434 		len = resp_disconnect_pg(ap, pcontrol, target);
1435 		offset += len;
1436 		break;
1437 	case 0x3:	/* Format device page, direct access */
1438 		len = resp_format_pg(ap, pcontrol, target);
1439 		offset += len;
1440 		break;
1441 	case 0x8:	/* Caching page, direct access */
1442 		len = resp_caching_pg(ap, pcontrol, target);
1443 		offset += len;
1444 		break;
1445 	case 0xa:	/* Control Mode page, all devices */
1446 		len = resp_ctrl_m_pg(ap, pcontrol, target);
1447 		offset += len;
1448 		break;
1449 	case 0x19:	/* if spc==1 then sas phy, control+discover */
1450 		if ((subpcode > 0x2) && (subpcode < 0xff)) {
1451 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1452 					INVALID_FIELD_IN_CDB, 0);
1453 			return check_condition_result;
1454 		}
1455 		len = 0;
1456 		if ((0x0 == subpcode) || (0xff == subpcode))
1457 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1458 		if ((0x1 == subpcode) || (0xff == subpcode))
1459 			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1460 						  target_dev_id);
1461 		if ((0x2 == subpcode) || (0xff == subpcode))
1462 			len += resp_sas_sha_m_spg(ap + len, pcontrol);
1463 		offset += len;
1464 		break;
1465 	case 0x1c:	/* Informational Exceptions Mode page, all devices */
1466 		len = resp_iec_m_pg(ap, pcontrol, target);
1467 		offset += len;
1468 		break;
1469 	case 0x3f:	/* Read all Mode pages */
1470 		if ((0 == subpcode) || (0xff == subpcode)) {
1471 			len = resp_err_recov_pg(ap, pcontrol, target);
1472 			len += resp_disconnect_pg(ap + len, pcontrol, target);
1473 			len += resp_format_pg(ap + len, pcontrol, target);
1474 			len += resp_caching_pg(ap + len, pcontrol, target);
1475 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1476 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1477 			if (0xff == subpcode) {
1478 				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1479 						  target, target_dev_id);
1480 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
1481 			}
1482 			len += resp_iec_m_pg(ap + len, pcontrol, target);
1483 		} else {
1484 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1485 					INVALID_FIELD_IN_CDB, 0);
1486 			return check_condition_result;
1487 		}
1488 		offset += len;
1489 		break;
1490 	default:
1491 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1492 			       	0);
1493 		return check_condition_result;
1494 	}
1495 	if (msense_6)
1496 		arr[0] = offset - 1;
1497 	else {
1498 		arr[0] = ((offset - 2) >> 8) & 0xff;
1499 		arr[1] = (offset - 2) & 0xff;
1500 	}
1501 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1502 }
1503 
1504 #define SDEBUG_MAX_MSELECT_SZ 512
1505 
1506 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1507 			    struct sdebug_dev_info * devip)
1508 {
1509 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1510 	int param_len, res, errsts, mpage;
1511 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1512 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1513 
1514 	if ((errsts = check_readiness(scp, 1, devip)))
1515 		return errsts;
1516 	memset(arr, 0, sizeof(arr));
1517 	pf = cmd[1] & 0x10;
1518 	sp = cmd[1] & 0x1;
1519 	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1520 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1521 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1522 				INVALID_FIELD_IN_CDB, 0);
1523 		return check_condition_result;
1524 	}
1525 	res = fetch_to_dev_buffer(scp, arr, param_len);
1526 	if (-1 == res)
1527 		return (DID_ERROR << 16);
1528 	else if ((res < param_len) &&
1529 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1530 		printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1531 		       " IO sent=%d bytes\n", param_len, res);
1532 	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1533 	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1534 	if (md_len > 2) {
1535 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1536 				INVALID_FIELD_IN_PARAM_LIST, 0);
1537 		return check_condition_result;
1538 	}
1539 	off = bd_len + (mselect6 ? 4 : 8);
1540 	mpage = arr[off] & 0x3f;
1541 	ps = !!(arr[off] & 0x80);
1542 	if (ps) {
1543 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1544 				INVALID_FIELD_IN_PARAM_LIST, 0);
1545 		return check_condition_result;
1546 	}
1547 	spf = !!(arr[off] & 0x40);
1548 	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1549 		       (arr[off + 1] + 2);
1550 	if ((pg_len + off) > param_len) {
1551 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1552 				PARAMETER_LIST_LENGTH_ERR, 0);
1553 		return check_condition_result;
1554 	}
1555 	switch (mpage) {
1556 	case 0xa:      /* Control Mode page */
1557 		if (ctrl_m_pg[1] == arr[off + 1]) {
1558 			memcpy(ctrl_m_pg + 2, arr + off + 2,
1559 			       sizeof(ctrl_m_pg) - 2);
1560 			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1561 			return 0;
1562 		}
1563 		break;
1564 	case 0x1c:      /* Informational Exceptions Mode page */
1565 		if (iec_m_pg[1] == arr[off + 1]) {
1566 			memcpy(iec_m_pg + 2, arr + off + 2,
1567 			       sizeof(iec_m_pg) - 2);
1568 			return 0;
1569 		}
1570 		break;
1571 	default:
1572 		break;
1573 	}
1574 	mk_sense_buffer(devip, ILLEGAL_REQUEST,
1575 			INVALID_FIELD_IN_PARAM_LIST, 0);
1576 	return check_condition_result;
1577 }
1578 
1579 static int resp_temp_l_pg(unsigned char * arr)
1580 {
1581 	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1582 				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
1583 		};
1584 
1585 	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1586 	return sizeof(temp_l_pg);
1587 }
1588 
1589 static int resp_ie_l_pg(unsigned char * arr)
1590 {
1591 	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1592 		};
1593 
1594 	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1595 	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
1596 		arr[4] = THRESHOLD_EXCEEDED;
1597 		arr[5] = 0xff;
1598 	}
1599 	return sizeof(ie_l_pg);
1600 }
1601 
1602 #define SDEBUG_MAX_LSENSE_SZ 512
1603 
1604 static int resp_log_sense(struct scsi_cmnd * scp,
1605                           struct sdebug_dev_info * devip)
1606 {
1607 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1608 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1609 	unsigned char *cmd = (unsigned char *)scp->cmnd;
1610 
1611 	if ((errsts = check_readiness(scp, 1, devip)))
1612 		return errsts;
1613 	memset(arr, 0, sizeof(arr));
1614 	ppc = cmd[1] & 0x2;
1615 	sp = cmd[1] & 0x1;
1616 	if (ppc || sp) {
1617 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1618 				INVALID_FIELD_IN_CDB, 0);
1619 		return check_condition_result;
1620 	}
1621 	pcontrol = (cmd[2] & 0xc0) >> 6;
1622 	pcode = cmd[2] & 0x3f;
1623 	subpcode = cmd[3] & 0xff;
1624 	alloc_len = (cmd[7] << 8) + cmd[8];
1625 	arr[0] = pcode;
1626 	if (0 == subpcode) {
1627 		switch (pcode) {
1628 		case 0x0:	/* Supported log pages log page */
1629 			n = 4;
1630 			arr[n++] = 0x0;		/* this page */
1631 			arr[n++] = 0xd;		/* Temperature */
1632 			arr[n++] = 0x2f;	/* Informational exceptions */
1633 			arr[3] = n - 4;
1634 			break;
1635 		case 0xd:	/* Temperature log page */
1636 			arr[3] = resp_temp_l_pg(arr + 4);
1637 			break;
1638 		case 0x2f:	/* Informational exceptions log page */
1639 			arr[3] = resp_ie_l_pg(arr + 4);
1640 			break;
1641 		default:
1642 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1643 					INVALID_FIELD_IN_CDB, 0);
1644 			return check_condition_result;
1645 		}
1646 	} else if (0xff == subpcode) {
1647 		arr[0] |= 0x40;
1648 		arr[1] = subpcode;
1649 		switch (pcode) {
1650 		case 0x0:	/* Supported log pages and subpages log page */
1651 			n = 4;
1652 			arr[n++] = 0x0;
1653 			arr[n++] = 0x0;		/* 0,0 page */
1654 			arr[n++] = 0x0;
1655 			arr[n++] = 0xff;	/* this page */
1656 			arr[n++] = 0xd;
1657 			arr[n++] = 0x0;		/* Temperature */
1658 			arr[n++] = 0x2f;
1659 			arr[n++] = 0x0;	/* Informational exceptions */
1660 			arr[3] = n - 4;
1661 			break;
1662 		case 0xd:	/* Temperature subpages */
1663 			n = 4;
1664 			arr[n++] = 0xd;
1665 			arr[n++] = 0x0;		/* Temperature */
1666 			arr[3] = n - 4;
1667 			break;
1668 		case 0x2f:	/* Informational exceptions subpages */
1669 			n = 4;
1670 			arr[n++] = 0x2f;
1671 			arr[n++] = 0x0;		/* Informational exceptions */
1672 			arr[3] = n - 4;
1673 			break;
1674 		default:
1675 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
1676 					INVALID_FIELD_IN_CDB, 0);
1677 			return check_condition_result;
1678 		}
1679 	} else {
1680 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
1681 				INVALID_FIELD_IN_CDB, 0);
1682 		return check_condition_result;
1683 	}
1684 	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1685 	return fill_from_dev_buffer(scp, arr,
1686 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
1687 }
1688 
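/* Common LBA/length sanity check: reject ranges beyond the simulated
 * capacity and transfer lengths larger than the backing store.
 */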
1689 static int check_device_access_params(struct sdebug_dev_info *devi,
1690 				      unsigned long long lba, unsigned int num)
1691 {
1692 	if (lba + num > sdebug_capacity) {
1693 		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1694 		return check_condition_result;
1695 	}
1696 	/* transfer length excessive (tie in to block limits VPD page) */
1697 	if (num > sdebug_store_sectors) {
1698 		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1699 		return check_condition_result;
1700 	}
1701 	return 0;
1702 }
1703 
1704 /* Returns number of bytes copied or -1 if error. */
1705 static int do_device_access(struct scsi_cmnd *scmd,
1706 			    struct sdebug_dev_info *devi,
1707 			    unsigned long long lba, unsigned int num, int write)
1708 {
1709 	int ret;
1710 	unsigned long long block, rest = 0;
1711 	struct scsi_data_buffer *sdb;
1712 	enum dma_data_direction dir;
1713 	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1714 		       off_t);
1715 
1716 	if (write) {
1717 		sdb = scsi_out(scmd);
1718 		dir = DMA_TO_DEVICE;
1719 		func = sg_pcopy_to_buffer;
1720 	} else {
1721 		sdb = scsi_in(scmd);
1722 		dir = DMA_FROM_DEVICE;
1723 		func = sg_pcopy_from_buffer;
1724 	}
1725 
1726 	if (!sdb->length)
1727 		return 0;
1728 	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1729 		return -1;
1730 
1731 	block = do_div(lba, sdebug_store_sectors);
1732 	if (block + num > sdebug_store_sectors)
1733 		rest = block + num - sdebug_store_sectors;
1734 
1735 	ret = func(sdb->table.sgl, sdb->table.nents,
1736 		   fake_storep + (block * scsi_debug_sector_size),
1737 		   (num - rest) * scsi_debug_sector_size, 0);
1738 	if (ret != (num - rest) * scsi_debug_sector_size)
1739 		return ret;
1740 
1741 	if (rest) {
1742 		ret += func(sdb->table.sgl, sdb->table.nents,
1743 			    fake_storep, rest * scsi_debug_sector_size,
1744 			    (num - rest) * scsi_debug_sector_size);
1745 	}
1746 
1747 	return ret;
1748 }
1749 
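/* Guard tag for one sector: IP checksum when scsi_debug_guard is set,
 * otherwise CRC-T10DIF.
 */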
1750 static __be16 dif_compute_csum(const void *buf, int len)
1751 {
1752 	__be16 csum;
1753 
1754 	if (scsi_debug_guard)
1755 		csum = (__force __be16)ip_compute_csum(buf, len);
1756 	else
1757 		csum = cpu_to_be16(crc_t10dif(buf, len));
1758 
1759 	return csum;
1760 }
1761 
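/* Verify one protection tuple against its data block: guard tag always,
 * reference tag for DIF type 1 (low 32 bits of the LBA) and type 2
 * (expected initial LBA). Returns 0 on success, else an ASCQ-style code.
 */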
1762 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1763 		      sector_t sector, u32 ei_lba)
1764 {
1765 	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1766 
1767 	if (sdt->guard_tag != csum) {
1768 		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1769 			__func__,
1770 			(unsigned long)sector,
1771 			be16_to_cpu(sdt->guard_tag),
1772 			be16_to_cpu(csum));
1773 		return 0x01;
1774 	}
1775 	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1776 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1777 		pr_err("%s: REF check failed on sector %lu\n",
1778 			__func__, (unsigned long)sector);
1779 		return 0x03;
1780 	}
1781 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1782 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
1783 		pr_err("%s: REF check failed on sector %lu\n",
1784 			__func__, (unsigned long)sector);
1785 		return 0x03;
1786 	}
1787 	return 0;
1788 }
1789 
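/* Copy protection tuples between dif_storep and the command's protection
 * scatterlist, wrapping around at the end of the store.
 */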
1790 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1791 			  unsigned int sectors, bool read)
1792 {
1793 	size_t resid;
1794 	void *paddr;
1795 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
1796 	struct sg_mapping_iter miter;
1797 
1798 	/* Bytes of protection data to copy into sgl */
1799 	resid = sectors * sizeof(*dif_storep);
1800 
1801 	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1802 			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1803 			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1804 
1805 	while (sg_miter_next(&miter) && resid > 0) {
1806 		size_t len = min(miter.length, resid);
1807 		void *start = dif_store(sector);
1808 		size_t rest = 0;
1809 
1810 		if (dif_store_end < start + len)
1811 			rest = start + len - dif_store_end;
1812 
1813 		paddr = miter.addr;
1814 
1815 		if (read)
1816 			memcpy(paddr, start, len - rest);
1817 		else
1818 			memcpy(start, paddr, len - rest);
1819 
1820 		if (rest) {
1821 			if (read)
1822 				memcpy(paddr + len - rest, dif_storep, rest);
1823 			else
1824 				memcpy(dif_storep, paddr + len - rest, rest);
1825 		}
1826 
1827 		sector += len / sizeof(*dif_storep);
1828 		resid -= len;
1829 	}
1830 	sg_miter_stop(&miter);
1831 }
1832 
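/* READ with DIX: verify each sector's stored tuple (tuples with an app tag
 * of 0xffff are skipped), then copy the protection data to the initiator.
 */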
1833 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1834 			    unsigned int sectors, u32 ei_lba)
1835 {
1836 	unsigned int i;
1837 	struct sd_dif_tuple *sdt;
1838 	sector_t sector;
1839 
1840 	for (i = 0; i < sectors; i++, ei_lba++) {
1841 		int ret;
1842 
1843 		sector = start_sec + i;
1844 		sdt = dif_store(sector);
1845 
1846 		if (sdt->app_tag == cpu_to_be16(0xffff))
1847 			continue;
1848 
1849 		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1850 		if (ret) {
1851 			dif_errors++;
1852 			return ret;
1853 		}
1854 	}
1855 
1856 	dif_copy_prot(SCpnt, start_sec, sectors, true);
1857 	dix_reads++;
1858 
1859 	return 0;
1860 }
1861 
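/* Service READ: optionally fake an unrecoverable read error within a fixed
 * LBA window (OPT_MEDIUM_ERR), run the DIX verification, then copy data
 * from the ramdisk under the read lock.
 */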
1862 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1863 		     unsigned int num, struct sdebug_dev_info *devip,
1864 		     u32 ei_lba)
1865 {
1866 	unsigned long iflags;
1867 	int ret;
1868 
1869 	ret = check_device_access_params(devip, lba, num);
1870 	if (ret)
1871 		return ret;
1872 
1873 	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1874 	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1875 	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1876 		/* claim unrecoverable read error */
1877 		mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1878 		/* set info field and valid bit for fixed descriptor */
1879 		if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1880 			devip->sense_buff[0] |= 0x80;	/* Valid bit */
1881 			ret = (lba < OPT_MEDIUM_ERR_ADDR)
1882 			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1883 			devip->sense_buff[3] = (ret >> 24) & 0xff;
1884 			devip->sense_buff[4] = (ret >> 16) & 0xff;
1885 			devip->sense_buff[5] = (ret >> 8) & 0xff;
1886 			devip->sense_buff[6] = ret & 0xff;
1887 		}
1888 		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1889 		return check_condition_result;
1890 	}
1891 
1892 	read_lock_irqsave(&atomic_rw, iflags);
1893 
1894 	/* DIX + T10 DIF */
1895 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1896 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1897 
1898 		if (prot_ret) {
1899 			read_unlock_irqrestore(&atomic_rw, iflags);
1900 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1901 			return illegal_condition_result;
1902 		}
1903 	}
1904 
1905 	ret = do_device_access(SCpnt, devip, lba, num, 0);
1906 	read_unlock_irqrestore(&atomic_rw, iflags);
1907 	if (ret == -1)
1908 		return DID_ERROR << 16;
1909 
1910 	scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1911 
1912 	return 0;
1913 }
1914 
1915 static void dump_sector(unsigned char *buf, int len)
1916 {
1917 	int i, j;
1918 
1919 	printk(KERN_ERR ">>> Sector Dump <<<\n");
1920 
1921 	for (i = 0 ; i < len ; i += 16) {
1922 		printk(KERN_ERR "%04d: ", i);
1923 
1924 		for (j = 0 ; j < 16 ; j++) {
1925 			unsigned char c = buf[i+j];
1926 			if (c >= 0x20 && c < 0x7e)
1927 				printk(" %c ", buf[i+j]);
1928 			else
1929 				printk("%02x ", buf[i+j]);
1930 		}
1931 
1932 		printk("\n");
1933 	}
1934 }
1935 
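/* WRITE with DIX: walk the data and protection scatterlists in lockstep,
 * verify each tuple against its data block, then store the protection
 * data in dif_storep.
 */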
1936 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1937 			     unsigned int sectors, u32 ei_lba)
1938 {
1939 	int ret;
1940 	struct sd_dif_tuple *sdt;
1941 	void *daddr;
1942 	sector_t sector = start_sec;
1943 	int ppage_offset;
1944 	int dpage_offset;
1945 	struct sg_mapping_iter diter;
1946 	struct sg_mapping_iter piter;
1947 
1948 	BUG_ON(scsi_sg_count(SCpnt) == 0);
1949 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1950 
1951 	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
1952 			scsi_prot_sg_count(SCpnt),
1953 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1954 	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
1955 			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
1956 
1957 	/* For each protection page */
1958 	while (sg_miter_next(&piter)) {
1959 		dpage_offset = 0;
1960 		if (WARN_ON(!sg_miter_next(&diter))) {
1961 			ret = 0x01;
1962 			goto out;
1963 		}
1964 
1965 		for (ppage_offset = 0; ppage_offset < piter.length;
1966 		     ppage_offset += sizeof(struct sd_dif_tuple)) {
1967 			/* If we're at the end of the current
1968 			 * data page advance to the next one
1969 			 */
1970 			if (dpage_offset >= diter.length) {
1971 				if (WARN_ON(!sg_miter_next(&diter))) {
1972 					ret = 0x01;
1973 					goto out;
1974 				}
1975 				dpage_offset = 0;
1976 			}
1977 
1978 			sdt = piter.addr + ppage_offset;
1979 			daddr = diter.addr + dpage_offset;
1980 
1981 			ret = dif_verify(sdt, daddr, sector, ei_lba);
1982 			if (ret) {
1983 				dump_sector(daddr, scsi_debug_sector_size);
1984 				goto out;
1985 			}
1986 
1987 			sector++;
1988 			ei_lba++;
1989 			dpage_offset += scsi_debug_sector_size;
1990 		}
1991 		diter.consumed = dpage_offset;
1992 		sg_miter_stop(&diter);
1993 	}
1994 	sg_miter_stop(&piter);
1995 
1996 	dif_copy_prot(SCpnt, start_sec, sectors, false);
1997 	dix_writes++;
1998 
1999 	return 0;
2000 
2001 out:
2002 	dif_errors++;
2003 	sg_miter_stop(&diter);
2004 	sg_miter_stop(&piter);
2005 	return ret;
2006 }
2007 
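/* Logical block provisioning helpers: convert between LBAs and bit
 * indexes in map_storep, honouring the configured unmap alignment and
 * granularity.
 */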
2008 static unsigned long lba_to_map_index(sector_t lba)
2009 {
2010 	if (scsi_debug_unmap_alignment) {
2011 		lba += scsi_debug_unmap_granularity -
2012 			scsi_debug_unmap_alignment;
2013 	}
2014 	do_div(lba, scsi_debug_unmap_granularity);
2015 
2016 	return lba;
2017 }
2018 
2019 static sector_t map_index_to_lba(unsigned long index)
2020 {
2021 	sector_t lba = index * scsi_debug_unmap_granularity;
2022 
2023 	if (scsi_debug_unmap_alignment) {
2024 		lba -= scsi_debug_unmap_granularity -
2025 			scsi_debug_unmap_alignment;
2026 	}
2027 
2028 	return lba;
2029 }
2030 
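/* Report whether lba is mapped; *num is set to the number of blocks up to
 * the next change of mapping state (or the end of the store).
 */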
2031 static unsigned int map_state(sector_t lba, unsigned int *num)
2032 {
2033 	sector_t end;
2034 	unsigned int mapped;
2035 	unsigned long index;
2036 	unsigned long next;
2037 
2038 	index = lba_to_map_index(lba);
2039 	mapped = test_bit(index, map_storep);
2040 
2041 	if (mapped)
2042 		next = find_next_zero_bit(map_storep, map_size, index);
2043 	else
2044 		next = find_next_bit(map_storep, map_size, index);
2045 
2046 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2047 	*num = end - lba;
2048 
2049 	return mapped;
2050 }
2051 
2052 static void map_region(sector_t lba, unsigned int len)
2053 {
2054 	sector_t end = lba + len;
2055 
2056 	while (lba < end) {
2057 		unsigned long index = lba_to_map_index(lba);
2058 
2059 		if (index < map_size)
2060 			set_bit(index, map_storep);
2061 
2062 		lba = map_index_to_lba(index + 1);
2063 	}
2064 }
2065 
2066 static void unmap_region(sector_t lba, unsigned int len)
2067 {
2068 	sector_t end = lba + len;
2069 
2070 	while (lba < end) {
2071 		unsigned long index = lba_to_map_index(lba);
2072 
2073 		if (lba == map_index_to_lba(index) &&
2074 		    lba + scsi_debug_unmap_granularity <= end &&
2075 		    index < map_size) {
2076 			clear_bit(index, map_storep);
2077 			if (scsi_debug_lbprz) {
2078 				memset(fake_storep +
2079 				       lba * scsi_debug_sector_size, 0,
2080 				       scsi_debug_sector_size *
2081 				       scsi_debug_unmap_granularity);
2082 			}
2083 			if (dif_storep) {
2084 				memset(dif_storep + lba, 0xff,
2085 				       sizeof(*dif_storep) *
2086 				       scsi_debug_unmap_granularity);
2087 			}
2088 		}
2089 		lba = map_index_to_lba(index + 1);
2090 	}
2091 }
2092 
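/* Service WRITE: optional DIX verification, copy the data into the
 * ramdisk under the write lock, and mark the range mapped when logical
 * block provisioning is enabled.
 */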
2093 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2094 		      unsigned int num, struct sdebug_dev_info *devip,
2095 		      u32 ei_lba)
2096 {
2097 	unsigned long iflags;
2098 	int ret;
2099 
2100 	ret = check_device_access_params(devip, lba, num);
2101 	if (ret)
2102 		return ret;
2103 
2104 	write_lock_irqsave(&atomic_rw, iflags);
2105 
2106 	/* DIX + T10 DIF */
2107 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2108 		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2109 
2110 		if (prot_ret) {
2111 			write_unlock_irqrestore(&atomic_rw, iflags);
2112 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2113 			return illegal_condition_result;
2114 		}
2115 	}
2116 
2117 	ret = do_device_access(SCpnt, devip, lba, num, 1);
2118 	if (scsi_debug_lbp())
2119 		map_region(lba, num);
2120 	write_unlock_irqrestore(&atomic_rw, iflags);
2121 	if (-1 == ret)
2122 		return (DID_ERROR << 16);
2123 	else if ((ret < (num * scsi_debug_sector_size)) &&
2124 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2125 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2126 		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2127 
2128 	return 0;
2129 }
2130 
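/* WRITE SAME: with the UNMAP bit (and LBP enabled) just unmap the range;
 * otherwise fetch a single logical block and replicate it across the range.
 */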
2131 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2132 		      unsigned int num, struct sdebug_dev_info *devip,
2133 			   u32 ei_lba, unsigned int unmap)
2134 {
2135 	unsigned long iflags;
2136 	unsigned long long i;
2137 	int ret;
2138 
2139 	ret = check_device_access_params(devip, lba, num);
2140 	if (ret)
2141 		return ret;
2142 
2143 	if (num > scsi_debug_write_same_length) {
2144 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2145 				0);
2146 		return check_condition_result;
2147 	}
2148 
2149 	write_lock_irqsave(&atomic_rw, iflags);
2150 
2151 	if (unmap && scsi_debug_lbp()) {
2152 		unmap_region(lba, num);
2153 		goto out;
2154 	}
2155 
2156 	/* Else fetch one logical block */
2157 	ret = fetch_to_dev_buffer(scmd,
2158 				  fake_storep + (lba * scsi_debug_sector_size),
2159 				  scsi_debug_sector_size);
2160 
2161 	if (-1 == ret) {
2162 		write_unlock_irqrestore(&atomic_rw, iflags);
2163 		return (DID_ERROR << 16);
2164 	} else if ((ret < (num * scsi_debug_sector_size)) &&
2165 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2166 		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2167 		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2168 
2169 	/* Copy first sector to remaining blocks */
2170 	for (i = 1 ; i < num ; i++)
2171 		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2172 		       fake_storep + (lba * scsi_debug_sector_size),
2173 		       scsi_debug_sector_size);
2174 
2175 	if (scsi_debug_lbp())
2176 		map_region(lba, num);
2177 out:
2178 	write_unlock_irqrestore(&atomic_rw, iflags);
2179 
2180 	return 0;
2181 }
2182 
2183 struct unmap_block_desc {
2184 	__be64	lba;
2185 	__be32	blocks;
2186 	__be32	__reserved;
2187 };
2188 
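/* UNMAP: validate and apply each block descriptor in the parameter list,
 * unmapping the described ranges under the write lock.
 */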
2189 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2190 {
2191 	unsigned char *buf;
2192 	struct unmap_block_desc *desc;
2193 	unsigned int i, payload_len, descriptors;
2194 	int ret;
2195 	unsigned long iflags;
2196 
2197 	ret = check_readiness(scmd, 1, devip);
2198 	if (ret)
2199 		return ret;
2200 
2201 	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2202 	BUG_ON(scsi_bufflen(scmd) != payload_len);
2203 
2204 	descriptors = (payload_len - 8) / 16;
2205 
2206 	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2207 	if (!buf)
2208 		return check_condition_result;
2209 
2210 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2211 
2212 	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2213 	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2214 
2215 	desc = (void *)&buf[8];
2216 
2217 	write_lock_irqsave(&atomic_rw, iflags);
2218 
2219 	for (i = 0 ; i < descriptors ; i++) {
2220 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2221 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
2222 
2223 		ret = check_device_access_params(devip, lba, num);
2224 		if (ret)
2225 			goto out;
2226 
2227 		unmap_region(lba, num);
2228 	}
2229 
2230 	ret = 0;
2231 
2232 out:
2233 	write_unlock_irqrestore(&atomic_rw, iflags);
2234 	kfree(buf);
2235 
2236 	return ret;
2237 }
2238 
2239 #define SDEBUG_GET_LBA_STATUS_LEN 32
2240 
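/* GET LBA STATUS: return one descriptor saying whether the requested LBA
 * (and the following blocks in the same state) is mapped or deallocated.
 */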
2241 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2242 			       struct sdebug_dev_info * devip)
2243 {
2244 	unsigned long long lba;
2245 	unsigned int alloc_len, mapped, num;
2246 	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2247 	int ret;
2248 
2249 	ret = check_readiness(scmd, 1, devip);
2250 	if (ret)
2251 		return ret;
2252 
2253 	lba = get_unaligned_be64(&scmd->cmnd[2]);
2254 	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2255 
2256 	if (alloc_len < 24)
2257 		return 0;
2258 
2259 	ret = check_device_access_params(devip, lba, 1);
2260 	if (ret)
2261 		return ret;
2262 
2263 	mapped = map_state(lba, &num);
2264 
2265 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2266 	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
2267 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
2268 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
2269 	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */
2270 
2271 	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2272 }
2273 
2274 #define SDEBUG_RLUN_ARR_SZ 256
2275 
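/* REPORT LUNS: list up to max_luns LUNs per target (optionally without
 * LUN 0) and append the REPORT LUNS well known LUN when SELECT REPORT
 * is 1 or 2.
 */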
2276 static int resp_report_luns(struct scsi_cmnd * scp,
2277 			    struct sdebug_dev_info * devip)
2278 {
2279 	unsigned int alloc_len;
2280 	int lun_cnt, i, upper, num, n, wlun, lun;
2281 	unsigned char *cmd = (unsigned char *)scp->cmnd;
2282 	int select_report = (int)cmd[2];
2283 	struct scsi_lun *one_lun;
2284 	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2285 	unsigned char * max_addr;
2286 
2287 	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2288 	if ((alloc_len < 4) || (select_report > 2)) {
2289 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2290 			       	0);
2291 		return check_condition_result;
2292 	}
2293 	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
2294 	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2295 	lun_cnt = scsi_debug_max_luns;
2296 	if (1 == select_report)
2297 		lun_cnt = 0;
2298 	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2299 		--lun_cnt;
2300 	wlun = (select_report > 0) ? 1 : 0;
2301 	num = lun_cnt + wlun;
2302 	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2303 	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2304 	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2305 			    sizeof(struct scsi_lun)), num);
2306 	if (n < num) {
2307 		wlun = 0;
2308 		lun_cnt = n;
2309 	}
2310 	one_lun = (struct scsi_lun *) &arr[8];
2311 	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2312 	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2313 	     ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2314 	     i++, lun++) {
2315 		upper = (lun >> 8) & 0x3f;
2316 		if (upper)
2317 			one_lun[i].scsi_lun[0] =
2318 			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2319 		one_lun[i].scsi_lun[1] = lun & 0xff;
2320 	}
2321 	if (wlun) {
2322 		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2323 		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2324 		i++;
2325 	}
2326 	alloc_len = (unsigned char *)(one_lun + i) - arr;
2327 	return fill_from_dev_buffer(scp, arr,
2328 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2329 }
2330 
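/* XDWRITEREAD(10): XOR the command's data-out buffer into its data-in
 * buffer.
 */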
2331 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2332 			    unsigned int num, struct sdebug_dev_info *devip)
2333 {
2334 	int j;
2335 	unsigned char *kaddr, *buf;
2336 	unsigned int offset;
2337 	struct scsi_data_buffer *sdb = scsi_in(scp);
2338 	struct sg_mapping_iter miter;
2339 
2340 	/* Note: it would be better to avoid this temporary buffer. */
2341 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2342 	if (!buf) {
2343 		mk_sense_buffer(devip, NOT_READY,
2344 				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2345 		return check_condition_result;
2346 	}
2347 
2348 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2349 
2350 	offset = 0;
2351 	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2352 			SG_MITER_ATOMIC | SG_MITER_TO_SG);
2353 
2354 	while (sg_miter_next(&miter)) {
2355 		kaddr = miter.addr;
2356 		for (j = 0; j < miter.length; j++)
2357 			*(kaddr + j) ^= *(buf + offset + j);
2358 
2359 		offset += miter.length;
2360 	}
2361 	sg_miter_stop(&miter);
2362 	kfree(buf);
2363 
2364 	return 0;
2365 }
2366 
2367 /* Called when a queued command's delay timer expires; completes the command. */
2368 static void timer_intr_handler(unsigned long indx)
2369 {
2370 	struct sdebug_queued_cmd * sqcp;
2371 	unsigned long iflags;
2372 
2373 	if (indx >= scsi_debug_max_queue) {
2374 		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2375 		       "large\n");
2376 		return;
2377 	}
2378 	spin_lock_irqsave(&queued_arr_lock, iflags);
2379 	sqcp = &queued_arr[(int)indx];
2380 	if (! sqcp->in_use) {
2381 		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2382 		       "interrupt\n");
2383 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2384 		return;
2385 	}
2386 	sqcp->in_use = 0;
2387 	if (sqcp->done_funct) {
2388 		sqcp->a_cmnd->result = sqcp->scsi_result;
2389 		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2390 	}
2391 	sqcp->done_funct = NULL;
2392 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2393 }
2394 
2395 
2396 static struct sdebug_dev_info *
2397 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2398 {
2399 	struct sdebug_dev_info *devip;
2400 
2401 	devip = kzalloc(sizeof(*devip), flags);
2402 	if (devip) {
2403 		devip->sdbg_host = sdbg_host;
2404 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2405 	}
2406 	return devip;
2407 }
2408 
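/* Find (or create) the sdebug_dev_info matching sdev, reusing an unused
 * slot on the host's dev_info_list when possible.
 */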
2409 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2410 {
2411 	struct sdebug_host_info * sdbg_host;
2412 	struct sdebug_dev_info * open_devip = NULL;
2413 	struct sdebug_dev_info * devip =
2414 			(struct sdebug_dev_info *)sdev->hostdata;
2415 
2416 	if (devip)
2417 		return devip;
2418 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2419 	if (!sdbg_host) {
2420 		printk(KERN_ERR "Host info NULL\n");
2421 		return NULL;
2422 	}
2423 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2424 		if ((devip->used) && (devip->channel == sdev->channel) &&
2425 		    (devip->target == sdev->id) &&
2426 		    (devip->lun == sdev->lun))
2427 			return devip;
2428 		else {
2429 			if ((!devip->used) && (!open_devip))
2430 				open_devip = devip;
2431 		}
2432 	}
2433 	if (!open_devip) { /* try and make a new one */
2434 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2435 		if (!open_devip) {
2436 			printk(KERN_ERR "%s: out of memory at line %d\n",
2437 				__func__, __LINE__);
2438 			return NULL;
2439 		}
2440 	}
2441 
2442 	open_devip->channel = sdev->channel;
2443 	open_devip->target = sdev->id;
2444 	open_devip->lun = sdev->lun;
2445 	open_devip->sdbg_host = sdbg_host;
2446 	open_devip->reset = 1;
2447 	open_devip->used = 1;
2448 	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2449 	if (scsi_debug_dsense)
2450 		open_devip->sense_buff[0] = 0x72;
2451 	else {
2452 		open_devip->sense_buff[0] = 0x70;
2453 		open_devip->sense_buff[7] = 0xa;
2454 	}
2455 	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2456 		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2457 
2458 	return open_devip;
2459 }
2460 
2461 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2462 {
2463 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2464 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2465 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2466 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2467 	return 0;
2468 }
2469 
2470 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2471 {
2472 	struct sdebug_dev_info *devip;
2473 
2474 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2475 		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2476 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2477 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2478 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2479 	devip = devInfoReg(sdp);
2480 	if (NULL == devip)
2481 		return 1;	/* no resources, will be marked offline */
2482 	sdp->hostdata = devip;
2483 	if (sdp->host->cmd_per_lun)
2484 		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2485 					sdp->host->cmd_per_lun);
2486 	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2487 	if (scsi_debug_no_uld)
2488 		sdp->no_uld_attach = 1;
2489 	return 0;
2490 }
2491 
2492 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2493 {
2494 	struct sdebug_dev_info *devip =
2495 		(struct sdebug_dev_info *)sdp->hostdata;
2496 
2497 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2498 		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2499 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2500 	if (devip) {
2501 		/* make this slot available for re-use */
2502 		devip->used = 0;
2503 		sdp->hostdata = NULL;
2504 	}
2505 }
2506 
2507 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0. */
2508 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2509 {
2510 	unsigned long iflags;
2511 	int k;
2512 	struct sdebug_queued_cmd *sqcp;
2513 
2514 	spin_lock_irqsave(&queued_arr_lock, iflags);
2515 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2516 		sqcp = &queued_arr[k];
2517 		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2518 			del_timer_sync(&sqcp->cmnd_timer);
2519 			sqcp->in_use = 0;
2520 			sqcp->a_cmnd = NULL;
2521 			break;
2522 		}
2523 	}
2524 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2525 	return (k < scsi_debug_max_queue) ? 1 : 0;
2526 }
2527 
2528 /* Deletes (stops) timers of all queued commands */
2529 static void stop_all_queued(void)
2530 {
2531 	unsigned long iflags;
2532 	int k;
2533 	struct sdebug_queued_cmd *sqcp;
2534 
2535 	spin_lock_irqsave(&queued_arr_lock, iflags);
2536 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2537 		sqcp = &queued_arr[k];
2538 		if (sqcp->in_use && sqcp->a_cmnd) {
2539 			del_timer_sync(&sqcp->cmnd_timer);
2540 			sqcp->in_use = 0;
2541 			sqcp->a_cmnd = NULL;
2542 		}
2543 	}
2544 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2545 }
2546 
2547 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2548 {
2549 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2550 		printk(KERN_INFO "scsi_debug: abort\n");
2551 	++num_aborts;
2552 	stop_queued_cmnd(SCpnt);
2553 	return SUCCESS;
2554 }
2555 
2556 static int scsi_debug_biosparam(struct scsi_device *sdev,
2557 		struct block_device * bdev, sector_t capacity, int *info)
2558 {
2559 	int res;
2560 	unsigned char *buf;
2561 
2562 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2563 		printk(KERN_INFO "scsi_debug: biosparam\n");
2564 	buf = scsi_bios_ptable(bdev);
2565 	if (buf) {
2566 		res = scsi_partsize(buf, capacity,
2567 				    &info[2], &info[0], &info[1]);
2568 		kfree(buf);
2569 		if (! res)
2570 			return res;
2571 	}
2572 	info[0] = sdebug_heads;
2573 	info[1] = sdebug_sectors_per;
2574 	info[2] = sdebug_cylinders_per;
2575 	return 0;
2576 }
2577 
2578 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2579 {
2580 	struct sdebug_dev_info * devip;
2581 
2582 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2583 		printk(KERN_INFO "scsi_debug: device_reset\n");
2584 	++num_dev_resets;
2585 	if (SCpnt) {
2586 		devip = devInfoReg(SCpnt->device);
2587 		if (devip)
2588 			devip->reset = 1;
2589 	}
2590 	return SUCCESS;
2591 }
2592 
2593 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2594 {
2595 	struct sdebug_host_info *sdbg_host;
2596 	struct sdebug_dev_info * dev_info;
2597 	struct scsi_device * sdp;
2598 	struct Scsi_Host * hp;
2599 
2600 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2601 		printk(KERN_INFO "scsi_debug: bus_reset\n");
2602 	++num_bus_resets;
2603 	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2604 		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2605 		if (sdbg_host) {
2606 			list_for_each_entry(dev_info,
2607 					    &sdbg_host->dev_info_list,
2608 					    dev_list)
2609 				dev_info->reset = 1;
2610 		}
2611 	}
2612 	return SUCCESS;
2613 }
2614 
2615 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2616 {
2617 	struct sdebug_host_info * sdbg_host;
2618 	struct sdebug_dev_info * dev_info;
2619 
2620 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2621 		printk(KERN_INFO "scsi_debug: host_reset\n");
2622 	++num_host_resets;
2623 	spin_lock(&sdebug_host_list_lock);
2624 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2625 		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2626 				    dev_list)
2627 			dev_info->reset = 1;
2628 	}
2629 	spin_unlock(&sdebug_host_list_lock);
2630 	stop_all_queued();
2631 	return SUCCESS;
2632 }
2633 
2634 /* Initializes timers in queued array */
2635 static void __init init_all_queued(void)
2636 {
2637 	unsigned long iflags;
2638 	int k;
2639 	struct sdebug_queued_cmd * sqcp;
2640 
2641 	spin_lock_irqsave(&queued_arr_lock, iflags);
2642 	for (k = 0; k < scsi_debug_max_queue; ++k) {
2643 		sqcp = &queued_arr[k];
2644 		init_timer(&sqcp->cmnd_timer);
2645 		sqcp->in_use = 0;
2646 		sqcp->a_cmnd = NULL;
2647 	}
2648 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
2649 }
2650 
2651 static void __init sdebug_build_parts(unsigned char *ramp,
2652 				      unsigned long store_size)
2653 {
2654 	struct partition * pp;
2655 	int starts[SDEBUG_MAX_PARTS + 2];
2656 	int sectors_per_part, num_sectors, k;
2657 	int heads_by_sects, start_sec, end_sec;
2658 
2659 	/* assume partition table already zeroed */
2660 	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2661 		return;
2662 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2663 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2664 		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2665 				    "partitions to %d\n", SDEBUG_MAX_PARTS);
2666 	}
2667 	num_sectors = (int)sdebug_store_sectors;
2668 	sectors_per_part = (num_sectors - sdebug_sectors_per)
2669 			   / scsi_debug_num_parts;
2670 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
2671 	starts[0] = sdebug_sectors_per;
2672 	for (k = 1; k < scsi_debug_num_parts; ++k)
2673 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
2674 			    * heads_by_sects;
2675 	starts[scsi_debug_num_parts] = num_sectors;
2676 	starts[scsi_debug_num_parts + 1] = 0;
2677 
2678 	ramp[510] = 0x55;	/* magic partition markings */
2679 	ramp[511] = 0xAA;
2680 	pp = (struct partition *)(ramp + 0x1be);
2681 	for (k = 0; starts[k + 1]; ++k, ++pp) {
2682 		start_sec = starts[k];
2683 		end_sec = starts[k + 1] - 1;
2684 		pp->boot_ind = 0;
2685 
2686 		pp->cyl = start_sec / heads_by_sects;
2687 		pp->head = (start_sec - (pp->cyl * heads_by_sects))
2688 			   / sdebug_sectors_per;
2689 		pp->sector = (start_sec % sdebug_sectors_per) + 1;
2690 
2691 		pp->end_cyl = end_sec / heads_by_sects;
2692 		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2693 			       / sdebug_sectors_per;
2694 		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2695 
2696 		pp->start_sect = cpu_to_le32(start_sec);
2697 		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2698 		pp->sys_ind = 0x83;	/* plain Linux partition */
2699 	}
2700 }
2701 
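/* Either complete the command immediately (delta_jiff <= 0) or queue it
 * and arm a timer so the result is delivered after delta_jiff jiffies.
 * Returns 1 (busy) when the queued command array is full.
 */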
2702 static int schedule_resp(struct scsi_cmnd * cmnd,
2703 			 struct sdebug_dev_info * devip,
2704 			 done_funct_t done, int scsi_result, int delta_jiff)
2705 {
2706 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2707 		if (scsi_result) {
2708 			struct scsi_device * sdp = cmnd->device;
2709 
2710 			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
2711 			       "non-zero result=0x%x\n", sdp->host->host_no,
2712 			       sdp->channel, sdp->id, sdp->lun, scsi_result);
2713 		}
2714 	}
2715 	if (cmnd && devip) {
2716 		/* simulate autosense by this driver */
2717 		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2718 			memcpy(cmnd->sense_buffer, devip->sense_buff,
2719 			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2720 			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2721 	}
2722 	if (delta_jiff <= 0) {
2723 		if (cmnd)
2724 			cmnd->result = scsi_result;
2725 		if (done)
2726 			done(cmnd);
2727 		return 0;
2728 	} else {
2729 		unsigned long iflags;
2730 		int k;
2731 		struct sdebug_queued_cmd * sqcp = NULL;
2732 
2733 		spin_lock_irqsave(&queued_arr_lock, iflags);
2734 		for (k = 0; k < scsi_debug_max_queue; ++k) {
2735 			sqcp = &queued_arr[k];
2736 			if (! sqcp->in_use)
2737 				break;
2738 		}
2739 		if (k >= scsi_debug_max_queue) {
2740 			spin_unlock_irqrestore(&queued_arr_lock, iflags);
2741 			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2742 			return 1;	/* report busy to mid level */
2743 		}
2744 		sqcp->in_use = 1;
2745 		sqcp->a_cmnd = cmnd;
2746 		sqcp->scsi_result = scsi_result;
2747 		sqcp->done_funct = done;
2748 		sqcp->cmnd_timer.function = timer_intr_handler;
2749 		sqcp->cmnd_timer.data = k;
2750 		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2751 		add_timer(&sqcp->cmnd_timer);
2752 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
2753 		if (cmnd)
2754 			cmnd->result = 0;
2755 		return 0;
2756 	}
2757 }
2758 /* Note: The following macros create attribute files in the
2759    /sys/module/scsi_debug/parameters directory. Unfortunately this
2760    driver is unaware of a change and cannot trigger auxiliary actions
2761    as it can when the corresponding attribute in the
2762    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2763  */
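/* For example, writing to /sys/bus/pseudo/drivers/scsi_debug/opts goes
 * through opts_store() below and also zeroes the command counter, whereas
 * writing the same value to /sys/module/scsi_debug/parameters/opts only
 * updates the variable.
 */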
2764 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2765 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2766 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
2767 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2768 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2769 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2770 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2771 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2772 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2773 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2774 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
2775 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2776 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2777 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2778 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2779 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2780 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2781 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2782 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2783 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2784 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2785 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2786 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2787 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2788 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2789 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2790 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2791 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2792 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2793 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2794 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2795 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2796 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2797 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2798 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2799 		   S_IRUGO | S_IWUSR);
2800 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2801 		   S_IRUGO | S_IWUSR);
2802 
2803 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2804 MODULE_DESCRIPTION("SCSI debug adapter driver");
2805 MODULE_LICENSE("GPL");
2806 MODULE_VERSION(SCSI_DEBUG_VERSION);
2807 
2808 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2809 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2810 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
2811 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2812 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2813 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2814 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2815 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2816 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2817 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2818 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2819 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2820 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2821 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2822 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2823 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2824 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2825 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2826 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2827 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2828 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2829 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2830 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2831 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2832 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2833 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2834 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2835 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2836 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2837 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2838 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2839 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2840 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2841 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2842 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2843 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2844 
2845 static char sdebug_info[256];
2846 
2847 static const char * scsi_debug_info(struct Scsi_Host * shp)
2848 {
2849 	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2850 		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2851 		scsi_debug_version_date, scsi_debug_dev_size_mb,
2852 		scsi_debug_opts);
2853 	return sdebug_info;
2854 }
2855 
2856 /* scsi_debug_write_info
2857  * Handles writes to this driver's /proc/scsi entry (to change opts).
2858  */
2859 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2860 {
2861 	char arr[16];
2862 	int opts;
2863 	int minLen = length > 15 ? 15 : length;
2864 
2865 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2866 		return -EACCES;
2867 	memcpy(arr, buffer, minLen);
2868 	arr[minLen] = '\0';
2869 	if (1 != sscanf(arr, "%d", &opts))
2870 		return -EINVAL;
2871 	scsi_debug_opts = opts;
2872 	if (scsi_debug_every_nth != 0)
2873 		scsi_debug_cmnd_count = 0;
2874 	return length;
2875 }
2876 
2877 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2878 {
2879 	seq_printf(m, "scsi_debug adapter driver, version "
2880 	    "%s [%s]\n"
2881 	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2882 	    "every_nth=%d(curr:%d)\n"
2883 	    "delay=%d, max_luns=%d, scsi_level=%d\n"
2884 	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2885 	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2886 	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2887 	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2888 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2889 	    scsi_debug_cmnd_count, scsi_debug_delay,
2890 	    scsi_debug_max_luns, scsi_debug_scsi_level,
2891 	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2892 	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2893 	    num_host_resets, dix_reads, dix_writes, dif_errors);
2894 	return 0;
2895 }
2896 
2897 static ssize_t delay_show(struct device_driver *ddp, char *buf)
2898 {
2899         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2900 }
2901 
2902 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
2903 			   size_t count)
2904 {
2905         int delay;
2906 	char work[20];
2907 
2908         if (1 == sscanf(buf, "%10s", work)) {
2909 		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2910 			scsi_debug_delay = delay;
2911 			return count;
2912 		}
2913 	}
2914 	return -EINVAL;
2915 }
2916 static DRIVER_ATTR_RW(delay);
2917 
2918 static ssize_t opts_show(struct device_driver *ddp, char *buf)
2919 {
2920         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2921 }
2922 
2923 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
2924 			  size_t count)
2925 {
2926         int opts;
2927 	char work[20];
2928 
2929         if (1 == sscanf(buf, "%10s", work)) {
2930 		if (0 == strnicmp(work,"0x", 2)) {
2931 			if (1 == sscanf(&work[2], "%x", &opts))
2932 				goto opts_done;
2933 		} else {
2934 			if (1 == sscanf(work, "%d", &opts))
2935 				goto opts_done;
2936 		}
2937 	}
2938 	return -EINVAL;
2939 opts_done:
2940 	scsi_debug_opts = opts;
2941 	scsi_debug_cmnd_count = 0;
2942 	return count;
2943 }
2944 static DRIVER_ATTR_RW(opts);
2945 
2946 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
2947 {
2948         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2949 }
2950 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
2951 			   size_t count)
2952 {
2953         int n;
2954 
2955 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2956 		scsi_debug_ptype = n;
2957 		return count;
2958 	}
2959 	return -EINVAL;
2960 }
2961 static DRIVER_ATTR_RW(ptype);
2962 
2963 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
2964 {
2965         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2966 }
2967 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
2968 			    size_t count)
2969 {
2970         int n;
2971 
2972 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2973 		scsi_debug_dsense = n;
2974 		return count;
2975 	}
2976 	return -EINVAL;
2977 }
2978 static DRIVER_ATTR_RW(dsense);
2979 
2980 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
2981 {
2982         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2983 }
2984 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
2985 			     size_t count)
2986 {
2987         int n;
2988 
2989 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2990 		scsi_debug_fake_rw = n;
2991 		return count;
2992 	}
2993 	return -EINVAL;
2994 }
2995 static DRIVER_ATTR_RW(fake_rw);
2996 
2997 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
2998 {
2999         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3000 }
3001 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3002 			      size_t count)
3003 {
3004         int n;
3005 
3006 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3007 		scsi_debug_no_lun_0 = n;
3008 		return count;
3009 	}
3010 	return -EINVAL;
3011 }
3012 static DRIVER_ATTR_RW(no_lun_0);
3013 
3014 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3015 {
3016         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3017 }
3018 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3019 			      size_t count)
3020 {
3021         int n;
3022 
3023 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3024 		scsi_debug_num_tgts = n;
3025 		sdebug_max_tgts_luns();
3026 		return count;
3027 	}
3028 	return -EINVAL;
3029 }
3030 static DRIVER_ATTR_RW(num_tgts);
3031 
3032 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3033 {
3034         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3035 }
3036 static DRIVER_ATTR_RO(dev_size_mb);
3037 
3038 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3039 {
3040         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3041 }
3042 static DRIVER_ATTR_RO(num_parts);
3043 
3044 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3045 {
3046         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3047 }
3048 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3049 			       size_t count)
3050 {
3051         int nth;
3052 
3053 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3054 		scsi_debug_every_nth = nth;
3055 		scsi_debug_cmnd_count = 0;
3056 		return count;
3057 	}
3058 	return -EINVAL;
3059 }
3060 static DRIVER_ATTR_RW(every_nth);
3061 
3062 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3063 {
3064         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3065 }
3066 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3067 			      size_t count)
3068 {
3069         int n;
3070 
3071 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3072 		scsi_debug_max_luns = n;
3073 		sdebug_max_tgts_luns();
3074 		return count;
3075 	}
3076 	return -EINVAL;
3077 }
3078 static DRIVER_ATTR_RW(max_luns);
3079 
3080 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3081 {
3082         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3083 }
3084 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3085 			       size_t count)
3086 {
3087         int n;
3088 
3089 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3090 	    (n <= SCSI_DEBUG_CANQUEUE)) {
3091 		scsi_debug_max_queue = n;
3092 		return count;
3093 	}
3094 	return -EINVAL;
3095 }
3096 static DRIVER_ATTR_RW(max_queue);
3097 
3098 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3099 {
3100         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3101 }
3102 static DRIVER_ATTR_RO(no_uld);
3103 
3104 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3105 {
3106         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3107 }
3108 static DRIVER_ATTR_RO(scsi_level);
3109 
3110 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3111 {
3112         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3113 }
3114 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3115 				size_t count)
3116 {
3117         int n;
3118 
3119 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3120 		scsi_debug_virtual_gb = n;
3121 
3122 		sdebug_capacity = get_sdebug_capacity();
3123 
3124 		return count;
3125 	}
3126 	return -EINVAL;
3127 }
3128 static DRIVER_ATTR_RW(virtual_gb);
3129 
3130 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3131 {
3132         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3133 }
3134 
3135 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3136 			      size_t count)
3137 {
3138 	int delta_hosts;
3139 
3140 	if (sscanf(buf, "%d", &delta_hosts) != 1)
3141 		return -EINVAL;
3142 	if (delta_hosts > 0) {
3143 		do {
3144 			sdebug_add_adapter();
3145 		} while (--delta_hosts);
3146 	} else if (delta_hosts < 0) {
3147 		do {
3148 			sdebug_remove_adapter();
3149 		} while (++delta_hosts);
3150 	}
3151 	return count;
3152 }
3153 static DRIVER_ATTR_RW(add_host);
3154 
3155 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3156 {
3157 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3158 }
3159 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3160 				    size_t count)
3161 {
3162 	int n;
3163 
3164 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3165 		scsi_debug_vpd_use_hostno = n;
3166 		return count;
3167 	}
3168 	return -EINVAL;
3169 }
3170 static DRIVER_ATTR_RW(vpd_use_hostno);
3171 
3172 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3173 {
3174 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3175 }
3176 static DRIVER_ATTR_RO(sector_size);
3177 
3178 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3179 {
3180 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3181 }
3182 static DRIVER_ATTR_RO(dix);
3183 
3184 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3185 {
3186 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3187 }
3188 static DRIVER_ATTR_RO(dif);
3189 
3190 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3191 {
3192 	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3193 }
3194 static DRIVER_ATTR_RO(guard);
3195 
3196 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3197 {
3198 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3199 }
3200 static DRIVER_ATTR_RO(ato);
3201 
3202 static ssize_t map_show(struct device_driver *ddp, char *buf)
3203 {
3204 	ssize_t count;
3205 
3206 	if (!scsi_debug_lbp())
3207 		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3208 				 sdebug_store_sectors);
3209 
3210 	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3211 
3212 	buf[count++] = '\n';
3213 	buf[count++] = 0;
3214 
3215 	return count;
3216 }
3217 static DRIVER_ATTR_RO(map);
3218 
3219 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3220 {
3221 	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3222 }
3223 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3224 			       size_t count)
3225 {
3226 	int n;
3227 
3228 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3229 		scsi_debug_removable = (n > 0);
3230 		return count;
3231 	}
3232 	return -EINVAL;
3233 }
3234 static DRIVER_ATTR_RW(removable);
3235 
3236 /* Note: The following array creates attribute files in the
3237    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3238    files (over those found in the /sys/module/scsi_debug/parameters
3239    directory) is that auxiliary actions can be triggered when an attribute
3240    is changed. For example see: sdebug_add_host_store() above.
3241  */
3242 
3243 static struct attribute *sdebug_drv_attrs[] = {
3244 	&driver_attr_delay.attr,
3245 	&driver_attr_opts.attr,
3246 	&driver_attr_ptype.attr,
3247 	&driver_attr_dsense.attr,
3248 	&driver_attr_fake_rw.attr,
3249 	&driver_attr_no_lun_0.attr,
3250 	&driver_attr_num_tgts.attr,
3251 	&driver_attr_dev_size_mb.attr,
3252 	&driver_attr_num_parts.attr,
3253 	&driver_attr_every_nth.attr,
3254 	&driver_attr_max_luns.attr,
3255 	&driver_attr_max_queue.attr,
3256 	&driver_attr_no_uld.attr,
3257 	&driver_attr_scsi_level.attr,
3258 	&driver_attr_virtual_gb.attr,
3259 	&driver_attr_add_host.attr,
3260 	&driver_attr_vpd_use_hostno.attr,
3261 	&driver_attr_sector_size.attr,
3262 	&driver_attr_dix.attr,
3263 	&driver_attr_dif.attr,
3264 	&driver_attr_guard.attr,
3265 	&driver_attr_ato.attr,
3266 	&driver_attr_map.attr,
3267 	&driver_attr_removable.attr,
3268 	NULL,
3269 };
3270 ATTRIBUTE_GROUPS(sdebug_drv);
3271 
3272 static struct device *pseudo_primary;
3273 
3274 static int __init scsi_debug_init(void)
3275 {
3276 	unsigned long sz;
3277 	int host_to_add;
3278 	int k;
3279 	int ret;
3280 
3281 	switch (scsi_debug_sector_size) {
3282 	case  512:
3283 	case 1024:
3284 	case 2048:
3285 	case 4096:
3286 		break;
3287 	default:
3288 		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3289 		       scsi_debug_sector_size);
3290 		return -EINVAL;
3291 	}
3292 
3293 	switch (scsi_debug_dif) {
3294 
3295 	case SD_DIF_TYPE0_PROTECTION:
3296 	case SD_DIF_TYPE1_PROTECTION:
3297 	case SD_DIF_TYPE2_PROTECTION:
3298 	case SD_DIF_TYPE3_PROTECTION:
3299 		break;
3300 
3301 	default:
3302 		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3303 		return -EINVAL;
3304 	}
3305 
3306 	if (scsi_debug_guard > 1) {
3307 		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3308 		return -EINVAL;
3309 	}
3310 
3311 	if (scsi_debug_ato > 1) {
3312 		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3313 		return -EINVAL;
3314 	}
3315 
3316 	if (scsi_debug_physblk_exp > 15) {
3317 		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3318 		       scsi_debug_physblk_exp);
3319 		return -EINVAL;
3320 	}
3321 
3322 	if (scsi_debug_lowest_aligned > 0x3fff) {
3323 		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3324 		       scsi_debug_lowest_aligned);
3325 		return -EINVAL;
3326 	}
3327 
3328 	if (scsi_debug_dev_size_mb < 1)
3329 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3330 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3331 	sdebug_store_sectors = sz / scsi_debug_sector_size;
3332 	sdebug_capacity = get_sdebug_capacity();
3333 
3334 	/* play around with geometry, don't waste too much on track 0 */
3335 	sdebug_heads = 8;
3336 	sdebug_sectors_per = 32;
3337 	if (scsi_debug_dev_size_mb >= 256)
3338 		sdebug_heads = 64;
3339 	else if (scsi_debug_dev_size_mb >= 16)
3340 		sdebug_heads = 32;
3341 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3342 			       (sdebug_sectors_per * sdebug_heads);
3343 	if (sdebug_cylinders_per >= 1024) {
3344 		/* other LLDs do this; implies >= 1GB ram disk ... */
3345 		sdebug_heads = 255;
3346 		sdebug_sectors_per = 63;
3347 		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3348 			       (sdebug_sectors_per * sdebug_heads);
3349 	}
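	/*
	 * Worked example, assuming the default 8 MB store, 512-byte sectors
	 * and virtual_gb=0: sdebug_store_sectors = 16384, heads = 8 and
	 * sectors_per = 32, giving 16384 / (32 * 8) = 64 cylinders; the
	 * 255/63 fallback above only kicks in around the 1 GB mark (see the
	 * comment inside that branch).
	 */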
3350 
3351 	fake_storep = vmalloc(sz);
3352 	if (NULL == fake_storep) {
3353 		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3354 		return -ENOMEM;
3355 	}
3356 	memset(fake_storep, 0, sz);
3357 	if (scsi_debug_num_parts > 0)
3358 		sdebug_build_parts(fake_storep, sz);
3359 
3360 	if (scsi_debug_dix) {
3361 		int dif_size;
3362 
3363 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3364 		dif_storep = vmalloc(dif_size);
3365 
3366 		printk(KERN_INFO "scsi_debug_init: dif_storep %u bytes @ %p\n",
3367 		       dif_size, dif_storep);
3368 
3369 		if (dif_storep == NULL) {
3370 			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3371 			ret = -ENOMEM;
3372 			goto free_vm;
3373 		}
3374 
3375 		memset(dif_storep, 0xff, dif_size);
3376 	}
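	/*
	 * dif_storep holds one struct sd_dif_tuple (guard, application and
	 * reference tags) per simulated sector.  The 0xff fill leaves
	 * unwritten sectors with all-ones tags, which T10 DIF treats as an
	 * escape value, so they should not be flagged as corrupt before real
	 * protection data has been written.
	 */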
3377 
3378 	/* Logical Block Provisioning */
3379 	if (scsi_debug_lbp()) {
3380 		scsi_debug_unmap_max_blocks =
3381 			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3382 
3383 		scsi_debug_unmap_max_desc =
3384 			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3385 
3386 		scsi_debug_unmap_granularity =
3387 			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3388 
3389 		if (scsi_debug_unmap_alignment &&
3390 		    scsi_debug_unmap_granularity <=
3391 		    scsi_debug_unmap_alignment) {
3392 			printk(KERN_ERR "%s: ERR: unmap_granularity <= unmap_alignment\n",
3393 			       __func__);
3394 			ret = -EINVAL;
3395 			goto free_vm;
3396 		}
3397 
3398 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3399 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3400 
3401 		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3402 		       map_size);
3403 
3404 		if (map_storep == NULL) {
3405 			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3406 			ret = -ENOMEM;
3407 			goto free_vm;
3408 		}
3409 
3410 		bitmap_zero(map_storep, map_size);
3411 
3412 		/* Map first 1KB for partition table */
3413 		if (scsi_debug_num_parts)
3414 			map_region(0, 2);
3415 	}
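	/*
	 * Provisioning map sketch (assuming the default unmap granularity of
	 * one logical block): map_storep holds one bit per unmap granule, so
	 * an 8 MB store with 512-byte sectors needs a 16384-bit map.  Bits
	 * are set by map_region() as blocks are written and cleared again
	 * when they are unmapped.
	 */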
3416 
3417 	pseudo_primary = root_device_register("pseudo_0");
3418 	if (IS_ERR(pseudo_primary)) {
3419 		printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3420 		ret = PTR_ERR(pseudo_primary);
3421 		goto free_vm;
3422 	}
3423 	ret = bus_register(&pseudo_lld_bus);
3424 	if (ret < 0) {
3425 		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3426 			ret);
3427 		goto dev_unreg;
3428 	}
3429 	ret = driver_register(&sdebug_driverfs_driver);
3430 	if (ret < 0) {
3431 		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3432 			ret);
3433 		goto bus_unreg;
3434 	}
3435 
3436 	init_all_queued();
3437 
3438 	host_to_add = scsi_debug_add_host;
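	/*
	 * scsi_debug_add_host is reset below and incremented again by each
	 * successful sdebug_add_adapter() call, so after the loop it reflects
	 * how many adapters were actually created (reported below when the
	 * NOISE option is set).
	 */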
3439 	scsi_debug_add_host = 0;
3440 
3441 	for (k = 0; k < host_to_add; k++) {
3442 		if (sdebug_add_adapter()) {
3443 			printk(KERN_ERR "scsi_debug_init: "
3444 			       "sdebug_add_adapter failed k=%d\n", k);
3445 			break;
3446 		}
3447 	}
3448 
3449 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3450 		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3451 		       scsi_debug_add_host);
3452 	}
3453 	return 0;
3454 
3455 bus_unreg:
3456 	bus_unregister(&pseudo_lld_bus);
3457 dev_unreg:
3458 	root_device_unregister(pseudo_primary);
3459 free_vm:
3460 	vfree(map_storep);
3461 	vfree(dif_storep);
3464 	vfree(fake_storep);
3465 
3466 	return ret;
3467 }
3468 
3469 static void __exit scsi_debug_exit(void)
3470 {
3471 	int k = scsi_debug_add_host;
3472 
3473 	stop_all_queued();
3474 	for (; k; k--)
3475 		sdebug_remove_adapter();
3476 	driver_unregister(&sdebug_driverfs_driver);
3477 	bus_unregister(&pseudo_lld_bus);
3478 	root_device_unregister(pseudo_primary);
3479 
3480 	vfree(dif_storep);
3482 
3483 	vfree(fake_storep);
3484 }
3485 
3486 device_initcall(scsi_debug_init);
3487 module_exit(scsi_debug_exit);
3488 
3489 static void sdebug_release_adapter(struct device *dev)
3490 {
3491 	struct sdebug_host_info *sdbg_host;
3492 
3493 	sdbg_host = to_sdebug_host(dev);
3494 	kfree(sdbg_host);
3495 }
3496 
3497 static int sdebug_add_adapter(void)
3498 {
3499 	int k, devs_per_host;
3500 	int error = 0;
3501 	struct sdebug_host_info *sdbg_host;
3502 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
3503 
3504 	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
3505 	if (NULL == sdbg_host) {
3506 		printk(KERN_ERR "%s: out of memory at line %d\n",
3507 		       __func__, __LINE__);
3508 		return -ENOMEM;
3509 	}
3510 
3511 	INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3512 
3513 	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3514 	for (k = 0; k < devs_per_host; k++) {
3515 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3516 		if (!sdbg_devinfo) {
3517 			printk(KERN_ERR "%s: out of memory at line %d\n",
3518 			       __func__, __LINE__);
3519 			error = -ENOMEM;
3520 			goto clean;
3521 		}
3522 	}
3523 
3524 	spin_lock(&sdebug_host_list_lock);
3525 	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3526 	spin_unlock(&sdebug_host_list_lock);
3527 
3528 	sdbg_host->dev.bus = &pseudo_lld_bus;
3529 	sdbg_host->dev.parent = pseudo_primary;
3530 	sdbg_host->dev.release = &sdebug_release_adapter;
3531 	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3532 
3533 	error = device_register(&sdbg_host->dev);
3534 
3535 	if (error)
3536 		goto clean;
3537 
3538 	++scsi_debug_add_host;
3539 	return error;
3540 
3541 clean:
3542 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3543 				 dev_list) {
3544 		list_del(&sdbg_devinfo->dev_list);
3545 		kfree(sdbg_devinfo);
3546 	}
3547 
3548 	kfree(sdbg_host);
3549 	return error;
3550 }
3551 
3552 static void sdebug_remove_adapter(void)
3553 {
3554 	struct sdebug_host_info *sdbg_host = NULL;
3555 
3556 	spin_lock(&sdebug_host_list_lock);
3557 	if (!list_empty(&sdebug_host_list)) {
3558 		sdbg_host = list_entry(sdebug_host_list.prev,
3559 				       struct sdebug_host_info, host_list);
3560 		list_del(&sdbg_host->host_list);
3561 	}
3562 	spin_unlock(&sdebug_host_list_lock);
3563 
3564 	if (!sdbg_host)
3565 		return;
3566 
3567 	device_unregister(&sdbg_host->dev);
3568 	--scsi_debug_add_host;
3569 }
3570 
3571 static
3572 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3573 {
3574 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3575 	int len, k;
3576 	unsigned int num;
3577 	unsigned long long lba;
3578 	u32 ei_lba;
3579 	int errsts = 0;
3580 	int target = SCpnt->device->id;
3581 	struct sdebug_dev_info *devip = NULL;
3582 	int inj_recovered = 0;
3583 	int inj_transport = 0;
3584 	int inj_dif = 0;
3585 	int inj_dix = 0;
3586 	int delay_override = 0;
3587 	int unmap = 0;
3588 
3589 	scsi_set_resid(SCpnt, 0);
3590 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3591 		printk(KERN_INFO "scsi_debug: cmd ");
3592 		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3593 			printk("%02x ", (int)cmd[k]);
3594 		printk("\n");
3595 	}
3596 
3597 	if (target == SCpnt->device->host->hostt->this_id) {
3598 		printk(KERN_INFO "scsi_debug: initiator's id used as "
3599 		       "target!\n");
3600 		return schedule_resp(SCpnt, NULL, done,
3601 				     DID_NO_CONNECT << 16, 0);
3602 	}
3603 
3604 	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3605 	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3606 		return schedule_resp(SCpnt, NULL, done,
3607 				     DID_NO_CONNECT << 16, 0);
3608 	devip = devInfoReg(SCpnt->device);
3609 	if (NULL == devip)
3610 		return schedule_resp(SCpnt, NULL, done,
3611 				     DID_NO_CONNECT << 16, 0);
3612 
3613 	if ((scsi_debug_every_nth != 0) &&
3614 	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3615 		scsi_debug_cmnd_count = 0;
3616 		if (scsi_debug_every_nth < -1)
3617 			scsi_debug_every_nth = -1;
3618 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3619 			return 0; /* ignore command causing timeout */
3620 		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3621 			 scsi_medium_access_command(SCpnt))
3622 			return 0; /* time out reads and writes */
3623 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3624 			inj_recovered = 1; /* to reads and writes below */
3625 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3626 			inj_transport = 1; /* to reads and writes below */
3627 		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3628 			inj_dif = 1; /* to reads and writes below */
3629 		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3630 			inj_dix = 1; /* to reads and writes below */
3631 	}
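	/*
	 * Example: loading the module with every_nth=100 and an opts mask
	 * that includes SCSI_DEBUG_OPT_RECOVERED_ERR makes roughly every
	 * 100th command cause the read and write cases below to return a
	 * RECOVERED ERROR / THRESHOLD EXCEEDED check condition via the
	 * inj_recovered handling.
	 */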
3632 
3633 	if (devip->wlun) {
3634 		switch (*cmd) {
3635 		case INQUIRY:
3636 		case REQUEST_SENSE:
3637 		case TEST_UNIT_READY:
3638 		case REPORT_LUNS:
3639 			break;  /* only allowable wlun commands */
3640 		default:
3641 			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3642 				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3643 				       "not supported for wlun\n", *cmd);
3644 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3645 					INVALID_OPCODE, 0);
3646 			errsts = check_condition_result;
3647 			return schedule_resp(SCpnt, devip, done, errsts,
3648 					     0);
3649 		}
3650 	}
3651 
3652 	switch (*cmd) {
3653 	case INQUIRY:     /* mandatory, ignore unit attention */
3654 		delay_override = 1;
3655 		errsts = resp_inquiry(SCpnt, target, devip);
3656 		break;
3657 	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
3658 		delay_override = 1;
3659 		errsts = resp_requests(SCpnt, devip);
3660 		break;
3661 	case REZERO_UNIT:	/* actually this is REWIND for SSC */
3662 	case START_STOP:
3663 		errsts = resp_start_stop(SCpnt, devip);
3664 		break;
3665 	case ALLOW_MEDIUM_REMOVAL:
3666 		errsts = check_readiness(SCpnt, 1, devip);
3667 		if (errsts)
3668 			break;
3669 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3670 			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3671 			       cmd[4] ? "inhibited" : "enabled");
3672 		break;
3673 	case SEND_DIAGNOSTIC:     /* mandatory */
3674 		errsts = check_readiness(SCpnt, 1, devip);
3675 		break;
3676 	case TEST_UNIT_READY:     /* mandatory */
3677 		delay_override = 1;
3678 		errsts = check_readiness(SCpnt, 0, devip);
3679 		break;
3680 	case RESERVE:
3681 		errsts = check_readiness(SCpnt, 1, devip);
3682 		break;
3683 	case RESERVE_10:
3684 		errsts = check_readiness(SCpnt, 1, devip);
3685 		break;
3686 	case RELEASE:
3687 		errsts = check_readiness(SCpnt, 1, devip);
3688 		break;
3689 	case RELEASE_10:
3690 		errsts = check_readiness(SCpnt, 1, devip);
3691 		break;
3692 	case READ_CAPACITY:
3693 		errsts = resp_readcap(SCpnt, devip);
3694 		break;
3695 	case SERVICE_ACTION_IN:
3696 		if (cmd[1] == SAI_READ_CAPACITY_16)
3697 			errsts = resp_readcap16(SCpnt, devip);
3698 		else if (cmd[1] == SAI_GET_LBA_STATUS) {
3699 
3700 			if (scsi_debug_lbp() == 0) {
3701 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3702 						INVALID_COMMAND_OPCODE, 0);
3703 				errsts = check_condition_result;
3704 			} else
3705 				errsts = resp_get_lba_status(SCpnt, devip);
3706 		} else {
3707 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3708 					INVALID_OPCODE, 0);
3709 			errsts = check_condition_result;
3710 		}
3711 		break;
3712 	case MAINTENANCE_IN:
3713 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
3714 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3715 					INVALID_OPCODE, 0);
3716 			errsts = check_condition_result;
3717 			break;
3718 		}
3719 		errsts = resp_report_tgtpgs(SCpnt, devip);
3720 		break;
3721 	case READ_16:
3722 	case READ_12:
3723 	case READ_10:
3724 		/* READ{10,12,16} and DIF Type 2 are natural enemies */
3725 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3726 		    cmd[1] & 0xe0) {
3727 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3728 					INVALID_COMMAND_OPCODE, 0);
3729 			errsts = check_condition_result;
3730 			break;
3731 		}
3732 
3733 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3734 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3735 		    (cmd[1] & 0xe0) == 0)
3736 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3737 
3738 		/* fall through */
3739 	case READ_6:
3740 read:
3741 		errsts = check_readiness(SCpnt, 0, devip);
3742 		if (errsts)
3743 			break;
3744 		if (scsi_debug_fake_rw)
3745 			break;
3746 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3747 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3748 		if (inj_recovered && (0 == errsts)) {
3749 			mk_sense_buffer(devip, RECOVERED_ERROR,
3750 					THRESHOLD_EXCEEDED, 0);
3751 			errsts = check_condition_result;
3752 		} else if (inj_transport && (0 == errsts)) {
3753 			mk_sense_buffer(devip, ABORTED_COMMAND,
3754 					TRANSPORT_PROBLEM, ACK_NAK_TO);
3755 			errsts = check_condition_result;
3756 		} else if (inj_dif && (0 == errsts)) {
3757 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3758 			errsts = illegal_condition_result;
3759 		} else if (inj_dix && (0 == errsts)) {
3760 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3761 			errsts = illegal_condition_result;
3762 		}
3763 		break;
3764 	case REPORT_LUNS:	/* mandatory, ignore unit attention */
3765 		delay_override = 1;
3766 		errsts = resp_report_luns(SCpnt, devip);
3767 		break;
3768 	case VERIFY:		/* 10 byte SBC-2 command */
3769 		errsts = check_readiness(SCpnt, 0, devip);
3770 		break;
3771 	case WRITE_16:
3772 	case WRITE_12:
3773 	case WRITE_10:
3774 		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3775 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3776 		    cmd[1] & 0xe0) {
3777 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3778 					INVALID_COMMAND_OPCODE, 0);
3779 			errsts = check_condition_result;
3780 			break;
3781 		}
3782 
3783 		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3784 		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3785 		    (cmd[1] & 0xe0) == 0)
3786 			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3787 
3788 		/* fall through */
3789 	case WRITE_6:
3790 write:
3791 		errsts = check_readiness(SCpnt, 0, devip);
3792 		if (errsts)
3793 			break;
3794 		if (scsi_debug_fake_rw)
3795 			break;
3796 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3797 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3798 		if (inj_recovered && (0 == errsts)) {
3799 			mk_sense_buffer(devip, RECOVERED_ERROR,
3800 					THRESHOLD_EXCEEDED, 0);
3801 			errsts = check_condition_result;
3802 		} else if (inj_dif && (0 == errsts)) {
3803 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3804 			errsts = illegal_condition_result;
3805 		} else if (inj_dix && (0 == errsts)) {
3806 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3807 			errsts = illegal_condition_result;
3808 		}
3809 		break;
3810 	case WRITE_SAME_16:
3811 	case WRITE_SAME:
3812 		if (cmd[1] & 0x8) {
3813 			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3814 			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3815 				mk_sense_buffer(devip, ILLEGAL_REQUEST,
3816 						INVALID_FIELD_IN_CDB, 0);
3817 				errsts = check_condition_result;
3818 			} else
3819 				unmap = 1;
3820 		}
3821 		if (errsts)
3822 			break;
3823 		errsts = check_readiness(SCpnt, 0, devip);
3824 		if (errsts)
3825 			break;
3826 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3827 		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3828 		break;
3829 	case UNMAP:
3830 		errsts = check_readiness(SCpnt, 0, devip);
3831 		if (errsts)
3832 			break;
3833 
3834 		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3835 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3836 					INVALID_COMMAND_OPCODE, 0);
3837 			errsts = check_condition_result;
3838 		} else
3839 			errsts = resp_unmap(SCpnt, devip);
3840 		break;
3841 	case MODE_SENSE:
3842 	case MODE_SENSE_10:
3843 		errsts = resp_mode_sense(SCpnt, target, devip);
3844 		break;
3845 	case MODE_SELECT:
3846 		errsts = resp_mode_select(SCpnt, 1, devip);
3847 		break;
3848 	case MODE_SELECT_10:
3849 		errsts = resp_mode_select(SCpnt, 0, devip);
3850 		break;
3851 	case LOG_SENSE:
3852 		errsts = resp_log_sense(SCpnt, devip);
3853 		break;
3854 	case SYNCHRONIZE_CACHE:
3855 		delay_override = 1;
3856 		errsts = check_readiness(SCpnt, 0, devip);
3857 		break;
3858 	case WRITE_BUFFER:
3859 		errsts = check_readiness(SCpnt, 1, devip);
3860 		break;
3861 	case XDWRITEREAD_10:
3862 		if (!scsi_bidi_cmnd(SCpnt)) {
3863 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
3864 					INVALID_FIELD_IN_CDB, 0);
3865 			errsts = check_condition_result;
3866 			break;
3867 		}
3868 
3869 		errsts = check_readiness(SCpnt, 0, devip);
3870 		if (errsts)
3871 			break;
3872 		if (scsi_debug_fake_rw)
3873 			break;
3874 		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3875 		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3876 		if (errsts)
3877 			break;
3878 		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3879 		if (errsts)
3880 			break;
3881 		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3882 		break;
3883 	case VARIABLE_LENGTH_CMD:
3884 		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3885 
3886 			if ((cmd[10] & 0xe0) == 0)
3887 				printk(KERN_ERR
3888 				       "Unprotected RD/WR to DIF device\n");
3889 
3890 			if (cmd[9] == READ_32) {
3891 				BUG_ON(SCpnt->cmd_len < 32);
3892 				goto read;
3893 			}
3894 
3895 			if (cmd[9] == WRITE_32) {
3896 				BUG_ON(SCpnt->cmd_len < 32);
3897 				goto write;
3898 			}
3899 		}
3900 
3901 		mk_sense_buffer(devip, ILLEGAL_REQUEST,
3902 				INVALID_FIELD_IN_CDB, 0);
3903 		errsts = check_condition_result;
3904 		break;
3905 
3906 	default:
3907 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3908 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3909 			       "supported\n", *cmd);
3910 		errsts = check_readiness(SCpnt, 1, devip);
3911 		if (errsts)
3912 			break;	/* Unit attention takes precedence */
3913 		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3914 		errsts = check_condition_result;
3915 		break;
3916 	}
3917 	return schedule_resp(SCpnt, devip, done, errsts,
3918 			     (delay_override ? 0 : scsi_debug_delay));
3919 }
3920 
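/*
 * DEF_SCSI_QCMD() generates the scsi_debug_queuecommand() wrapper that
 * takes the Scsi_Host lock and then calls scsi_debug_queuecommand_lck()
 * defined above.
 */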
3921 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3922 
3923 static struct scsi_host_template sdebug_driver_template = {
3924 	.show_info =		scsi_debug_show_info,
3925 	.write_info =		scsi_debug_write_info,
3926 	.proc_name =		sdebug_proc_name,
3927 	.name =			"SCSI DEBUG",
3928 	.info =			scsi_debug_info,
3929 	.slave_alloc =		scsi_debug_slave_alloc,
3930 	.slave_configure =	scsi_debug_slave_configure,
3931 	.slave_destroy =	scsi_debug_slave_destroy,
3932 	.ioctl =		scsi_debug_ioctl,
3933 	.queuecommand =		scsi_debug_queuecommand,
3934 	.eh_abort_handler =	scsi_debug_abort,
3935 	.eh_bus_reset_handler = scsi_debug_bus_reset,
3936 	.eh_device_reset_handler = scsi_debug_device_reset,
3937 	.eh_host_reset_handler = scsi_debug_host_reset,
3938 	.bios_param =		scsi_debug_biosparam,
3939 	.can_queue =		SCSI_DEBUG_CANQUEUE,
3940 	.this_id =		7,
3941 	.sg_tablesize =		256,
3942 	.cmd_per_lun =		16,
3943 	.max_sectors =		0xffff,
3944 	.use_clustering = 	DISABLE_CLUSTERING,
3945 	.module =		THIS_MODULE,
3946 };
3947 
3948 static int sdebug_driver_probe(struct device *dev)
3949 {
3950 	int error = 0;
3951 	struct sdebug_host_info *sdbg_host;
3952 	struct Scsi_Host *hpnt;
3953 	int host_prot;
3954 
3955 	sdbg_host = to_sdebug_host(dev);
3956 
3957 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
3958 	if (scsi_debug_clustering)
3959 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
3960 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3961 	if (NULL == hpnt) {
3962 		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
3963 		error = -ENODEV;
3964 		return error;
3965 	}
3966 
3967 	sdbg_host->shost = hpnt;
3968 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3969 	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3970 		hpnt->max_id = scsi_debug_num_tgts + 1;
3971 	else
3972 		hpnt->max_id = scsi_debug_num_tgts;
3973 	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
3974 
3975 	host_prot = 0;
3976 
3977 	switch (scsi_debug_dif) {
3978 
3979 	case SD_DIF_TYPE1_PROTECTION:
3980 		host_prot = SHOST_DIF_TYPE1_PROTECTION;
3981 		if (scsi_debug_dix)
3982 			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3983 		break;
3984 
3985 	case SD_DIF_TYPE2_PROTECTION:
3986 		host_prot = SHOST_DIF_TYPE2_PROTECTION;
3987 		if (scsi_debug_dix)
3988 			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3989 		break;
3990 
3991 	case SD_DIF_TYPE3_PROTECTION:
3992 		host_prot = SHOST_DIF_TYPE3_PROTECTION;
3993 		if (scsi_debug_dix)
3994 			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3995 		break;
3996 
3997 	default:
3998 		if (scsi_debug_dix)
3999 			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4000 		break;
4001 	}
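	/*
	 * For instance, loading with dif=1 dix=1 reaches this point with
	 * host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION,
	 * and the banner below then reports " DIF1 DIX1".
	 */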
4002 
4003 	scsi_host_set_prot(hpnt, host_prot);
4004 
4005 	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4006 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4007 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4008 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4009 	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4010 	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4011 	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4012 	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4013 
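	/*
	 * guard=1 advertises IP-checksum guard tags for DIX; otherwise (the
	 * default guard=0) the standard T10 CRC guard is kept.
	 */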
4014 	if (scsi_debug_guard == 1)
4015 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4016 	else
4017 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4018 
4019 	error = scsi_add_host(hpnt, &sdbg_host->dev);
4020 	if (error) {
4021 		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4022 		error = -ENODEV;
4023 		scsi_host_put(hpnt);
4024 	} else
4025 		scsi_scan_host(hpnt);
4026 
4028 	return error;
4029 }
4030 
4031 static int sdebug_driver_remove(struct device *dev)
4032 {
4033 	struct sdebug_host_info *sdbg_host;
4034 	struct sdebug_dev_info *sdbg_devinfo, *tmp;
4035 
4036 	sdbg_host = to_sdebug_host(dev);
4037 
4038 	if (!sdbg_host) {
4039 		printk(KERN_ERR "%s: Unable to locate host info\n",
4040 		       __func__);
4041 		return -ENODEV;
4042 	}
4043 
4044 	scsi_remove_host(sdbg_host->shost);
4045 
4046 	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4047 				 dev_list) {
4048 		list_del(&sdbg_devinfo->dev_list);
4049 		kfree(sdbg_devinfo);
4050 	}
4051 
4052 	scsi_host_put(sdbg_host->shost);
4053 	return 0;
4054 }
4055 
4056 static int pseudo_lld_bus_match(struct device *dev,
4057 				struct device_driver *dev_driver)
4058 {
4059 	return 1;
4060 }
4061 
4062 static struct bus_type pseudo_lld_bus = {
4063 	.name = "pseudo",
4064 	.match = pseudo_lld_bus_match,
4065 	.probe = sdebug_driver_probe,
4066 	.remove = sdebug_driver_remove,
4067 	.drv_groups = sdebug_drv_groups,
4068 };
4069