1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 *
10 * Copyright (C) 2001 - 2021 Douglas Gilbert
11 *
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
13 */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44
45 #include <net/checksum.h>
46
47 #include <asm/unaligned.h>
48
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_host.h>
53 #include <scsi/scsicam.h>
54 #include <scsi/scsi_eh.h>
55 #include <scsi/scsi_tcq.h>
56 #include <scsi/scsi_dbg.h>
57
58 #include "sd.h"
59 #include "scsi_logging.h"
60
61 /* make sure inq_product_rev string corresponds to this version */
62 #define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
63 static const char *sdebug_version_date = "20210520";
64
65 #define MY_NAME "scsi_debug"
66
67 /* Additional Sense Code (ASC) */
68 #define NO_ADDITIONAL_SENSE 0x0
69 #define LOGICAL_UNIT_NOT_READY 0x4
70 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
71 #define UNRECOVERED_READ_ERR 0x11
72 #define PARAMETER_LIST_LENGTH_ERR 0x1a
73 #define INVALID_OPCODE 0x20
74 #define LBA_OUT_OF_RANGE 0x21
75 #define INVALID_FIELD_IN_CDB 0x24
76 #define INVALID_FIELD_IN_PARAM_LIST 0x26
77 #define WRITE_PROTECTED 0x27
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define POWER_ON_OCCURRED_ASCQ 0x1
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define ATTEMPT_ACCESS_GAP 0x9
102 #define INSUFF_ZONE_ASCQ 0xe
103
104 /* Additional Sense Code Qualifier (ASCQ) */
105 #define ACK_NAK_TO 0x3
106
107 /* Default values for driver parameters */
108 #define DEF_NUM_HOST 1
109 #define DEF_NUM_TGTS 1
110 #define DEF_MAX_LUNS 1
111 /* With these defaults, this driver will make 1 host with 1 target
112 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
113 */
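/* Illustrative (hypothetical values) overriding of some of these defaults,
 * using module parameter names this driver registers further down:
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4 dev_size_mb=64
 * creates 2 hosts, each with 2 targets of 4 LUNs, every LUN backed by a
 * 64 MB RAM store.
 */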
114 #define DEF_ATO 1
115 #define DEF_CDB_LEN 10
116 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
117 #define DEF_DEV_SIZE_PRE_INIT 0
118 #define DEF_DEV_SIZE_MB 8
119 #define DEF_ZBC_DEV_SIZE_MB 128
120 #define DEF_DIF 0
121 #define DEF_DIX 0
122 #define DEF_PER_HOST_STORE false
123 #define DEF_D_SENSE 0
124 #define DEF_EVERY_NTH 0
125 #define DEF_FAKE_RW 0
126 #define DEF_GUARD 0
127 #define DEF_HOST_LOCK 0
128 #define DEF_LBPU 0
129 #define DEF_LBPWS 0
130 #define DEF_LBPWS10 0
131 #define DEF_LBPRZ 1
132 #define DEF_LOWEST_ALIGNED 0
133 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
134 #define DEF_NO_LUN_0 0
135 #define DEF_NUM_PARTS 0
136 #define DEF_OPTS 0
137 #define DEF_OPT_BLKS 1024
138 #define DEF_PHYSBLK_EXP 0
139 #define DEF_OPT_XFERLEN_EXP 0
140 #define DEF_PTYPE TYPE_DISK
141 #define DEF_RANDOM false
142 #define DEF_REMOVABLE false
143 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
144 #define DEF_SECTOR_SIZE 512
145 #define DEF_UNMAP_ALIGNMENT 0
146 #define DEF_UNMAP_GRANULARITY 1
147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
148 #define DEF_UNMAP_MAX_DESC 256
149 #define DEF_VIRTUAL_GB 0
150 #define DEF_VPD_USE_HOSTNO 1
151 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STRICT 0
153 #define DEF_STATISTICS false
154 #define DEF_SUBMIT_QUEUES 1
155 #define DEF_TUR_MS_TO_READY 0
156 #define DEF_UUID_CTL 0
157 #define JDELAY_OVERRIDDEN -9999
158
159 /* Default parameters for ZBC drives */
160 #define DEF_ZBC_ZONE_SIZE_MB 128
161 #define DEF_ZBC_MAX_OPEN_ZONES 8
162 #define DEF_ZBC_NR_CONV_ZONES 1
163
164 #define SDEBUG_LUN_0_VAL 0
165
166 /* bit mask values for sdebug_opts */
167 #define SDEBUG_OPT_NOISE 1
168 #define SDEBUG_OPT_MEDIUM_ERR 2
169 #define SDEBUG_OPT_TIMEOUT 4
170 #define SDEBUG_OPT_RECOVERED_ERR 8
171 #define SDEBUG_OPT_TRANSPORT_ERR 16
172 #define SDEBUG_OPT_DIF_ERR 32
173 #define SDEBUG_OPT_DIX_ERR 64
174 #define SDEBUG_OPT_MAC_TIMEOUT 128
175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
176 #define SDEBUG_OPT_Q_NOISE 0x200
177 #define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
178 #define SDEBUG_OPT_RARE_TSF 0x800
179 #define SDEBUG_OPT_N_WCE 0x1000
180 #define SDEBUG_OPT_RESET_NOISE 0x2000
181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
182 #define SDEBUG_OPT_HOST_BUSY 0x8000
183 #define SDEBUG_OPT_CMD_ABORT 0x10000
184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
185 SDEBUG_OPT_RESET_NOISE)
186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
187 SDEBUG_OPT_TRANSPORT_ERR | \
188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
189 SDEBUG_OPT_SHORT_TRANSFER | \
190 SDEBUG_OPT_HOST_BUSY | \
191 SDEBUG_OPT_CMD_ABORT)
192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194
195 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
196 * priority order. In the subset implemented here lower numbers have higher
197 * priority. The UA numbers should be a sequence starting from 0 with
198 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
199 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
200 #define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
201 #define SDEBUG_UA_BUS_RESET 2
202 #define SDEBUG_UA_MODE_CHANGED 3
203 #define SDEBUG_UA_CAPACITY_CHANGED 4
204 #define SDEBUG_UA_LUNS_CHANGED 5
205 #define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
207 #define SDEBUG_NUM_UAS 8
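/* Worked example of the priority rule above: if both SDEBUG_UA_POR (0) and
 * SDEBUG_UA_LUNS_CHANGED (5) are pending in a device's uas_bm bitmap, a
 * find_first_bit() scan returns 0, so the power-on/reset UA is reported
 * first (see make_ua() below).
 */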
208
209 /* when the SDEBUG_OPT_MEDIUM_ERR option is set, a medium error is simulated
210 * at this sector on read commands: */
211 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
212 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
213
214 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
215 * (for response) per submit queue at one time. Can be reduced by max_queue
216 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
217 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
218 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
219 * but cannot exceed SDEBUG_CANQUEUE.
220 */
221 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is the number of bits in a long */
222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
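/* For example, on a 64-bit build (BITS_PER_LONG == 64):
 *   SDEBUG_CANQUEUE = 3 * 64 = 192 queued commands per submit queue.
 */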
224
225 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
226 #define F_D_IN 1 /* Data-in command (e.g. READ) */
227 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
228 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
229 #define F_D_UNKN 8
230 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
231 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
232 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
233 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
234 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
235 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
236 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
237 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
238 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
239 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
240
241 /* Useful combinations of the above flags */
242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
246
247 #define SDEBUG_MAX_PARTS 4
248
249 #define SDEBUG_MAX_CMD_LEN 32
250
251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
252
253 static struct kmem_cache *queued_cmd_cache;
254
255 #define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
256 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
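/* Usage sketch: per-command state is stashed in, and later recovered from,
 * the mid-level's host_scribble pointer:
 *	ASSIGN_QUEUED_CMD(scmd, sqcp);
 *	struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(scmd);
 */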
257
258 /* Zone types (zbcr05 table 25) */
259 enum sdebug_z_type {
260 ZBC_ZTYPE_CNV = 0x1,
261 ZBC_ZTYPE_SWR = 0x2,
262 ZBC_ZTYPE_SWP = 0x3,
263 /* ZBC_ZTYPE_SOBR = 0x4, */
264 ZBC_ZTYPE_GAP = 0x5,
265 };
266
267 /* enumeration names taken from table 26, zbcr05 */
268 enum sdebug_z_cond {
269 ZBC_NOT_WRITE_POINTER = 0x0,
270 ZC1_EMPTY = 0x1,
271 ZC2_IMPLICIT_OPEN = 0x2,
272 ZC3_EXPLICIT_OPEN = 0x3,
273 ZC4_CLOSED = 0x4,
274 ZC6_READ_ONLY = 0xd,
275 ZC5_FULL = 0xe,
276 ZC7_OFFLINE = 0xf,
277 };
278
279 struct sdeb_zone_state { /* ZBC: per zone state */
280 enum sdebug_z_type z_type;
281 enum sdebug_z_cond z_cond;
282 bool z_non_seq_resource;
283 unsigned int z_size;
284 sector_t z_start;
285 sector_t z_wp;
286 };
287
288 struct sdebug_dev_info {
289 struct list_head dev_list;
290 unsigned int channel;
291 unsigned int target;
292 u64 lun;
293 uuid_t lu_name;
294 struct sdebug_host_info *sdbg_host;
295 unsigned long uas_bm[1];
296 atomic_t stopped; /* 1: by SSU, 2: device start */
297 bool used;
298
299 /* For ZBC devices */
300 enum blk_zoned_model zmodel;
301 unsigned int zcap;
302 unsigned int zsize;
303 unsigned int zsize_shift;
304 unsigned int nr_zones;
305 unsigned int nr_conv_zones;
306 unsigned int nr_seq_zones;
307 unsigned int nr_imp_open;
308 unsigned int nr_exp_open;
309 unsigned int nr_closed;
310 unsigned int max_open;
311 ktime_t create_ts; /* time since bootup that this device was created */
312 struct sdeb_zone_state *zstate;
313 };
314
315 struct sdebug_host_info {
316 struct list_head host_list;
317 int si_idx; /* sdeb_store_info (per host) xarray index */
318 struct Scsi_Host *shost;
319 struct device dev;
320 struct list_head dev_info_list;
321 };
322
323 /* There is an xarray of pointers to this struct's objects, one per host */
324 struct sdeb_store_info {
325 rwlock_t macc_lck; /* for atomic media access on this store */
326 u8 *storep; /* user data storage (ram) */
327 struct t10_pi_tuple *dif_storep; /* protection info */
328 void *map_storep; /* provisioning map */
329 };
330
331 #define dev_to_sdebug_host(d) \
332 container_of(d, struct sdebug_host_info, dev)
333
334 #define shost_to_sdebug_host(shost) \
335 dev_to_sdebug_host(shost->dma_dev)
336
337 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
338 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
339
340 struct sdebug_defer {
341 struct hrtimer hrt;
342 struct execute_work ew;
343 ktime_t cmpl_ts;/* time since boot to complete this cmd */
344 int issuing_cpu;
345 bool aborted; /* true when blk_abort_request() already called */
346 enum sdeb_defer_type defer_t;
347 };
348
349 struct sdebug_queued_cmd {
350 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
351 * instance indicates this slot is in use.
352 */
353 struct sdebug_defer sd_dp;
354 struct scsi_cmnd *scmd;
355 };
356
357 struct sdebug_scsi_cmd {
358 spinlock_t lock;
359 };
360
361 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
362 static atomic_t sdebug_completions; /* count of deferred completions */
363 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
364 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
365 static atomic_t sdeb_inject_pending;
366 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
367
368 struct opcode_info_t {
369 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
370 /* for terminating element */
371 u8 opcode; /* if num_attached > 0, preferred */
372 u16 sa; /* service action */
373 u32 flags; /* OR-ed set of SDEB_F_* */
374 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
375 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
376 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
377 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
378 };
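/* Example reading of len_mask, using the INQUIRY entry of opcode_info_arr[]
 * below ({6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, ...}): len_mask[0] says the
 * cdb is 6 bytes long; with sdebug_strict set, any cdb[1] bit outside 0xe3
 * (and so on for the later bytes) is reported as an invalid field in cdb.
 */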
379
380 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
381 enum sdeb_opcode_index {
382 SDEB_I_INVALID_OPCODE = 0,
383 SDEB_I_INQUIRY = 1,
384 SDEB_I_REPORT_LUNS = 2,
385 SDEB_I_REQUEST_SENSE = 3,
386 SDEB_I_TEST_UNIT_READY = 4,
387 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
388 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
389 SDEB_I_LOG_SENSE = 7,
390 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
391 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
392 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
393 SDEB_I_START_STOP = 11,
394 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
395 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
396 SDEB_I_MAINT_IN = 14,
397 SDEB_I_MAINT_OUT = 15,
398 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
399 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
400 SDEB_I_RESERVE = 18, /* 6, 10 */
401 SDEB_I_RELEASE = 19, /* 6, 10 */
402 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
403 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
404 SDEB_I_ATA_PT = 22, /* 12, 16 */
405 SDEB_I_SEND_DIAG = 23,
406 SDEB_I_UNMAP = 24,
407 SDEB_I_WRITE_BUFFER = 25,
408 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
409 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
410 SDEB_I_COMP_WRITE = 28,
411 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
412 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
413 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
414 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
415 };
416
417
418 static const unsigned char opcode_ind_arr[256] = {
419 /* 0x0; 0x0->0x1f: 6 byte cdbs */
420 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
421 0, 0, 0, 0,
422 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
423 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
424 SDEB_I_RELEASE,
425 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
426 SDEB_I_ALLOW_REMOVAL, 0,
427 /* 0x20; 0x20->0x3f: 10 byte cdbs */
428 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
429 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
430 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
431 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
432 /* 0x40; 0x40->0x5f: 10 byte cdbs */
433 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
434 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
435 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
436 SDEB_I_RELEASE,
437 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
438 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
439 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
440 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
441 0, SDEB_I_VARIABLE_LEN,
442 /* 0x80; 0x80->0x9f: 16 byte cdbs */
443 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
444 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
445 0, 0, 0, SDEB_I_VERIFY,
446 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
447 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
448 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
449 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
450 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
451 SDEB_I_MAINT_OUT, 0, 0, 0,
452 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
453 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0,
456 /* 0xc0; 0xc0->0xff: vendor specific */
457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
458 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
459 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
461 };
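/* Example lookup: an incoming INQUIRY cdb has first byte 0x12, and
 * opcode_ind_arr[0x12] == SDEB_I_INQUIRY, which in turn indexes the
 * opcode_info_arr[] table below.
 */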
462
463 /*
464 * The following "response" functions return the SCSI mid-level's 4 byte
465 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
466 * command completion, they can OR their return value with
467 * SDEG_RES_IMMED_MASK.
468 */
469 #define SDEG_RES_IMMED_MASK 0x40000000
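/* Sketch (not one of the real resp_*() handlers below): a function honouring
 * an IMMED bit could finish with
 *	return (DID_OK << 16) | SAM_STAT_GOOD | SDEG_RES_IMMED_MASK;
 * so the result is reported early while the simulated operation is still
 * notionally in progress.
 */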
470
471 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
496 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
498 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
499 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
500
501 static int sdebug_do_add_host(bool mk_new_store);
502 static int sdebug_add_host_helper(int per_host_idx);
503 static void sdebug_do_remove_host(bool the_end);
504 static int sdebug_add_store(void);
505 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
506 static void sdebug_erase_all_stores(bool apart_from_first);
507
508 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
509
510 /*
511 * The following are overflow arrays for cdbs that "hit" the same index in
512 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
513 * should be placed in opcode_info_arr[], the others should be placed here.
514 */
515 static const struct opcode_info_t msense_iarr[] = {
516 {0, 0x1a, 0, F_D_IN, NULL, NULL,
517 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 };
519
520 static const struct opcode_info_t mselect_iarr[] = {
521 {0, 0x15, 0, F_D_OUT, NULL, NULL,
522 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523 };
524
525 static const struct opcode_info_t read_iarr[] = {
526 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
528 0, 0, 0, 0} },
529 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
530 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
531 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
532 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
533 0xc7, 0, 0, 0, 0} },
534 };
535
536 static const struct opcode_info_t write_iarr[] = {
537 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
538 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
539 0, 0, 0, 0, 0, 0} },
540 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
541 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
542 0, 0, 0} },
543 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
544 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
545 0xbf, 0xc7, 0, 0, 0, 0} },
546 };
547
548 static const struct opcode_info_t verify_iarr[] = {
549 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
550 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
551 0, 0, 0, 0, 0, 0} },
552 };
553
554 static const struct opcode_info_t sa_in_16_iarr[] = {
555 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
556 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
557 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
558 };
559
560 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
561 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
562 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
563 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
564 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
565 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
566 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
567 };
568
569 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
570 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
571 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
572 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
573 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
574 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
575 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
576 };
577
578 static const struct opcode_info_t write_same_iarr[] = {
579 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
580 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
581 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
582 };
583
584 static const struct opcode_info_t reserve_iarr[] = {
585 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
586 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
587 };
588
589 static const struct opcode_info_t release_iarr[] = {
590 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
591 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
592 };
593
594 static const struct opcode_info_t sync_cache_iarr[] = {
595 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
596 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
598 };
599
600 static const struct opcode_info_t pre_fetch_iarr[] = {
601 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
602 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
604 };
605
606 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
607 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
608 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
610 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
611 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
612 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
613 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
614 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
616 };
617
618 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
619 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
620 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
621 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
622 };
623
624
625 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
626 * plus the terminating elements for logic that scans this table such as
627 * REPORT SUPPORTED OPERATION CODES. */
628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
629 /* 0 */
630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
636 0, 0} }, /* REPORT LUNS */
637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
641 /* 5 */
642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
650 0, 0, 0} },
651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
653 0, 0} },
654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
657 /* 10 */
658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
659 resp_write_dt0, write_iarr, /* WRITE(16) */
660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
665 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
670 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
672 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
674 0xff, 0, 0xc7, 0, 0, 0, 0} },
675 /* 15 */
676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
683 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
685 0xff, 0xff} },
686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
687 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 0} },
690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
691 NULL, release_iarr, /* RELEASE(10) <no response function> */
692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
693 0} },
694 /* 20 */
695 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
701 {0, 0x1d, 0, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */
702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
705 /* 25 */
706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
708 0, 0, 0, 0} }, /* WRITE_BUFFER */
709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
710 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
712 0, 0, 0, 0, 0} },
713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
714 resp_sync_cache, sync_cache_iarr,
715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
716 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
719 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
721 resp_pre_fetch, pre_fetch_iarr,
722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
723 0, 0, 0, 0} }, /* PRE-FETCH (10) */
724
725 /* 30 */
726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
727 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
728 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
731 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
732 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
734 /* sentinel */
735 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
736 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
737 };
738
739 static int sdebug_num_hosts;
740 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
741 static int sdebug_ato = DEF_ATO;
742 static int sdebug_cdb_len = DEF_CDB_LEN;
743 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
744 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
745 static int sdebug_dif = DEF_DIF;
746 static int sdebug_dix = DEF_DIX;
747 static int sdebug_dsense = DEF_D_SENSE;
748 static int sdebug_every_nth = DEF_EVERY_NTH;
749 static int sdebug_fake_rw = DEF_FAKE_RW;
750 static unsigned int sdebug_guard = DEF_GUARD;
751 static int sdebug_host_max_queue; /* per host */
752 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
753 static int sdebug_max_luns = DEF_MAX_LUNS;
754 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
755 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
756 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
757 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
758 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
759 static int sdebug_no_uld;
760 static int sdebug_num_parts = DEF_NUM_PARTS;
761 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
762 static int sdebug_opt_blks = DEF_OPT_BLKS;
763 static int sdebug_opts = DEF_OPTS;
764 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
765 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
766 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
767 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
768 static int sdebug_sector_size = DEF_SECTOR_SIZE;
769 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
770 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
771 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
772 static unsigned int sdebug_lbpu = DEF_LBPU;
773 static unsigned int sdebug_lbpws = DEF_LBPWS;
774 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
775 static unsigned int sdebug_lbprz = DEF_LBPRZ;
776 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
777 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
778 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
779 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
780 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
781 static int sdebug_uuid_ctl = DEF_UUID_CTL;
782 static bool sdebug_random = DEF_RANDOM;
783 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
784 static bool sdebug_removable = DEF_REMOVABLE;
785 static bool sdebug_clustering;
786 static bool sdebug_host_lock = DEF_HOST_LOCK;
787 static bool sdebug_strict = DEF_STRICT;
788 static bool sdebug_any_injecting_opt;
789 static bool sdebug_no_rwlock;
790 static bool sdebug_verbose;
791 static bool have_dif_prot;
792 static bool write_since_sync;
793 static bool sdebug_statistics = DEF_STATISTICS;
794 static bool sdebug_wp;
795 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
796 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
797 static char *sdeb_zbc_model_s;
798
799 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
800 SAM_LUN_AM_FLAT = 0x1,
801 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
802 SAM_LUN_AM_EXTENDED = 0x3};
803 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
804 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
805
806 static unsigned int sdebug_store_sectors;
807 static sector_t sdebug_capacity; /* in sectors */
808
809 /* old BIOS stuff, the kernel may get rid of these but some mode sense
810 pages may still need them */
811 static int sdebug_heads; /* heads per disk */
812 static int sdebug_cylinders_per; /* cylinders per surface */
813 static int sdebug_sectors_per; /* sectors per cylinder */
814
815 static LIST_HEAD(sdebug_host_list);
816 static DEFINE_MUTEX(sdebug_host_list_mutex);
817
818 static struct xarray per_store_arr;
819 static struct xarray *per_store_ap = &per_store_arr;
820 static int sdeb_first_idx = -1; /* invalid index ==> none created */
821 static int sdeb_most_recent_idx = -1;
822 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
823
824 static unsigned long map_size;
825 static int num_aborts;
826 static int num_dev_resets;
827 static int num_target_resets;
828 static int num_bus_resets;
829 static int num_host_resets;
830 static int dix_writes;
831 static int dix_reads;
832 static int dif_errors;
833
834 /* ZBC global data */
835 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
836 static int sdeb_zbc_zone_cap_mb;
837 static int sdeb_zbc_zone_size_mb;
838 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
839 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
840
841 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
842 static int poll_queues; /* io_uring iopoll interface */
843
844 static char sdebug_proc_name[] = MY_NAME;
845 static const char *my_name = MY_NAME;
846
847 static struct bus_type pseudo_lld_bus;
848
849 static struct device_driver sdebug_driverfs_driver = {
850 .name = sdebug_proc_name,
851 .bus = &pseudo_lld_bus,
852 };
853
854 static const int check_condition_result =
855 SAM_STAT_CHECK_CONDITION;
856
857 static const int illegal_condition_result =
858 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
859
860 static const int device_qfull_result =
861 (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
862
863 static const int condition_met_result = SAM_STAT_CONDITION_MET;
864
865
866 /* Only do the extra work involved in logical block provisioning if one or
867 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
868 * real reads and writes (i.e. not skipping them for speed).
869 */
870 static inline bool scsi_debug_lbp(void)
871 {
872 return 0 == sdebug_fake_rw &&
873 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
874 }
875
876 static void *lba2fake_store(struct sdeb_store_info *sip,
877 unsigned long long lba)
878 {
879 struct sdeb_store_info *lsip = sip;
880
881 lba = do_div(lba, sdebug_store_sectors);
882 if (!sip || !sip->storep) {
883 WARN_ON_ONCE(true);
884 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
885 }
886 return lsip->storep + lba * sdebug_sector_size;
887 }
888
889 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
890 sector_t sector)
891 {
892 sector = sector_div(sector, sdebug_store_sectors);
893
894 return sip->dif_storep + sector;
895 }
896
897 static void sdebug_max_tgts_luns(void)
898 {
899 struct sdebug_host_info *sdbg_host;
900 struct Scsi_Host *hpnt;
901
902 mutex_lock(&sdebug_host_list_mutex);
903 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
904 hpnt = sdbg_host->shost;
905 if ((hpnt->this_id >= 0) &&
906 (sdebug_num_tgts > hpnt->this_id))
907 hpnt->max_id = sdebug_num_tgts + 1;
908 else
909 hpnt->max_id = sdebug_num_tgts;
910 /* sdebug_max_luns; */
911 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
912 }
913 mutex_unlock(&sdebug_host_list_mutex);
914 }
915
916 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
917
918 /* Set in_bit to -1 to indicate no bit position of invalid field */
919 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
920 enum sdeb_cmd_data c_d,
921 int in_byte, int in_bit)
922 {
923 unsigned char *sbuff;
924 u8 sks[4];
925 int sl, asc;
926
927 sbuff = scp->sense_buffer;
928 if (!sbuff) {
929 sdev_printk(KERN_ERR, scp->device,
930 "%s: sense_buffer is NULL\n", __func__);
931 return;
932 }
933 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
934 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
935 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
936 memset(sks, 0, sizeof(sks));
937 sks[0] = 0x80;
938 if (c_d)
939 sks[0] |= 0x40;
940 if (in_bit >= 0) {
941 sks[0] |= 0x8;
942 sks[0] |= 0x7 & in_bit;
943 }
944 put_unaligned_be16(in_byte, sks + 1);
945 if (sdebug_dsense) {
946 sl = sbuff[7] + 8;
947 sbuff[7] = sl;
948 sbuff[sl] = 0x2;
949 sbuff[sl + 1] = 0x6;
950 memcpy(sbuff + sl + 4, sks, 3);
951 } else
952 memcpy(sbuff + 15, sks, 3);
953 if (sdebug_verbose)
954 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
955 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
956 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
957 }
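/* Usage sketch: a validation failure on bit 4 of cdb byte 1 could be
 * reported with
 *	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
 * yielding ILLEGAL REQUEST, INVALID FIELD IN CDB sense data whose
 * sense-key-specific bytes point at byte 1, bit 4 of the cdb.
 */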
958
959 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
960 {
961 if (!scp->sense_buffer) {
962 sdev_printk(KERN_ERR, scp->device,
963 "%s: sense_buffer is NULL\n", __func__);
964 return;
965 }
966 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
967
968 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
969
970 if (sdebug_verbose)
971 sdev_printk(KERN_INFO, scp->device,
972 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
973 my_name, key, asc, asq);
974 }
975
976 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
977 {
978 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
979 }
980
981 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
982 void __user *arg)
983 {
984 if (sdebug_verbose) {
985 if (0x1261 == cmd)
986 sdev_printk(KERN_INFO, dev,
987 "%s: BLKFLSBUF [0x1261]\n", __func__);
988 else if (0x5331 == cmd)
989 sdev_printk(KERN_INFO, dev,
990 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
991 __func__);
992 else
993 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
994 __func__, cmd);
995 }
996 return -EINVAL;
997 /* return -ENOTTY; // correct return but upsets fdisk */
998 }
999
1000 static void config_cdb_len(struct scsi_device *sdev)
1001 {
1002 switch (sdebug_cdb_len) {
1003 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1004 sdev->use_10_for_rw = false;
1005 sdev->use_16_for_rw = false;
1006 sdev->use_10_for_ms = false;
1007 break;
1008 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1009 sdev->use_10_for_rw = true;
1010 sdev->use_16_for_rw = false;
1011 sdev->use_10_for_ms = false;
1012 break;
1013 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1014 sdev->use_10_for_rw = true;
1015 sdev->use_16_for_rw = false;
1016 sdev->use_10_for_ms = true;
1017 break;
1018 case 16:
1019 sdev->use_10_for_rw = false;
1020 sdev->use_16_for_rw = true;
1021 sdev->use_10_for_ms = true;
1022 break;
1023 case 32: /* No knobs to suggest this so same as 16 for now */
1024 sdev->use_10_for_rw = false;
1025 sdev->use_16_for_rw = true;
1026 sdev->use_10_for_ms = true;
1027 break;
1028 default:
1029 pr_warn("unexpected cdb_len=%d, force to 10\n",
1030 sdebug_cdb_len);
1031 sdev->use_10_for_rw = true;
1032 sdev->use_16_for_rw = false;
1033 sdev->use_10_for_ms = false;
1034 sdebug_cdb_len = 10;
1035 break;
1036 }
1037 }
1038
1039 static void all_config_cdb_len(void)
1040 {
1041 struct sdebug_host_info *sdbg_host;
1042 struct Scsi_Host *shost;
1043 struct scsi_device *sdev;
1044
1045 mutex_lock(&sdebug_host_list_mutex);
1046 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1047 shost = sdbg_host->shost;
1048 shost_for_each_device(sdev, shost) {
1049 config_cdb_len(sdev);
1050 }
1051 }
1052 mutex_unlock(&sdebug_host_list_mutex);
1053 }
1054
1055 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1056 {
1057 struct sdebug_host_info *sdhp = devip->sdbg_host;
1058 struct sdebug_dev_info *dp;
1059
1060 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1061 if ((devip->sdbg_host == dp->sdbg_host) &&
1062 (devip->target == dp->target)) {
1063 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1064 }
1065 }
1066 }
1067
1068 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1069 {
1070 int k;
1071
1072 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1073 if (k != SDEBUG_NUM_UAS) {
1074 const char *cp = NULL;
1075
1076 switch (k) {
1077 case SDEBUG_UA_POR:
1078 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1079 POWER_ON_RESET_ASCQ);
1080 if (sdebug_verbose)
1081 cp = "power on reset";
1082 break;
1083 case SDEBUG_UA_POOCCUR:
1084 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1085 POWER_ON_OCCURRED_ASCQ);
1086 if (sdebug_verbose)
1087 cp = "power on occurred";
1088 break;
1089 case SDEBUG_UA_BUS_RESET:
1090 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1091 BUS_RESET_ASCQ);
1092 if (sdebug_verbose)
1093 cp = "bus reset";
1094 break;
1095 case SDEBUG_UA_MODE_CHANGED:
1096 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1097 MODE_CHANGED_ASCQ);
1098 if (sdebug_verbose)
1099 cp = "mode parameters changed";
1100 break;
1101 case SDEBUG_UA_CAPACITY_CHANGED:
1102 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1103 CAPACITY_CHANGED_ASCQ);
1104 if (sdebug_verbose)
1105 cp = "capacity data changed";
1106 break;
1107 case SDEBUG_UA_MICROCODE_CHANGED:
1108 mk_sense_buffer(scp, UNIT_ATTENTION,
1109 TARGET_CHANGED_ASC,
1110 MICROCODE_CHANGED_ASCQ);
1111 if (sdebug_verbose)
1112 cp = "microcode has been changed";
1113 break;
1114 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1115 mk_sense_buffer(scp, UNIT_ATTENTION,
1116 TARGET_CHANGED_ASC,
1117 MICROCODE_CHANGED_WO_RESET_ASCQ);
1118 if (sdebug_verbose)
1119 cp = "microcode has been changed without reset";
1120 break;
1121 case SDEBUG_UA_LUNS_CHANGED:
1122 /*
1123 * SPC-3 behavior is to report a UNIT ATTENTION with
1124 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1125 * on the target, until a REPORT LUNS command is
1126 * received. SPC-4 behavior is to report it only once.
1127 * NOTE: sdebug_scsi_level does not use the same
1128 * values as struct scsi_device->scsi_level.
1129 */
1130 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1131 clear_luns_changed_on_target(devip);
1132 mk_sense_buffer(scp, UNIT_ATTENTION,
1133 TARGET_CHANGED_ASC,
1134 LUNS_CHANGED_ASCQ);
1135 if (sdebug_verbose)
1136 cp = "reported luns data has changed";
1137 break;
1138 default:
1139 pr_warn("unexpected unit attention code=%d\n", k);
1140 if (sdebug_verbose)
1141 cp = "unknown";
1142 break;
1143 }
1144 clear_bit(k, devip->uas_bm);
1145 if (sdebug_verbose)
1146 sdev_printk(KERN_INFO, scp->device,
1147 "%s reports: Unit attention: %s\n",
1148 my_name, cp);
1149 return check_condition_result;
1150 }
1151 return 0;
1152 }
1153
1154 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1155 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1156 int arr_len)
1157 {
1158 int act_len;
1159 struct scsi_data_buffer *sdb = &scp->sdb;
1160
1161 if (!sdb->length)
1162 return 0;
1163 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1164 return DID_ERROR << 16;
1165
1166 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1167 arr, arr_len);
1168 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1169
1170 return 0;
1171 }
1172
1173 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1174 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1175 * calls, not required to write in ascending offset order. Assumes resid
1176 * set to scsi_bufflen() prior to any calls.
1177 */
1178 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1179 int arr_len, unsigned int off_dst)
1180 {
1181 unsigned int act_len, n;
1182 struct scsi_data_buffer *sdb = &scp->sdb;
1183 off_t skip = off_dst;
1184
1185 if (sdb->length <= off_dst)
1186 return 0;
1187 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1188 return DID_ERROR << 16;
1189
1190 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1191 arr, arr_len, skip);
1192 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1193 __func__, off_dst, scsi_bufflen(scp), act_len,
1194 scsi_get_resid(scp));
1195 n = scsi_bufflen(scp) - (off_dst + act_len);
1196 scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1197 return 0;
1198 }
1199
1200 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1201 * 'arr' or -1 if error.
1202 */
1203 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1204 int arr_len)
1205 {
1206 if (!scsi_bufflen(scp))
1207 return 0;
1208 if (scp->sc_data_direction != DMA_TO_DEVICE)
1209 return -1;
1210
1211 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1212 }
1213
1214
1215 static char sdebug_inq_vendor_id[9] = "Linux ";
1216 static char sdebug_inq_product_id[17] = "scsi_debug ";
1217 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1218 /* Use some locally assigned NAAs for SAS addresses. */
1219 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1220 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1221 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1222
1223 /* Device identification VPD page. Returns number of bytes placed in arr */
1224 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1225 int target_dev_id, int dev_id_num,
1226 const char *dev_id_str, int dev_id_str_len,
1227 const uuid_t *lu_name)
1228 {
1229 int num, port_a;
1230 char b[32];
1231
1232 port_a = target_dev_id + 1;
1233 /* T10 vendor identifier field format (faked) */
1234 arr[0] = 0x2; /* ASCII */
1235 arr[1] = 0x1;
1236 arr[2] = 0x0;
1237 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1238 memcpy(&arr[12], sdebug_inq_product_id, 16);
1239 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1240 num = 8 + 16 + dev_id_str_len;
1241 arr[3] = num;
1242 num += 4;
1243 if (dev_id_num >= 0) {
1244 if (sdebug_uuid_ctl) {
1245 /* Locally assigned UUID */
1246 arr[num++] = 0x1; /* binary (not necessarily sas) */
1247 arr[num++] = 0xa; /* PIV=0, lu, naa */
1248 arr[num++] = 0x0;
1249 arr[num++] = 0x12;
1250 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1251 arr[num++] = 0x0;
1252 memcpy(arr + num, lu_name, 16);
1253 num += 16;
1254 } else {
1255 /* NAA-3, Logical unit identifier (binary) */
1256 arr[num++] = 0x1; /* binary (not necessarily sas) */
1257 arr[num++] = 0x3; /* PIV=0, lu, naa */
1258 arr[num++] = 0x0;
1259 arr[num++] = 0x8;
1260 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1261 num += 8;
1262 }
1263 /* Target relative port number */
1264 arr[num++] = 0x61; /* proto=sas, binary */
1265 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1266 arr[num++] = 0x0; /* reserved */
1267 arr[num++] = 0x4; /* length */
1268 arr[num++] = 0x0; /* reserved */
1269 arr[num++] = 0x0; /* reserved */
1270 arr[num++] = 0x0;
1271 arr[num++] = 0x1; /* relative port A */
1272 }
1273 /* NAA-3, Target port identifier */
1274 arr[num++] = 0x61; /* proto=sas, binary */
1275 arr[num++] = 0x93; /* piv=1, target port, naa */
1276 arr[num++] = 0x0;
1277 arr[num++] = 0x8;
1278 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1279 num += 8;
1280 /* NAA-3, Target port group identifier */
1281 arr[num++] = 0x61; /* proto=sas, binary */
1282 arr[num++] = 0x95; /* piv=1, target port group id */
1283 arr[num++] = 0x0;
1284 arr[num++] = 0x4;
1285 arr[num++] = 0;
1286 arr[num++] = 0;
1287 put_unaligned_be16(port_group_id, arr + num);
1288 num += 2;
1289 /* NAA-3, Target device identifier */
1290 arr[num++] = 0x61; /* proto=sas, binary */
1291 arr[num++] = 0xa3; /* piv=1, target device, naa */
1292 arr[num++] = 0x0;
1293 arr[num++] = 0x8;
1294 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1295 num += 8;
1296 /* SCSI name string: Target device identifier */
1297 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1298 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1299 arr[num++] = 0x0;
1300 arr[num++] = 24;
1301 memcpy(arr + num, "naa.32222220", 12);
1302 num += 12;
1303 snprintf(b, sizeof(b), "%08X", target_dev_id);
1304 memcpy(arr + num, b, 8);
1305 num += 8;
1306 memset(arr + num, 0, 4);
1307 num += 4;
1308 return num;
1309 }
1310
1311 static unsigned char vpd84_data[] = {
1312 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1313 0x22,0x22,0x22,0x0,0xbb,0x1,
1314 0x22,0x22,0x22,0x0,0xbb,0x2,
1315 };
1316
1317 /* Software interface identification VPD page */
1318 static int inquiry_vpd_84(unsigned char *arr)
1319 {
1320 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1321 return sizeof(vpd84_data);
1322 }
1323
1324 /* Management network addresses VPD page */
1325 static int inquiry_vpd_85(unsigned char *arr)
1326 {
1327 int num = 0;
1328 const char *na1 = "https://www.kernel.org/config";
1329 const char *na2 = "http://www.kernel.org/log";
1330 int plen, olen;
1331
1332 arr[num++] = 0x1; /* lu, storage config */
1333 arr[num++] = 0x0; /* reserved */
1334 arr[num++] = 0x0;
1335 olen = strlen(na1);
1336 plen = olen + 1;
1337 if (plen % 4)
1338 plen = ((plen / 4) + 1) * 4;
1339 arr[num++] = plen; /* length, null terminated, padded */
1340 memcpy(arr + num, na1, olen);
1341 memset(arr + num + olen, 0, plen - olen);
1342 num += plen;
1343
1344 arr[num++] = 0x4; /* lu, logging */
1345 arr[num++] = 0x0; /* reserved */
1346 arr[num++] = 0x0;
1347 olen = strlen(na2);
1348 plen = olen + 1;
1349 if (plen % 4)
1350 plen = ((plen / 4) + 1) * 4;
1351 arr[num++] = plen; /* length, null terminated, padded */
1352 memcpy(arr + num, na2, olen);
1353 memset(arr + num + olen, 0, plen - olen);
1354 num += plen;
1355
1356 return num;
1357 }
1358
1359 /* SCSI ports VPD page */
1360 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1361 {
1362 int num = 0;
1363 int port_a, port_b;
1364
1365 port_a = target_dev_id + 1;
1366 port_b = port_a + 1;
1367 arr[num++] = 0x0; /* reserved */
1368 arr[num++] = 0x0; /* reserved */
1369 arr[num++] = 0x0;
1370 arr[num++] = 0x1; /* relative port 1 (primary) */
1371 memset(arr + num, 0, 6);
1372 num += 6;
1373 arr[num++] = 0x0;
1374 arr[num++] = 12; /* length tp descriptor */
1375 /* naa-5 target port identifier (A) */
1376 arr[num++] = 0x61; /* proto=sas, binary */
1377 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x8; /* length */
1380 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1381 num += 8;
1382 arr[num++] = 0x0; /* reserved */
1383 arr[num++] = 0x0; /* reserved */
1384 arr[num++] = 0x0;
1385 arr[num++] = 0x2; /* relative port 2 (secondary) */
1386 memset(arr + num, 0, 6);
1387 num += 6;
1388 arr[num++] = 0x0;
1389 arr[num++] = 12; /* length tp descriptor */
1390 /* naa-5 target port identifier (B) */
1391 arr[num++] = 0x61; /* proto=sas, binary */
1392 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1393 arr[num++] = 0x0; /* reserved */
1394 arr[num++] = 0x8; /* length */
1395 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1396 num += 8;
1397
1398 return num;
1399 }
1400
1401
1402 static unsigned char vpd89_data[] = {
1403 /* from 4th byte */ 0,0,0,0,
1404 'l','i','n','u','x',' ',' ',' ',
1405 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1406 '1','2','3','4',
1407 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1408 0xec,0,0,0,
1409 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1410 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1411 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1412 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1413 0x53,0x41,
1414 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1415 0x20,0x20,
1416 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1417 0x10,0x80,
1418 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1419 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1420 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1422 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1423 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1424 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1425 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1426 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1427 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1428 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1429 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1430 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1431 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1440 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1441 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1442 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1443 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1444 };
1445
1446 /* ATA Information VPD page */
1447 static int inquiry_vpd_89(unsigned char *arr)
1448 {
1449 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1450 return sizeof(vpd89_data);
1451 }
1452
1453
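/*
 * Template for the Block Limits VPD page (0xb0) below, indexed from page
 * byte 4: optimal transfer length granularity 4 blocks, maximum transfer
 * length 0x400 (1024) blocks and optimal transfer length 64 blocks.
 * Several of these defaults are overwritten by inquiry_vpd_b0() from the
 * module parameters.
 */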
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80;	/* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;	/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;	/* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1;	/* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

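/*
 * Respond to the INQUIRY command: either the standard (96 byte) response
 * or, when the EVPD bit is set, one of the VPD pages built by the helper
 * functions above. CMDDT=1 is rejected as an invalid field in the CDB.
 */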
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {	/* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {	/* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
				(devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/* sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/* sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;	/* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10;	/* claim: implicit TPGS */
	arr[6] = 0x10;	/* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa;	/* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
				   min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}

/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

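/*
 * Respond to REQUEST SENSE. Three cases are reported, in this order of
 * precedence: a (transitioning) stopped state, a faked "failure
 * prediction threshold exceeded" when the IEC mode page has TEST=1 and
 * MRIE=6, else "no sense". The desc bit in the CDB selects descriptor
 * (0x72) versus fixed (0x70) sense data format.
 */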
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	u32 alloc_len = cmd[4];
	u32 len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;	/* NOT_READY in sense_key */
			arr[7] = 0xa;	/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;	/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;	/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;	/* NO_SENSE in sense_key */
			arr[7] = 0xa;	/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;	/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}

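/*
 * Respond to START STOP UNIT. While the device is in the transitioning
 * stopped state (stopped_state == 2) a START is rejected so that the
 * tur_ms_to_ready delay cannot be short-circuited, whereas a STOP is
 * dummied up as a success.
 */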
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}

static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);
	else
		return sdebug_store_sectors;
}

#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}

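/*
 * Respond to READ CAPACITY(16). Besides the last LBA and block size this
 * reports the protection type (P_TYPE/PROT_EN), physical block exponent,
 * lowest aligned LBA, thin provisioning bits (LBPME/LBPRZ) and, for
 * host-managed ZBC devices, RC BASIS = 1.
 */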
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80;	/* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	/*
	 * Since the scsi_debug READ CAPACITY implementation always reports the
	 * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
	 */
	if (devip->zmodel == BLK_ZONED_HM)
		arr[12] |= 1 << 4;

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1;	/* P_TYPE */
		arr[12] |= 1;	/* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}

#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1;	/* relative port A */
	port_b = 0x2;	/* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
		       (devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
		       (devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3;	/* Asymm access state */
		arr[n++] = 0x0F;	/* claim: all states are supported */
	} else {
		arr[n++] = 0x0;		/* Active/Optimized path */
		arr[n++] = 0x01;	/* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;	/* Port unavailable */
	arr[n++] = 0x08;	/* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
				   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}

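/*
 * Respond to MAINTENANCE IN: REPORT SUPPORTED OPERATION CODES. The
 * reporting options field selects either a list of all commands
 * (reporting_opts 0) or a single command descriptor looked up by opcode,
 * possibly qualified by service action (reporting_opts 1 to 3). RCTD=1
 * appends a 12 byte command timeouts descriptor to each entry.
 */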
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}

static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;	/* ATS | ATSS | LURS */
	arr[1] = 0x1;	/* ITNRS */
	if (repd) {
		arr[3] = 0xc;
		len = 16;
	} else
		len = 4;

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}

/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}

static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20;	/* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}

static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
					0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}

static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80;	/* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}


static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}

static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
				       0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}


static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		   0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		   0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		   0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		   0x2, 0, 0, 0, 0, 0, 0, 0,
		   0x88, 0x99, 0, 0, 0, 0, 0, 0,
		   0, 0, 0, 0, 0, 0, 0, 0,
		   0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		   0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		   0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		   0x3, 0, 0, 0, 0, 0, 0, 0,
		   0x88, 0x99, 0, 0, 0, 0, 0, 0,
		   0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}

static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
					0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}

#define SDEBUG_MAX_MSENSE_SZ 256

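/*
 * Respond to MODE SENSE(6) and MODE SENSE(10). The response is assembled
 * as: mode parameter header, an optional short (8 byte) or long (16 byte,
 * LLBAA) block descriptor, then the requested mode page(s) built by the
 * resp_*_pg() helpers above. Page code 0x3f returns all supported pages.
 */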
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
							  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}

#define SDEBUG_MAX_MSELECT_SZ 512

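/*
 * Respond to MODE SELECT(6) and MODE SELECT(10). Only the Caching,
 * Control and Informational Exceptions mode pages may be changed; a
 * successful change raises the MODE PARAMETERS CHANGED unit attention.
 */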
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:	/* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:	/* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}

static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}

static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
		};

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}

static int resp_env_rep_l_spg(unsigned char *arr)
{
	unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
					 0x0, 40, 72, 0xff, 45, 18, 0, 0,
					 0x1, 0x0, 0x23, 0x8,
					 0x0, 55, 72, 35, 55, 45, 0, 0,
		};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}

#define SDEBUG_MAX_LSENSE_SZ 512

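/*
 * Respond to LOG SENSE. Supported pages: 0x0 (supported pages), 0xd
 * (temperature, plus the 0xd,0x1 environment reporting subpage) and 0x2f
 * (informational exceptions). Saved parameters (SP=1) and parameter
 * pointer control (PPC=1) are not supported.
 */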
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
				    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}

static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}

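/*
 * Map an LBA to its zone state. When zone capacity equals zone size the
 * mapping is a plain shift. Otherwise every sequential zone is followed
 * by a gap zone, so past the conventional zones the index doubles: e.g.
 * with 4 conventional zones, shifted zone number 6 (the third sequential
 * zone) maps to index 2 * 6 - 4 = 8, and the gap zone at index 9 is
 * chosen instead when the LBA lies past that sequential zone's size.
 */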
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}

static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}

static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}

static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
}

static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}

static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
			return;
		}
	}
}

static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}

static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	switch (zsp->z_cond) {
	case ZC2_IMPLICIT_OPEN:
		devip->nr_imp_open--;
		break;
	case ZC3_EXPLICIT_OPEN:
		devip->nr_exp_open--;
		break;
	default:
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);
		break;
	}
	zsp->z_cond = ZC5_FULL;
}

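/*
 * Advance the write pointer of the zone(s) covering lba..lba+num-1 after
 * a successful write. Sequential-write-required zones cannot be crossed,
 * so a plain increment suffices; sequential-write-preferred zones may be
 * crossed, and any zone written out of order is flagged as holding a
 * non-sequential resource.
 */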
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}

static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}

static inline int check_device_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}

/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}

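/*
 * When virtual_gb makes the reported capacity exceed the size of the
 * backing store, accesses wrap modulo sdebug_store_sectors; the "rest"
 * term below handles a transfer that straddles the end of the store.
 */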
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			     fsp + (block * sdebug_sector_size),
			     (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
				      fsp, rest * sdebug_sector_size,
				      sg_skip + ((num - rest) * sdebug_sector_size),
				      do_write);
	}

	return ret;
}

/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
}

/* If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
			      rest * lb_size);
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}

static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}

static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
		       (unsigned long)sector,
		       be16_to_cpu(sdt->guard_tag),
		       be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
		       (unsigned long)sector);
		return 0x03;
	}
	return 0;
}

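/*
 * Copy T10 PI tuples between the command's protection scatter-gather
 * list and the dif_storep shadow store, in either direction, again
 * wrapping at the end of the store.
 */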
dif_copy_prot(struct scsi_cmnd * scp,sector_t sector,unsigned int sectors,bool read)3132 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3133 unsigned int sectors, bool read)
3134 {
3135 size_t resid;
3136 void *paddr;
3137 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3138 scp->device->hostdata, true);
3139 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3140 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3141 struct sg_mapping_iter miter;
3142
3143 /* Bytes of protection data to copy to/from the sgl */
3144 resid = sectors * sizeof(*dif_storep);
3145
3146 sg_miter_start(&miter, scsi_prot_sglist(scp),
3147 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3148 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3149
3150 while (sg_miter_next(&miter) && resid > 0) {
3151 size_t len = min_t(size_t, miter.length, resid);
3152 void *start = dif_store(sip, sector);
3153 size_t rest = 0;
3154
3155 if (dif_store_end < start + len)
3156 rest = start + len - dif_store_end;
3157
3158 paddr = miter.addr;
3159
3160 if (read)
3161 memcpy(paddr, start, len - rest);
3162 else
3163 memcpy(start, paddr, len - rest);
3164
3165 if (rest) {
3166 if (read)
3167 memcpy(paddr + len - rest, dif_storep, rest);
3168 else
3169 memcpy(dif_storep, paddr + len - rest, rest);
3170 }
3171
3172 sector += len / sizeof(*dif_storep);
3173 resid -= len;
3174 }
3175 sg_miter_stop(&miter);
3176 }
3177
3178 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3179 unsigned int sectors, u32 ei_lba)
3180 {
3181 int ret = 0;
3182 unsigned int i;
3183 sector_t sector;
3184 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3185 scp->device->hostdata, true);
3186 struct t10_pi_tuple *sdt;
3187
3188 for (i = 0; i < sectors; i++, ei_lba++) {
3189 sector = start_sec + i;
3190 sdt = dif_store(sip, sector);
3191
3192 if (sdt->app_tag == cpu_to_be16(0xffff))
3193 continue;
3194
3195 /*
3196 * Because scsi_debug acts as both initiator and
3197 * target we proceed to verify the PI even if
3198 * RDPROTECT=3. This is done so the "initiator" knows
3199 * which type of error to return. Otherwise we would
3200 * have to iterate over the PI twice.
3201 */
3202 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3203 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3204 sector, ei_lba);
3205 if (ret) {
3206 dif_errors++;
3207 break;
3208 }
3209 }
3210 }
3211
3212 dif_copy_prot(scp, start_sec, sectors, true);
3213 dix_reads++;
3214
3215 return ret;
3216 }
3217
3218 static inline void
3219 sdeb_read_lock(struct sdeb_store_info *sip)
3220 {
3221 if (sdebug_no_rwlock) {
3222 if (sip)
3223 __acquire(&sip->macc_lck);
3224 else
3225 __acquire(&sdeb_fake_rw_lck);
3226 } else {
3227 if (sip)
3228 read_lock(&sip->macc_lck);
3229 else
3230 read_lock(&sdeb_fake_rw_lck);
3231 }
3232 }
3233
3234 static inline void
3235 sdeb_read_unlock(struct sdeb_store_info *sip)
3236 {
3237 if (sdebug_no_rwlock) {
3238 if (sip)
3239 __release(&sip->macc_lck);
3240 else
3241 __release(&sdeb_fake_rw_lck);
3242 } else {
3243 if (sip)
3244 read_unlock(&sip->macc_lck);
3245 else
3246 read_unlock(&sdeb_fake_rw_lck);
3247 }
3248 }
3249
3250 static inline void
3251 sdeb_write_lock(struct sdeb_store_info *sip)
3252 {
3253 if (sdebug_no_rwlock) {
3254 if (sip)
3255 __acquire(&sip->macc_lck);
3256 else
3257 __acquire(&sdeb_fake_rw_lck);
3258 } else {
3259 if (sip)
3260 write_lock(&sip->macc_lck);
3261 else
3262 write_lock(&sdeb_fake_rw_lck);
3263 }
3264 }
3265
3266 static inline void
3267 sdeb_write_unlock(struct sdeb_store_info *sip)
3268 {
3269 if (sdebug_no_rwlock) {
3270 if (sip)
3271 __release(&sip->macc_lck);
3272 else
3273 __release(&sdeb_fake_rw_lck);
3274 } else {
3275 if (sip)
3276 write_unlock(&sip->macc_lck);
3277 else
3278 write_unlock(&sdeb_fake_rw_lck);
3279 }
3280 }
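
/*
 * Typical usage: resp_read_dt0() below brackets prot_verify_read() and
 * do_device_access() with sdeb_read_lock()/sdeb_read_unlock() while the
 * write paths take the write lock. With sdebug_no_rwlock set the helpers
 * reduce to __acquire()/__release() annotations, keeping sparse's lock
 * balance checking without any runtime serialization.
 */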
3281
3282 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3283 {
3284 bool check_prot;
3285 u32 num;
3286 u32 ei_lba;
3287 int ret;
3288 u64 lba;
3289 struct sdeb_store_info *sip = devip2sip(devip, true);
3290 u8 *cmd = scp->cmnd;
3291
3292 switch (cmd[0]) {
3293 case READ_16:
3294 ei_lba = 0;
3295 lba = get_unaligned_be64(cmd + 2);
3296 num = get_unaligned_be32(cmd + 10);
3297 check_prot = true;
3298 break;
3299 case READ_10:
3300 ei_lba = 0;
3301 lba = get_unaligned_be32(cmd + 2);
3302 num = get_unaligned_be16(cmd + 7);
3303 check_prot = true;
3304 break;
3305 case READ_6:
3306 ei_lba = 0;
3307 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3308 (u32)(cmd[1] & 0x1f) << 16;
3309 num = (0 == cmd[4]) ? 256 : cmd[4];
3310 check_prot = true;
3311 break;
3312 case READ_12:
3313 ei_lba = 0;
3314 lba = get_unaligned_be32(cmd + 2);
3315 num = get_unaligned_be32(cmd + 6);
3316 check_prot = true;
3317 break;
3318 case XDWRITEREAD_10:
3319 ei_lba = 0;
3320 lba = get_unaligned_be32(cmd + 2);
3321 num = get_unaligned_be16(cmd + 7);
3322 check_prot = false;
3323 break;
3324 default: /* assume READ(32) */
3325 lba = get_unaligned_be64(cmd + 12);
3326 ei_lba = get_unaligned_be32(cmd + 20);
3327 num = get_unaligned_be32(cmd + 28);
3328 check_prot = false;
3329 break;
3330 }
3331 if (unlikely(have_dif_prot && check_prot)) {
3332 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3333 (cmd[1] & 0xe0)) {
3334 mk_sense_invalid_opcode(scp);
3335 return check_condition_result;
3336 }
3337 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3338 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3339 (cmd[1] & 0xe0) == 0)
3340 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3341 "to DIF device\n");
3342 }
3343 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3344 atomic_read(&sdeb_inject_pending))) {
3345 num /= 2;
3346 atomic_set(&sdeb_inject_pending, 0);
3347 }
3348
3349 ret = check_device_access_params(scp, lba, num, false);
3350 if (ret)
3351 return ret;
3352 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3353 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3354 ((lba + num) > sdebug_medium_error_start))) {
3355 /* claim unrecoverable read error */
3356 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3357 /* set info field and valid bit for fixed descriptor */
3358 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3359 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3360 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3361 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3362 put_unaligned_be32(ret, scp->sense_buffer + 3);
3363 }
3364 scsi_set_resid(scp, scsi_bufflen(scp));
3365 return check_condition_result;
3366 }
3367
3368 sdeb_read_lock(sip);
3369
3370 /* DIX + T10 DIF */
3371 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3372 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3373 case 1: /* Guard tag error */
3374 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3375 sdeb_read_unlock(sip);
3376 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3377 return check_condition_result;
3378 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3379 sdeb_read_unlock(sip);
3380 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3381 return illegal_condition_result;
3382 }
3383 break;
3384 case 3: /* Reference tag error */
3385 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3386 sdeb_read_unlock(sip);
3387 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3388 return check_condition_result;
3389 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3390 sdeb_read_unlock(sip);
3391 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3392 return illegal_condition_result;
3393 }
3394 break;
3395 }
3396 }
3397
3398 ret = do_device_access(sip, scp, 0, lba, num, false);
3399 sdeb_read_unlock(sip);
3400 if (unlikely(ret == -1))
3401 return DID_ERROR << 16;
3402
3403 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3404
3405 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3406 atomic_read(&sdeb_inject_pending))) {
3407 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3408 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3409 atomic_set(&sdeb_inject_pending, 0);
3410 return check_condition_result;
3411 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3412 /* Logical block guard check failed */
3413 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3414 atomic_set(&sdeb_inject_pending, 0);
3415 return illegal_condition_result;
3416 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3417 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3418 atomic_set(&sdeb_inject_pending, 0);
3419 return illegal_condition_result;
3420 }
3421 }
3422 return 0;
3423 }
3424
3425 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3426 unsigned int sectors, u32 ei_lba)
3427 {
3428 int ret;
3429 struct t10_pi_tuple *sdt;
3430 void *daddr;
3431 sector_t sector = start_sec;
3432 int ppage_offset;
3433 int dpage_offset;
3434 struct sg_mapping_iter diter;
3435 struct sg_mapping_iter piter;
3436
3437 BUG_ON(scsi_sg_count(SCpnt) == 0);
3438 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3439
3440 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3441 scsi_prot_sg_count(SCpnt),
3442 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3443 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3444 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3445
3446 /* For each protection page */
3447 while (sg_miter_next(&piter)) {
3448 dpage_offset = 0;
3449 if (WARN_ON(!sg_miter_next(&diter))) {
3450 ret = 0x01;
3451 goto out;
3452 }
3453
3454 for (ppage_offset = 0; ppage_offset < piter.length;
3455 ppage_offset += sizeof(struct t10_pi_tuple)) {
3456 /* If we're at the end of the current
3457 * data page, advance to the next one.
3458 */
3459 if (dpage_offset >= diter.length) {
3460 if (WARN_ON(!sg_miter_next(&diter))) {
3461 ret = 0x01;
3462 goto out;
3463 }
3464 dpage_offset = 0;
3465 }
3466
3467 sdt = piter.addr + ppage_offset;
3468 daddr = diter.addr + dpage_offset;
3469
3470 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3471 ret = dif_verify(sdt, daddr, sector, ei_lba);
3472 if (ret)
3473 goto out;
3474 }
3475
3476 sector++;
3477 ei_lba++;
3478 dpage_offset += sdebug_sector_size;
3479 }
3480 diter.consumed = dpage_offset;
3481 sg_miter_stop(&diter);
3482 }
3483 sg_miter_stop(&piter);
3484
3485 dif_copy_prot(SCpnt, start_sec, sectors, false);
3486 dix_writes++;
3487
3488 return 0;
3489
3490 out:
3491 dif_errors++;
3492 sg_miter_stop(&diter);
3493 sg_miter_stop(&piter);
3494 return ret;
3495 }
3496
3497 static unsigned long lba_to_map_index(sector_t lba)
3498 {
3499 if (sdebug_unmap_alignment)
3500 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3501 sector_div(lba, sdebug_unmap_granularity);
3502 return lba;
3503 }
3504
3505 static sector_t map_index_to_lba(unsigned long index)
3506 {
3507 sector_t lba = index * sdebug_unmap_granularity;
3508
3509 if (sdebug_unmap_alignment)
3510 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3511 return lba;
3512 }
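
/*
 * Worked example: with sdebug_unmap_granularity=4 and
 * sdebug_unmap_alignment=1, lba_to_map_index() computes (lba + 3) / 4, so
 * LBA 0 maps to index 0 and LBAs 1 to 4 map to index 1; map_index_to_lba(1)
 * returns 1, i.e. bit 1 of the provisioning map covers LBAs 1 to 4.
 */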
3513
3514 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3515 unsigned int *num)
3516 {
3517 sector_t end;
3518 unsigned int mapped;
3519 unsigned long index;
3520 unsigned long next;
3521
3522 index = lba_to_map_index(lba);
3523 mapped = test_bit(index, sip->map_storep);
3524
3525 if (mapped)
3526 next = find_next_zero_bit(sip->map_storep, map_size, index);
3527 else
3528 next = find_next_bit(sip->map_storep, map_size, index);
3529
3530 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3531 *num = end - lba;
3532 return mapped;
3533 }
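
/*
 * map_state() is a run-length query: it returns whether @lba is currently
 * mapped and sets *num to the number of following blocks in the same state,
 * clipped to the store size. resp_get_lba_status() reports that run
 * directly in its GET LBA STATUS descriptor.
 */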
3534
3535 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3536 unsigned int len)
3537 {
3538 sector_t end = lba + len;
3539
3540 while (lba < end) {
3541 unsigned long index = lba_to_map_index(lba);
3542
3543 if (index < map_size)
3544 set_bit(index, sip->map_storep);
3545
3546 lba = map_index_to_lba(index + 1);
3547 }
3548 }
3549
3550 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3551 unsigned int len)
3552 {
3553 sector_t end = lba + len;
3554 u8 *fsp = sip->storep;
3555
3556 while (lba < end) {
3557 unsigned long index = lba_to_map_index(lba);
3558
3559 if (lba == map_index_to_lba(index) &&
3560 lba + sdebug_unmap_granularity <= end &&
3561 index < map_size) {
3562 clear_bit(index, sip->map_storep);
3563 if (sdebug_lbprz) { /* LBPRZ=1: zero filled; LBPRZ=2: 0xff filled */
3564 memset(fsp + lba * sdebug_sector_size,
3565 (sdebug_lbprz & 1) ? 0 : 0xff,
3566 sdebug_sector_size *
3567 sdebug_unmap_granularity);
3568 }
3569 if (sip->dif_storep) {
3570 memset(sip->dif_storep + lba, 0xff,
3571 sizeof(*sip->dif_storep) *
3572 sdebug_unmap_granularity);
3573 }
3574 }
3575 lba = map_index_to_lba(index + 1);
3576 }
3577 }
3578
3579 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3580 {
3581 bool check_prot;
3582 u32 num;
3583 u32 ei_lba;
3584 int ret;
3585 u64 lba;
3586 struct sdeb_store_info *sip = devip2sip(devip, true);
3587 u8 *cmd = scp->cmnd;
3588
3589 switch (cmd[0]) {
3590 case WRITE_16:
3591 ei_lba = 0;
3592 lba = get_unaligned_be64(cmd + 2);
3593 num = get_unaligned_be32(cmd + 10);
3594 check_prot = true;
3595 break;
3596 case WRITE_10:
3597 ei_lba = 0;
3598 lba = get_unaligned_be32(cmd + 2);
3599 num = get_unaligned_be16(cmd + 7);
3600 check_prot = true;
3601 break;
3602 case WRITE_6:
3603 ei_lba = 0;
3604 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3605 (u32)(cmd[1] & 0x1f) << 16;
3606 num = (0 == cmd[4]) ? 256 : cmd[4];
3607 check_prot = true;
3608 break;
3609 case WRITE_12:
3610 ei_lba = 0;
3611 lba = get_unaligned_be32(cmd + 2);
3612 num = get_unaligned_be32(cmd + 6);
3613 check_prot = true;
3614 break;
3615 case 0x53: /* XDWRITEREAD(10) */
3616 ei_lba = 0;
3617 lba = get_unaligned_be32(cmd + 2);
3618 num = get_unaligned_be16(cmd + 7);
3619 check_prot = false;
3620 break;
3621 default: /* assume WRITE(32) */
3622 lba = get_unaligned_be64(cmd + 12);
3623 ei_lba = get_unaligned_be32(cmd + 20);
3624 num = get_unaligned_be32(cmd + 28);
3625 check_prot = false;
3626 break;
3627 }
3628 if (unlikely(have_dif_prot && check_prot)) {
3629 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3630 (cmd[1] & 0xe0)) {
3631 mk_sense_invalid_opcode(scp);
3632 return check_condition_result;
3633 }
3634 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3635 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3636 (cmd[1] & 0xe0) == 0)
3637 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3638 "to DIF device\n");
3639 }
3640
3641 sdeb_write_lock(sip);
3642 ret = check_device_access_params(scp, lba, num, true);
3643 if (ret) {
3644 sdeb_write_unlock(sip);
3645 return ret;
3646 }
3647
3648 /* DIX + T10 DIF */
3649 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3650 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3651 case 1: /* Guard tag error */
3652 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3653 sdeb_write_unlock(sip);
3654 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3655 return illegal_condition_result;
3656 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3657 sdeb_write_unlock(sip);
3658 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3659 return check_condition_result;
3660 }
3661 break;
3662 case 3: /* Reference tag error */
3663 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3664 sdeb_write_unlock(sip);
3665 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3666 return illegal_condition_result;
3667 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3668 sdeb_write_unlock(sip);
3669 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3670 return check_condition_result;
3671 }
3672 break;
3673 }
3674 }
3675
3676 ret = do_device_access(sip, scp, 0, lba, num, true);
3677 if (unlikely(scsi_debug_lbp()))
3678 map_region(sip, lba, num);
3679 /* If ZBC zone then bump its write pointer */
3680 if (sdebug_dev_is_zoned(devip))
3681 zbc_inc_wp(devip, lba, num);
3682 sdeb_write_unlock(sip);
3683 if (unlikely(-1 == ret))
3684 return DID_ERROR << 16;
3685 else if (unlikely(sdebug_verbose &&
3686 (ret < (num * sdebug_sector_size))))
3687 sdev_printk(KERN_INFO, scp->device,
3688 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3689 my_name, num * sdebug_sector_size, ret);
3690
3691 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3692 atomic_read(&sdeb_inject_pending))) {
3693 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3694 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3695 atomic_set(&sdeb_inject_pending, 0);
3696 return check_condition_result;
3697 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3698 /* Logical block guard check failed */
3699 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3700 atomic_set(&sdeb_inject_pending, 0);
3701 return illegal_condition_result;
3702 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3703 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3704 atomic_set(&sdeb_inject_pending, 0);
3705 return illegal_condition_result;
3706 }
3707 }
3708 return 0;
3709 }
3710
3711 /*
3712 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3713 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3714 */
3715 static int resp_write_scat(struct scsi_cmnd *scp,
3716 struct sdebug_dev_info *devip)
3717 {
3718 u8 *cmd = scp->cmnd;
3719 u8 *lrdp = NULL;
3720 u8 *up;
3721 struct sdeb_store_info *sip = devip2sip(devip, true);
3722 u8 wrprotect;
3723 u16 lbdof, num_lrd, k;
3724 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3725 u32 lb_size = sdebug_sector_size;
3726 u32 ei_lba;
3727 u64 lba;
3728 int ret, res;
3729 bool is_16;
3730 static const u32 lrd_size = 32; /* also the parameter list header size */
3731
3732 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3733 is_16 = false;
3734 wrprotect = (cmd[10] >> 5) & 0x7;
3735 lbdof = get_unaligned_be16(cmd + 12);
3736 num_lrd = get_unaligned_be16(cmd + 16);
3737 bt_len = get_unaligned_be32(cmd + 28);
3738 } else { /* that leaves WRITE SCATTERED(16) */
3739 is_16 = true;
3740 wrprotect = (cmd[2] >> 5) & 0x7;
3741 lbdof = get_unaligned_be16(cmd + 4);
3742 num_lrd = get_unaligned_be16(cmd + 8);
3743 bt_len = get_unaligned_be32(cmd + 10);
3744 if (unlikely(have_dif_prot)) {
3745 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3746 wrprotect) {
3747 mk_sense_invalid_opcode(scp);
3748 return illegal_condition_result;
3749 }
3750 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3751 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3752 wrprotect == 0)
3753 sdev_printk(KERN_ERR, scp->device,
3754 "Unprotected WR to DIF device\n");
3755 }
3756 }
3757 if ((num_lrd == 0) || (bt_len == 0))
3758 return 0; /* T10 says these do-nothings are not errors */
3759 if (lbdof == 0) {
3760 if (sdebug_verbose)
3761 sdev_printk(KERN_INFO, scp->device,
3762 "%s: %s: LB Data Offset field bad\n",
3763 my_name, __func__);
3764 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3765 return illegal_condition_result;
3766 }
3767 lbdof_blen = lbdof * lb_size;
3768 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3769 if (sdebug_verbose)
3770 sdev_printk(KERN_INFO, scp->device,
3771 "%s: %s: LBA range descriptors don't fit\n",
3772 my_name, __func__);
3773 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3774 return illegal_condition_result;
3775 }
3776 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
3777 if (lrdp == NULL)
3778 return SCSI_MLQUEUE_HOST_BUSY;
3779 if (sdebug_verbose)
3780 sdev_printk(KERN_INFO, scp->device,
3781 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3782 my_name, __func__, lbdof_blen);
3783 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3784 if (res == -1) {
3785 ret = DID_ERROR << 16;
3786 goto err_out;
3787 }
3788
3789 sdeb_write_lock(sip);
3790 sg_off = lbdof_blen;
3791 /* Spec says the Buffer Transfer Length field counts logical blocks in dout */
3792 cum_lb = 0;
3793 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3794 lba = get_unaligned_be64(up + 0);
3795 num = get_unaligned_be32(up + 8);
3796 if (sdebug_verbose)
3797 sdev_printk(KERN_INFO, scp->device,
3798 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3799 my_name, __func__, k, lba, num, sg_off);
3800 if (num == 0)
3801 continue;
3802 ret = check_device_access_params(scp, lba, num, true);
3803 if (ret)
3804 goto err_out_unlock;
3805 num_by = num * lb_size;
3806 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3807
3808 if ((cum_lb + num) > bt_len) {
3809 if (sdebug_verbose)
3810 sdev_printk(KERN_INFO, scp->device,
3811 "%s: %s: sum of blocks > data provided\n",
3812 my_name, __func__);
3813 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3814 0);
3815 ret = illegal_condition_result;
3816 goto err_out_unlock;
3817 }
3818
3819 /* DIX + T10 DIF */
3820 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3821 int prot_ret = prot_verify_write(scp, lba, num,
3822 ei_lba);
3823
3824 if (prot_ret) {
3825 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3826 prot_ret);
3827 ret = illegal_condition_result;
3828 goto err_out_unlock;
3829 }
3830 }
3831
3832 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3833 /* If ZBC zone then bump its write pointer */
3834 if (sdebug_dev_is_zoned(devip))
3835 zbc_inc_wp(devip, lba, num);
3836 if (unlikely(scsi_debug_lbp()))
3837 map_region(sip, lba, num);
3838 if (unlikely(-1 == ret)) {
3839 ret = DID_ERROR << 16;
3840 goto err_out_unlock;
3841 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3842 sdev_printk(KERN_INFO, scp->device,
3843 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3844 my_name, num_by, ret);
3845
3846 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3847 atomic_read(&sdeb_inject_pending))) {
3848 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3849 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3850 atomic_set(&sdeb_inject_pending, 0);
3851 ret = check_condition_result;
3852 goto err_out_unlock;
3853 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3854 /* Logical block guard check failed */
3855 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3856 atomic_set(&sdeb_inject_pending, 0);
3857 ret = illegal_condition_result;
3858 goto err_out_unlock;
3859 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3860 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3861 atomic_set(&sdeb_inject_pending, 0);
3862 ret = illegal_condition_result;
3863 goto err_out_unlock;
3864 }
3865 }
3866 sg_off += num_by;
3867 cum_lb += num;
3868 }
3869 ret = 0;
3870 err_out_unlock:
3871 sdeb_write_unlock(sip);
3872 err_out:
3873 kfree(lrdp);
3874 return ret;
3875 }
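
/*
 * Data-out layout consumed above (sketch): the first lbdof blocks hold a
 * 32 byte parameter list header followed by num_lrd LBA range descriptors,
 * each 32 bytes with the LBA at offset 0 (be64), the block count at offset
 * 8 (be32) and, for WRITE SCATTERED(32) only, the expected initial
 * reference tag at offset 12. The write data starts at byte offset
 * lbdof_blen and each descriptor advances sg_off by num * lb_size.
 */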
3876
3877 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3878 u32 ei_lba, bool unmap, bool ndob)
3879 {
3880 struct scsi_device *sdp = scp->device;
3881 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3882 unsigned long long i;
3883 u64 block, lbaa;
3884 u32 lb_size = sdebug_sector_size;
3885 int ret;
3886 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3887 scp->device->hostdata, true);
3888 u8 *fs1p;
3889 u8 *fsp;
3890
3891 sdeb_write_lock(sip);
3892
3893 ret = check_device_access_params(scp, lba, num, true);
3894 if (ret) {
3895 sdeb_write_unlock(sip);
3896 return ret;
3897 }
3898
3899 if (unmap && scsi_debug_lbp()) {
3900 unmap_region(sip, lba, num);
3901 goto out;
3902 }
3903 lbaa = lba;
3904 block = do_div(lbaa, sdebug_store_sectors);
3905 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3906 fsp = sip->storep;
3907 fs1p = fsp + (block * lb_size);
3908 if (ndob) {
3909 memset(fs1p, 0, lb_size);
3910 ret = 0;
3911 } else
3912 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3913
3914 if (-1 == ret) {
3915 sdeb_write_unlock(sip);
3916 return DID_ERROR << 16;
3917 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3918 sdev_printk(KERN_INFO, scp->device,
3919 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3920 my_name, "write same", lb_size, ret);
3921
3922 /* Copy first sector to remaining blocks */
3923 for (i = 1 ; i < num ; i++) {
3924 lbaa = lba + i;
3925 block = do_div(lbaa, sdebug_store_sectors);
3926 memmove(fsp + (block * lb_size), fs1p, lb_size);
3927 }
3928 if (scsi_debug_lbp())
3929 map_region(sip, lba, num);
3930 /* If ZBC zone then bump its write pointer */
3931 if (sdebug_dev_is_zoned(devip))
3932 zbc_inc_wp(devip, lba, num);
3933 out:
3934 sdeb_write_unlock(sip);
3935
3936 return 0;
3937 }
3938
3939 static int resp_write_same_10(struct scsi_cmnd *scp,
3940 struct sdebug_dev_info *devip)
3941 {
3942 u8 *cmd = scp->cmnd;
3943 u32 lba;
3944 u16 num;
3945 u32 ei_lba = 0;
3946 bool unmap = false;
3947
3948 if (cmd[1] & 0x8) {
3949 if (sdebug_lbpws10 == 0) {
3950 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3951 return check_condition_result;
3952 } else
3953 unmap = true;
3954 }
3955 lba = get_unaligned_be32(cmd + 2);
3956 num = get_unaligned_be16(cmd + 7);
3957 if (num > sdebug_write_same_length) {
3958 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3959 return check_condition_result;
3960 }
3961 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3962 }
3963
3964 static int resp_write_same_16(struct scsi_cmnd *scp,
3965 struct sdebug_dev_info *devip)
3966 {
3967 u8 *cmd = scp->cmnd;
3968 u64 lba;
3969 u32 num;
3970 u32 ei_lba = 0;
3971 bool unmap = false;
3972 bool ndob = false;
3973
3974 if (cmd[1] & 0x8) { /* UNMAP */
3975 if (sdebug_lbpws == 0) {
3976 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3977 return check_condition_result;
3978 } else
3979 unmap = true;
3980 }
3981 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3982 ndob = true;
3983 lba = get_unaligned_be64(cmd + 2);
3984 num = get_unaligned_be32(cmd + 10);
3985 if (num > sdebug_write_same_length) {
3986 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3987 return check_condition_result;
3988 }
3989 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3990 }
3991
3992 /* Note the mode field is in the same position as the (lower) service action
3993 * field. For the Report supported operation codes command, SPC-4 suggests
3994 * each mode of this command should be reported separately (left as a future enhancement). */
3995 static int resp_write_buffer(struct scsi_cmnd *scp,
3996 struct sdebug_dev_info *devip)
3997 {
3998 u8 *cmd = scp->cmnd;
3999 struct scsi_device *sdp = scp->device;
4000 struct sdebug_dev_info *dp;
4001 u8 mode;
4002
4003 mode = cmd[1] & 0x1f;
4004 switch (mode) {
4005 case 0x4: /* download microcode (MC) and activate (ACT) */
4006 /* set UAs on this device only */
4007 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4008 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4009 break;
4010 case 0x5: /* download MC, save and ACT */
4011 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4012 break;
4013 case 0x6: /* download MC with offsets and ACT */
4014 /* set UAs on most devices (LUs) in this target */
4015 list_for_each_entry(dp,
4016 &devip->sdbg_host->dev_info_list,
4017 dev_list)
4018 if (dp->target == sdp->id) {
4019 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4020 if (devip != dp)
4021 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4022 dp->uas_bm);
4023 }
4024 break;
4025 case 0x7: /* download MC with offsets, save, and ACT */
4026 /* set UA on all devices (LUs) in this target */
4027 list_for_each_entry(dp,
4028 &devip->sdbg_host->dev_info_list,
4029 dev_list)
4030 if (dp->target == sdp->id)
4031 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4032 dp->uas_bm);
4033 break;
4034 default:
4035 /* do nothing for this command for other mode values */
4036 break;
4037 }
4038 return 0;
4039 }
4040
4041 static int resp_comp_write(struct scsi_cmnd *scp,
4042 struct sdebug_dev_info *devip)
4043 {
4044 u8 *cmd = scp->cmnd;
4045 u8 *arr;
4046 struct sdeb_store_info *sip = devip2sip(devip, true);
4047 u64 lba;
4048 u32 dnum;
4049 u32 lb_size = sdebug_sector_size;
4050 u8 num;
4051 int ret;
4052 int retval = 0;
4053
4054 lba = get_unaligned_be64(cmd + 2);
4055 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
4056 if (0 == num)
4057 return 0; /* degenerate case, not an error */
4058 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
4059 (cmd[1] & 0xe0)) {
4060 mk_sense_invalid_opcode(scp);
4061 return check_condition_result;
4062 }
4063 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
4064 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
4065 (cmd[1] & 0xe0) == 0)
4066 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
4067 "to DIF device\n");
4068 ret = check_device_access_params(scp, lba, num, false);
4069 if (ret)
4070 return ret;
4071 dnum = 2 * num;
4072 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
4073 if (NULL == arr) {
4074 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4075 INSUFF_RES_ASCQ);
4076 return check_condition_result;
4077 }
4078
4079 sdeb_write_lock(sip);
4080
4081 ret = do_dout_fetch(scp, dnum, arr);
4082 if (ret == -1) {
4083 retval = DID_ERROR << 16;
4084 goto cleanup;
4085 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
4086 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
4087 "indicated=%u, IO sent=%d bytes\n", my_name,
4088 dnum * lb_size, ret);
4089 if (!comp_write_worker(sip, lba, num, arr, false)) {
4090 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4091 retval = check_condition_result;
4092 goto cleanup;
4093 }
4094 if (scsi_debug_lbp())
4095 map_region(sip, lba, num);
4096 cleanup:
4097 sdeb_write_unlock(sip);
4098 kfree(arr);
4099 return retval;
4100 }
4101
4102 struct unmap_block_desc {
4103 __be64 lba;
4104 __be32 blocks;
4105 __be32 __reserved;
4106 };
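
/*
 * Example payload (sketch): an UNMAP parameter list carrying one descriptor
 * is 24 bytes; bytes 0-1 hold the unmap data length (payload_len - 2 = 22),
 * bytes 2-3 the block descriptor data length (16), bytes 4-7 are reserved
 * and the first struct unmap_block_desc starts at byte 8, as the BUG_ON()
 * checks in resp_unmap() below assume.
 */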
4107
4108 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4109 {
4110 unsigned char *buf;
4111 struct unmap_block_desc *desc;
4112 struct sdeb_store_info *sip = devip2sip(devip, true);
4113 unsigned int i, payload_len, descriptors;
4114 int ret;
4115
4116 if (!scsi_debug_lbp())
4117 return 0; /* fib and say it's done */
4118 payload_len = get_unaligned_be16(scp->cmnd + 7);
4119 BUG_ON(scsi_bufflen(scp) != payload_len);
4120
4121 descriptors = (payload_len - 8) / 16;
4122 if (descriptors > sdebug_unmap_max_desc) {
4123 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4124 return check_condition_result;
4125 }
4126
4127 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4128 if (!buf) {
4129 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4130 INSUFF_RES_ASCQ);
4131 return check_condition_result;
4132 }
4133
4134 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4135
4136 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4137 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4138
4139 desc = (void *)&buf[8];
4140
4141 sdeb_write_lock(sip);
4142
4143 for (i = 0 ; i < descriptors ; i++) {
4144 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4145 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4146
4147 ret = check_device_access_params(scp, lba, num, true);
4148 if (ret)
4149 goto out;
4150
4151 unmap_region(sip, lba, num);
4152 }
4153
4154 ret = 0;
4155
4156 out:
4157 sdeb_write_unlock(sip);
4158 kfree(buf);
4159
4160 return ret;
4161 }
4162
4163 #define SDEBUG_GET_LBA_STATUS_LEN 32
4164
4165 static int resp_get_lba_status(struct scsi_cmnd *scp,
4166 struct sdebug_dev_info *devip)
4167 {
4168 u8 *cmd = scp->cmnd;
4169 u64 lba;
4170 u32 alloc_len, mapped, num;
4171 int ret;
4172 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4173
4174 lba = get_unaligned_be64(cmd + 2);
4175 alloc_len = get_unaligned_be32(cmd + 10);
4176
4177 if (alloc_len < 24)
4178 return 0;
4179
4180 ret = check_device_access_params(scp, lba, 1, false);
4181 if (ret)
4182 return ret;
4183
4184 if (scsi_debug_lbp()) {
4185 struct sdeb_store_info *sip = devip2sip(devip, true);
4186
4187 mapped = map_state(sip, lba, &num);
4188 } else {
4189 mapped = 1;
4190 /* following just in case virtual_gb changed */
4191 sdebug_capacity = get_sdebug_capacity();
4192 if (sdebug_capacity - lba <= 0xffffffff)
4193 num = sdebug_capacity - lba;
4194 else
4195 num = 0xffffffff;
4196 }
4197
4198 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4199 put_unaligned_be32(20, arr); /* Parameter Data Length */
4200 put_unaligned_be64(lba, arr + 8); /* LBA */
4201 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4202 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4203
4204 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4205 }
4206
4207 static int resp_sync_cache(struct scsi_cmnd *scp,
4208 struct sdebug_dev_info *devip)
4209 {
4210 int res = 0;
4211 u64 lba;
4212 u32 num_blocks;
4213 u8 *cmd = scp->cmnd;
4214
4215 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4216 lba = get_unaligned_be32(cmd + 2);
4217 num_blocks = get_unaligned_be16(cmd + 7);
4218 } else { /* SYNCHRONIZE_CACHE(16) */
4219 lba = get_unaligned_be64(cmd + 2);
4220 num_blocks = get_unaligned_be32(cmd + 10);
4221 }
4222 if (lba + num_blocks > sdebug_capacity) {
4223 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4224 return check_condition_result;
4225 }
4226 if (!write_since_sync || (cmd[1] & 0x2))
4227 res = SDEG_RES_IMMED_MASK;
4228 else /* delay if write_since_sync and IMMED clear */
4229 write_since_sync = false;
4230 return res;
4231 }
4232
4233 /*
4234 * Assuming the LBA+num_blocks is not out-of-range, this function returns
4235 * CONDITION MET if the specified blocks will fit (or already sit) in the
4236 * cache, and GOOD status otherwise. Model a disk with a big cache and
4237 * always yield CONDITION MET. As a side effect, try to bring the range of
4238 * main memory into the cache associated with the CPU(s).
4239 */
4240 static int resp_pre_fetch(struct scsi_cmnd *scp,
4241 struct sdebug_dev_info *devip)
4242 {
4243 int res = 0;
4244 u64 lba;
4245 u64 block, rest = 0;
4246 u32 nblks;
4247 u8 *cmd = scp->cmnd;
4248 struct sdeb_store_info *sip = devip2sip(devip, true);
4249 u8 *fsp = sip->storep;
4250
4251 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4252 lba = get_unaligned_be32(cmd + 2);
4253 nblks = get_unaligned_be16(cmd + 7);
4254 } else { /* PRE-FETCH(16) */
4255 lba = get_unaligned_be64(cmd + 2);
4256 nblks = get_unaligned_be32(cmd + 10);
4257 }
4258 if (lba + nblks > sdebug_capacity) {
4259 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4260 return check_condition_result;
4261 }
4262 if (!fsp)
4263 goto fini;
4264 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4265 block = do_div(lba, sdebug_store_sectors);
4266 if (block + nblks > sdebug_store_sectors)
4267 rest = block + nblks - sdebug_store_sectors;
4268
4269 /* Try to bring the PRE-FETCH range into CPU's cache */
4270 sdeb_read_lock(sip);
4271 prefetch_range(fsp + (sdebug_sector_size * block),
4272 (nblks - rest) * sdebug_sector_size);
4273 if (rest)
4274 prefetch_range(fsp, rest * sdebug_sector_size);
4275 sdeb_read_unlock(sip);
4276 fini:
4277 if (cmd[1] & 0x2)
4278 res = SDEG_RES_IMMED_MASK;
4279 return res | condition_met_result;
4280 }
4281
4282 #define RL_BUCKET_ELEMS 8
4283
4284 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4285 * (W-LUN), the normal Linux scanning logic does not associate it with a
4286 * device (e.g. /dev/sg7). The following magic will make that association:
4287 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4288 * where <n> is a host number. If there are multiple targets in a host then
4289 * the above will associate a W-LUN to each target. To only get a W-LUN
4290 * for target 2, then use "echo '- 2 49409' > scan" .
4291 */
4292 static int resp_report_luns(struct scsi_cmnd *scp,
4293 struct sdebug_dev_info *devip)
4294 {
4295 unsigned char *cmd = scp->cmnd;
4296 unsigned int alloc_len;
4297 unsigned char select_report;
4298 u64 lun;
4299 struct scsi_lun *lun_p;
4300 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4301 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4302 unsigned int wlun_cnt; /* report luns W-LUN count */
4303 unsigned int tlun_cnt; /* total LUN count */
4304 unsigned int rlen; /* response length (in bytes) */
4305 int k, j, n, res;
4306 unsigned int off_rsp = 0;
4307 const int sz_lun = sizeof(struct scsi_lun);
4308
4309 clear_luns_changed_on_target(devip);
4310
4311 select_report = cmd[2];
4312 alloc_len = get_unaligned_be32(cmd + 6);
4313
4314 if (alloc_len < 4) {
4315 pr_err("alloc len too small %d\n", alloc_len);
4316 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4317 return check_condition_result;
4318 }
4319
4320 switch (select_report) {
4321 case 0: /* all LUNs apart from W-LUNs */
4322 lun_cnt = sdebug_max_luns;
4323 wlun_cnt = 0;
4324 break;
4325 case 1: /* only W-LUNs */
4326 lun_cnt = 0;
4327 wlun_cnt = 1;
4328 break;
4329 case 2: /* all LUNs */
4330 lun_cnt = sdebug_max_luns;
4331 wlun_cnt = 1;
4332 break;
4333 case 0x10: /* only administrative LUs */
4334 case 0x11: /* see SPC-5 */
4335 case 0x12: /* only subsidiary LUs owned by referenced LU */
4336 default:
4337 pr_debug("select report invalid %d\n", select_report);
4338 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4339 return check_condition_result;
4340 }
4341
4342 if (sdebug_no_lun_0 && (lun_cnt > 0))
4343 --lun_cnt;
4344
4345 tlun_cnt = lun_cnt + wlun_cnt;
4346 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4347 scsi_set_resid(scp, scsi_bufflen(scp));
4348 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4349 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4350
4351 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4352 lun = sdebug_no_lun_0 ? 1 : 0;
4353 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4354 memset(arr, 0, sizeof(arr));
4355 lun_p = (struct scsi_lun *)&arr[0];
4356 if (k == 0) {
4357 put_unaligned_be32(rlen, &arr[0]);
4358 ++lun_p;
4359 j = 1;
4360 }
4361 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4362 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4363 break;
4364 int_to_scsilun(lun++, lun_p);
4365 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4366 lun_p->scsi_lun[0] |= 0x40;
4367 }
4368 if (j < RL_BUCKET_ELEMS)
4369 break;
4370 n = j * sz_lun;
4371 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4372 if (res)
4373 return res;
4374 off_rsp += n;
4375 }
4376 if (wlun_cnt) {
4377 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4378 ++j;
4379 }
4380 if (j > 0)
4381 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4382 return res;
4383 }
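
/*
 * Bucketing example for the loop above: with RL_BUCKET_ELEMS=8 and 8 byte
 * LUN entries, the first arr pass carries the 8 byte header plus up to 7
 * LUNs and later passes up to 8 each, so reporting 20 LUNs (select_report
 * 0) takes two full-bucket p_fill_from_dev_buffer() calls plus a final
 * partial one.
 */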
4384
4385 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4386 {
4387 bool is_bytchk3 = false;
4388 u8 bytchk;
4389 int ret, j;
4390 u32 vnum, a_num, off;
4391 const u32 lb_size = sdebug_sector_size;
4392 u64 lba;
4393 u8 *arr;
4394 u8 *cmd = scp->cmnd;
4395 struct sdeb_store_info *sip = devip2sip(devip, true);
4396
4397 bytchk = (cmd[1] >> 1) & 0x3;
4398 if (bytchk == 0) {
4399 return 0; /* always claim internal verify okay */
4400 } else if (bytchk == 2) {
4401 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4402 return check_condition_result;
4403 } else if (bytchk == 3) {
4404 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4405 }
4406 switch (cmd[0]) {
4407 case VERIFY_16:
4408 lba = get_unaligned_be64(cmd + 2);
4409 vnum = get_unaligned_be32(cmd + 10);
4410 break;
4411 case VERIFY: /* is VERIFY(10) */
4412 lba = get_unaligned_be32(cmd + 2);
4413 vnum = get_unaligned_be16(cmd + 7);
4414 break;
4415 default:
4416 mk_sense_invalid_opcode(scp);
4417 return check_condition_result;
4418 }
4419 if (vnum == 0)
4420 return 0; /* not an error */
4421 a_num = is_bytchk3 ? 1 : vnum;
4422 /* Treat following check like one for read (i.e. no write) access */
4423 ret = check_device_access_params(scp, lba, a_num, false);
4424 if (ret)
4425 return ret;
4426
4427 arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
4428 if (!arr) {
4429 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4430 INSUFF_RES_ASCQ);
4431 return check_condition_result;
4432 }
4433 /* Not changing store, so only need read access */
4434 sdeb_read_lock(sip);
4435
4436 ret = do_dout_fetch(scp, a_num, arr);
4437 if (ret == -1) {
4438 ret = DID_ERROR << 16;
4439 goto cleanup;
4440 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4441 sdev_printk(KERN_INFO, scp->device,
4442 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4443 my_name, __func__, a_num * lb_size, ret);
4444 }
4445 if (is_bytchk3) {
4446 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4447 memcpy(arr + off, arr, lb_size);
4448 }
4449 ret = 0;
4450 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4451 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4452 ret = check_condition_result;
4453 goto cleanup;
4454 }
4455 cleanup:
4456 sdeb_read_unlock(sip);
4457 kfree(arr);
4458 return ret;
4459 }
4460
4461 #define RZONES_DESC_HD 64
4462
4463 /* Report zones depending on start LBA and reporting options */
4464 static int resp_report_zones(struct scsi_cmnd *scp,
4465 struct sdebug_dev_info *devip)
4466 {
4467 unsigned int rep_max_zones, nrz = 0;
4468 int ret = 0;
4469 u32 alloc_len, rep_opts, rep_len;
4470 bool partial;
4471 u64 lba, zs_lba;
4472 u8 *arr = NULL, *desc;
4473 u8 *cmd = scp->cmnd;
4474 struct sdeb_zone_state *zsp = NULL;
4475 struct sdeb_store_info *sip = devip2sip(devip, false);
4476
4477 if (!sdebug_dev_is_zoned(devip)) {
4478 mk_sense_invalid_opcode(scp);
4479 return check_condition_result;
4480 }
4481 zs_lba = get_unaligned_be64(cmd + 2);
4482 alloc_len = get_unaligned_be32(cmd + 10);
4483 if (alloc_len == 0)
4484 return 0; /* not an error */
4485 rep_opts = cmd[14] & 0x3f;
4486 partial = cmd[14] & 0x80;
4487
4488 if (zs_lba >= sdebug_capacity) {
4489 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4490 return check_condition_result;
4491 }
4492
4493 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
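/* e.g. alloc_len = 4096 leaves room for (4096 - 64) / 64 = 63 zone
 * descriptors after the 64 byte report header */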
4494
4495 arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4496 if (!arr) {
4497 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4498 INSUFF_RES_ASCQ);
4499 return check_condition_result;
4500 }
4501
4502 sdeb_read_lock(sip);
4503
4504 desc = arr + 64;
4505 for (lba = zs_lba; lba < sdebug_capacity;
4506 lba = zsp->z_start + zsp->z_size) {
4507 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4508 break;
4509 zsp = zbc_zone(devip, lba);
4510 switch (rep_opts) {
4511 case 0x00:
4512 /* All zones */
4513 break;
4514 case 0x01:
4515 /* Empty zones */
4516 if (zsp->z_cond != ZC1_EMPTY)
4517 continue;
4518 break;
4519 case 0x02:
4520 /* Implicit open zones */
4521 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4522 continue;
4523 break;
4524 case 0x03:
4525 /* Explicit open zones */
4526 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4527 continue;
4528 break;
4529 case 0x04:
4530 /* Closed zones */
4531 if (zsp->z_cond != ZC4_CLOSED)
4532 continue;
4533 break;
4534 case 0x05:
4535 /* Full zones */
4536 if (zsp->z_cond != ZC5_FULL)
4537 continue;
4538 break;
4539 case 0x06:
4540 case 0x07:
4541 case 0x10:
4542 /*
4543 * Read-only, offline, reset WP recommended are
4544 * not emulated: no zones to report.
4545 */
4546 continue;
4547 case 0x11:
4548 /* non-seq-resource set */
4549 if (!zsp->z_non_seq_resource)
4550 continue;
4551 break;
4552 case 0x3e:
4553 /* All zones except gap zones. */
4554 if (zbc_zone_is_gap(zsp))
4555 continue;
4556 break;
4557 case 0x3f:
4558 /* Not write pointer (conventional) zones */
4559 if (zbc_zone_is_seq(zsp))
4560 continue;
4561 break;
4562 default:
4563 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4564 INVALID_FIELD_IN_CDB, 0);
4565 ret = check_condition_result;
4566 goto fini;
4567 }
4568
4569 if (nrz < rep_max_zones) {
4570 /* Fill zone descriptor */
4571 desc[0] = zsp->z_type;
4572 desc[1] = zsp->z_cond << 4;
4573 if (zsp->z_non_seq_resource)
4574 desc[1] |= 1 << 1;
4575 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4576 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4577 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4578 desc += 64;
4579 }
4580
4581 if (partial && nrz >= rep_max_zones)
4582 break;
4583
4584 nrz++;
4585 }
4586
4587 /* Report header */
4588 /* Zone list length. */
4589 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4590 /* Maximum LBA */
4591 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4592 /* Zone starting LBA granularity. */
4593 if (devip->zcap < devip->zsize)
4594 put_unaligned_be64(devip->zsize, arr + 16);
4595
4596 rep_len = (unsigned long)desc - (unsigned long)arr;
4597 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4598
4599 fini:
4600 sdeb_read_unlock(sip);
4601 kfree(arr);
4602 return ret;
4603 }
4604
4605 /* Logic transplanted from tcmu-runner, file_zbc.c */
4606 static void zbc_open_all(struct sdebug_dev_info *devip)
4607 {
4608 struct sdeb_zone_state *zsp = &devip->zstate[0];
4609 unsigned int i;
4610
4611 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4612 if (zsp->z_cond == ZC4_CLOSED)
4613 zbc_open_zone(devip, &devip->zstate[i], true);
4614 }
4615 }
4616
4617 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4618 {
4619 int res = 0;
4620 u64 z_id;
4621 enum sdebug_z_cond zc;
4622 u8 *cmd = scp->cmnd;
4623 struct sdeb_zone_state *zsp;
4624 bool all = cmd[14] & 0x01;
4625 struct sdeb_store_info *sip = devip2sip(devip, false);
4626
4627 if (!sdebug_dev_is_zoned(devip)) {
4628 mk_sense_invalid_opcode(scp);
4629 return check_condition_result;
4630 }
4631
4632 sdeb_write_lock(sip);
4633
4634 if (all) {
4635 /* Check if all closed zones can be opened */
4636 if (devip->max_open &&
4637 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4638 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4639 INSUFF_ZONE_ASCQ);
4640 res = check_condition_result;
4641 goto fini;
4642 }
4643 /* Open all closed zones */
4644 zbc_open_all(devip);
4645 goto fini;
4646 }
4647
4648 /* Open the specified zone */
4649 z_id = get_unaligned_be64(cmd + 2);
4650 if (z_id >= sdebug_capacity) {
4651 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4652 res = check_condition_result;
4653 goto fini;
4654 }
4655
4656 zsp = zbc_zone(devip, z_id);
4657 if (z_id != zsp->z_start) {
4658 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4659 res = check_condition_result;
4660 goto fini;
4661 }
4662 if (zbc_zone_is_conv(zsp)) {
4663 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4664 res = check_condition_result;
4665 goto fini;
4666 }
4667
4668 zc = zsp->z_cond;
4669 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4670 goto fini;
4671
4672 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4673 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4674 INSUFF_ZONE_ASCQ);
4675 res = check_condition_result;
4676 goto fini;
4677 }
4678
4679 zbc_open_zone(devip, zsp, true);
4680 fini:
4681 sdeb_write_unlock(sip);
4682 return res;
4683 }
4684
4685 static void zbc_close_all(struct sdebug_dev_info *devip)
4686 {
4687 unsigned int i;
4688
4689 for (i = 0; i < devip->nr_zones; i++)
4690 zbc_close_zone(devip, &devip->zstate[i]);
4691 }
4692
4693 static int resp_close_zone(struct scsi_cmnd *scp,
4694 struct sdebug_dev_info *devip)
4695 {
4696 int res = 0;
4697 u64 z_id;
4698 u8 *cmd = scp->cmnd;
4699 struct sdeb_zone_state *zsp;
4700 bool all = cmd[14] & 0x01;
4701 struct sdeb_store_info *sip = devip2sip(devip, false);
4702
4703 if (!sdebug_dev_is_zoned(devip)) {
4704 mk_sense_invalid_opcode(scp);
4705 return check_condition_result;
4706 }
4707
4708 sdeb_write_lock(sip);
4709
4710 if (all) {
4711 zbc_close_all(devip);
4712 goto fini;
4713 }
4714
4715 /* Close specified zone */
4716 z_id = get_unaligned_be64(cmd + 2);
4717 if (z_id >= sdebug_capacity) {
4718 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4719 res = check_condition_result;
4720 goto fini;
4721 }
4722
4723 zsp = zbc_zone(devip, z_id);
4724 if (z_id != zsp->z_start) {
4725 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4726 res = check_condition_result;
4727 goto fini;
4728 }
4729 if (zbc_zone_is_conv(zsp)) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731 res = check_condition_result;
4732 goto fini;
4733 }
4734
4735 zbc_close_zone(devip, zsp);
4736 fini:
4737 sdeb_write_unlock(sip);
4738 return res;
4739 }
4740
4741 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4742 struct sdeb_zone_state *zsp, bool empty)
4743 {
4744 enum sdebug_z_cond zc = zsp->z_cond;
4745
4746 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4747 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4748 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4749 zbc_close_zone(devip, zsp);
4750 if (zsp->z_cond == ZC4_CLOSED)
4751 devip->nr_closed--;
4752 zsp->z_wp = zsp->z_start + zsp->z_size;
4753 zsp->z_cond = ZC5_FULL;
4754 }
4755 }
4756
4757 static void zbc_finish_all(struct sdebug_dev_info *devip)
4758 {
4759 unsigned int i;
4760
4761 for (i = 0; i < devip->nr_zones; i++)
4762 zbc_finish_zone(devip, &devip->zstate[i], false);
4763 }
4764
4765 static int resp_finish_zone(struct scsi_cmnd *scp,
4766 struct sdebug_dev_info *devip)
4767 {
4768 struct sdeb_zone_state *zsp;
4769 int res = 0;
4770 u64 z_id;
4771 u8 *cmd = scp->cmnd;
4772 bool all = cmd[14] & 0x01;
4773 struct sdeb_store_info *sip = devip2sip(devip, false);
4774
4775 if (!sdebug_dev_is_zoned(devip)) {
4776 mk_sense_invalid_opcode(scp);
4777 return check_condition_result;
4778 }
4779
4780 sdeb_write_lock(sip);
4781
4782 if (all) {
4783 zbc_finish_all(devip);
4784 goto fini;
4785 }
4786
4787 /* Finish the specified zone */
4788 z_id = get_unaligned_be64(cmd + 2);
4789 if (z_id >= sdebug_capacity) {
4790 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4791 res = check_condition_result;
4792 goto fini;
4793 }
4794
4795 zsp = zbc_zone(devip, z_id);
4796 if (z_id != zsp->z_start) {
4797 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4798 res = check_condition_result;
4799 goto fini;
4800 }
4801 if (zbc_zone_is_conv(zsp)) {
4802 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4803 res = check_condition_result;
4804 goto fini;
4805 }
4806
4807 zbc_finish_zone(devip, zsp, true);
4808 fini:
4809 sdeb_write_unlock(sip);
4810 return res;
4811 }
4812
4813 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4814 struct sdeb_zone_state *zsp)
4815 {
4816 enum sdebug_z_cond zc;
4817 struct sdeb_store_info *sip = devip2sip(devip, false);
4818
4819 if (!zbc_zone_is_seq(zsp))
4820 return;
4821
4822 zc = zsp->z_cond;
4823 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4824 zbc_close_zone(devip, zsp);
4825
4826 if (zsp->z_cond == ZC4_CLOSED)
4827 devip->nr_closed--;
4828
4829 if (zsp->z_wp > zsp->z_start)
4830 memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
4831 (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
4832
4833 zsp->z_non_seq_resource = false;
4834 zsp->z_wp = zsp->z_start;
4835 zsp->z_cond = ZC1_EMPTY;
4836 }
4837
4838 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4839 {
4840 unsigned int i;
4841
4842 for (i = 0; i < devip->nr_zones; i++)
4843 zbc_rwp_zone(devip, &devip->zstate[i]);
4844 }
4845
4846 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4847 {
4848 struct sdeb_zone_state *zsp;
4849 int res = 0;
4850 u64 z_id;
4851 u8 *cmd = scp->cmnd;
4852 bool all = cmd[14] & 0x01;
4853 struct sdeb_store_info *sip = devip2sip(devip, false);
4854
4855 if (!sdebug_dev_is_zoned(devip)) {
4856 mk_sense_invalid_opcode(scp);
4857 return check_condition_result;
4858 }
4859
4860 sdeb_write_lock(sip);
4861
4862 if (all) {
4863 zbc_rwp_all(devip);
4864 goto fini;
4865 }
4866
4867 z_id = get_unaligned_be64(cmd + 2);
4868 if (z_id >= sdebug_capacity) {
4869 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4870 res = check_condition_result;
4871 goto fini;
4872 }
4873
4874 zsp = zbc_zone(devip, z_id);
4875 if (z_id != zsp->z_start) {
4876 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4877 res = check_condition_result;
4878 goto fini;
4879 }
4880 if (zbc_zone_is_conv(zsp)) {
4881 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4882 res = check_condition_result;
4883 goto fini;
4884 }
4885
4886 zbc_rwp_zone(devip, zsp);
4887 fini:
4888 sdeb_write_unlock(sip);
4889 return res;
4890 }
4891
4892 static u32 get_tag(struct scsi_cmnd *cmnd)
4893 {
4894 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4895 }
4896
4897 /* Queued (deferred) command completions converge here. */
4898 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4899 {
4900 struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
4901 unsigned long flags;
4902 struct scsi_cmnd *scp = sqcp->scmd;
4903 struct sdebug_scsi_cmd *sdsc;
4904 bool aborted;
4905
4906 if (sdebug_statistics) {
4907 atomic_inc(&sdebug_completions);
4908 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4909 atomic_inc(&sdebug_miss_cpus);
4910 }
4911
4912 if (!scp) {
4913 pr_err("scmd=NULL\n");
4914 goto out;
4915 }
4916
4917 sdsc = scsi_cmd_priv(scp);
4918 spin_lock_irqsave(&sdsc->lock, flags);
4919 aborted = sd_dp->aborted;
4920 if (unlikely(aborted))
4921 sd_dp->aborted = false;
4922 ASSIGN_QUEUED_CMD(scp, NULL);
4923
4924 spin_unlock_irqrestore(&sdsc->lock, flags);
4925
4926 if (aborted) {
4927 pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
4928 blk_abort_request(scsi_cmd_to_rq(scp));
4929 goto out;
4930 }
4931
4932 scsi_done(scp); /* callback to mid level */
4933 out:
4934 sdebug_free_queued_cmd(sqcp);
4935 }
4936
4937 /* When high resolution timer goes off this function is called. */
4938 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4939 {
4940 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4941 hrt);
4942 sdebug_q_cmd_complete(sd_dp);
4943 return HRTIMER_NORESTART;
4944 }
4945
4946 /* When work queue schedules work, it calls this function. */
4947 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4948 {
4949 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4950 ew.work);
4951 sdebug_q_cmd_complete(sd_dp);
4952 }
4953
4954 static bool got_shared_uuid;
4955 static uuid_t shared_uuid;
4956
4957 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4958 {
4959 struct sdeb_zone_state *zsp;
4960 sector_t capacity = get_sdebug_capacity();
4961 sector_t conv_capacity;
4962 sector_t zstart = 0;
4963 unsigned int i;
4964
4965 /*
4966 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4967 * a zone size allowing for at least 4 zones on the device. Otherwise,
4968 * use the specified zone size checking that at least 2 zones can be
4969 * created for the device.
4970 */
4971 if (!sdeb_zbc_zone_size_mb) {
4972 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4973 >> ilog2(sdebug_sector_size);
4974 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4975 devip->zsize >>= 1;
4976 if (devip->zsize < 2) {
4977 pr_err("Device capacity too small\n");
4978 return -EINVAL;
4979 }
4980 } else {
4981 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4982 pr_err("Zone size is not a power of 2\n");
4983 return -EINVAL;
4984 }
4985 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4986 >> ilog2(sdebug_sector_size);
4987 if (devip->zsize >= capacity) {
4988 pr_err("Zone size too large for device capacity\n");
4989 return -EINVAL;
4990 }
4991 }
4992
4993 devip->zsize_shift = ilog2(devip->zsize);
4994 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4995
4996 if (sdeb_zbc_zone_cap_mb == 0) {
4997 devip->zcap = devip->zsize;
4998 } else {
4999 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5000 ilog2(sdebug_sector_size);
5001 if (devip->zcap > devip->zsize) {
5002 pr_err("Zone capacity too large\n");
5003 return -EINVAL;
5004 }
5005 }
5006
5007 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5008 if (conv_capacity >= capacity) {
5009 pr_err("Number of conventional zones too large\n");
5010 return -EINVAL;
5011 }
5012 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5013 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5014 devip->zsize_shift;
5015 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5016
5017 /* Add gap zones if zone capacity is smaller than the zone size */
5018 if (devip->zcap < devip->zsize)
5019 devip->nr_zones += devip->nr_seq_zones;
5020
5021 if (devip->zmodel == BLK_ZONED_HM) {
5022 /* zbc_max_open_zones can be 0, meaning "not reported" */
5023 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5024 devip->max_open = (devip->nr_zones - 1) / 2;
5025 else
5026 devip->max_open = sdeb_zbc_max_open;
5027 }
5028
5029 devip->zstate = kcalloc(devip->nr_zones,
5030 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5031 if (!devip->zstate)
5032 return -ENOMEM;
5033
5034 for (i = 0; i < devip->nr_zones; i++) {
5035 zsp = &devip->zstate[i];
5036
5037 zsp->z_start = zstart;
5038
5039 if (i < devip->nr_conv_zones) {
5040 zsp->z_type = ZBC_ZTYPE_CNV;
5041 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5042 zsp->z_wp = (sector_t)-1;
5043 zsp->z_size =
5044 min_t(u64, devip->zsize, capacity - zstart);
5045 } else if ((zstart & (devip->zsize - 1)) == 0) {
5046 if (devip->zmodel == BLK_ZONED_HM)
5047 zsp->z_type = ZBC_ZTYPE_SWR;
5048 else
5049 zsp->z_type = ZBC_ZTYPE_SWP;
5050 zsp->z_cond = ZC1_EMPTY;
5051 zsp->z_wp = zsp->z_start;
5052 zsp->z_size =
5053 min_t(u64, devip->zcap, capacity - zstart);
5054 } else {
5055 zsp->z_type = ZBC_ZTYPE_GAP;
5056 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5057 zsp->z_wp = (sector_t)-1;
5058 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5059 capacity - zstart);
5060 }
5061
5062 WARN_ON_ONCE((int)zsp->z_size <= 0);
5063 zstart += zsp->z_size;
5064 }
5065
5066 return 0;
5067 }
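
/*
 * Worked example of the zone layout arithmetic above (illustrative
 * numbers; assumes the driver default zone size of 128 MiB): with a
 * 512-byte logical block size, zsize = (128 * SZ_1M) / 512 = 262144
 * blocks and zsize_shift = 18. A 1 GiB capacity (2097152 blocks) with
 * zone_nr_conv=1 gives 1 conventional zone plus
 * ALIGN(2097152 - 262144, 262144) >> 18 = 7 sequential zones, so
 * nr_zones = 8. If zone_cap_mb is set below the zone size, one gap
 * zone follows each sequential zone and nr_seq_zones is added again.
 */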
5068
5069 static struct sdebug_dev_info *sdebug_device_create(
5070 struct sdebug_host_info *sdbg_host, gfp_t flags)
5071 {
5072 struct sdebug_dev_info *devip;
5073
5074 devip = kzalloc(sizeof(*devip), flags);
5075 if (devip) {
5076 if (sdebug_uuid_ctl == 1)
5077 uuid_gen(&devip->lu_name);
5078 else if (sdebug_uuid_ctl == 2) {
5079 if (got_shared_uuid)
5080 devip->lu_name = shared_uuid;
5081 else {
5082 uuid_gen(&shared_uuid);
5083 got_shared_uuid = true;
5084 devip->lu_name = shared_uuid;
5085 }
5086 }
5087 devip->sdbg_host = sdbg_host;
5088 if (sdeb_zbc_in_use) {
5089 devip->zmodel = sdeb_zbc_model;
5090 if (sdebug_device_create_zones(devip)) {
5091 kfree(devip);
5092 return NULL;
5093 }
5094 } else {
5095 devip->zmodel = BLK_ZONED_NONE;
5096 }
5097 devip->create_ts = ktime_get_boottime();
5098 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5099 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5100 }
5101 return devip;
5102 }
5103
5104 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5105 {
5106 struct sdebug_host_info *sdbg_host;
5107 struct sdebug_dev_info *open_devip = NULL;
5108 struct sdebug_dev_info *devip;
5109
5110 sdbg_host = shost_to_sdebug_host(sdev->host);
5111
5112 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5113 if ((devip->used) && (devip->channel == sdev->channel) &&
5114 (devip->target == sdev->id) &&
5115 (devip->lun == sdev->lun))
5116 return devip;
5117 else {
5118 if ((!devip->used) && (!open_devip))
5119 open_devip = devip;
5120 }
5121 }
5122 if (!open_devip) { /* try and make a new one */
5123 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5124 if (!open_devip) {
5125 pr_err("out of memory at line %d\n", __LINE__);
5126 return NULL;
5127 }
5128 }
5129
5130 open_devip->channel = sdev->channel;
5131 open_devip->target = sdev->id;
5132 open_devip->lun = sdev->lun;
5133 open_devip->sdbg_host = sdbg_host;
5134 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5135 open_devip->used = true;
5136 return open_devip;
5137 }
5138
5139 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5140 {
5141 if (sdebug_verbose)
5142 pr_info("slave_alloc <%u %u %u %llu>\n",
5143 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5144 return 0;
5145 }
5146
5147 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5148 {
5149 struct sdebug_dev_info *devip =
5150 (struct sdebug_dev_info *)sdp->hostdata;
5151
5152 if (sdebug_verbose)
5153 pr_info("slave_configure <%u %u %u %llu>\n",
5154 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5155 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5156 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5157 if (devip == NULL) {
5158 devip = find_build_dev_info(sdp);
5159 if (devip == NULL)
5160 return 1; /* no resources, will be marked offline */
5161 }
5162 sdp->hostdata = devip;
5163 if (sdebug_no_uld)
5164 sdp->no_uld_attach = 1;
5165 config_cdb_len(sdp);
5166 return 0;
5167 }
5168
5169 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5170 {
5171 struct sdebug_dev_info *devip =
5172 (struct sdebug_dev_info *)sdp->hostdata;
5173
5174 if (sdebug_verbose)
5175 pr_info("slave_destroy <%u %u %u %llu>\n",
5176 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5177 if (devip) {
5178 /* make this slot available for re-use */
5179 devip->used = false;
5180 sdp->hostdata = NULL;
5181 }
5182 }
5183
5184 /* Returns true if we require the queued memory to be freed by the caller. */
5185 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5186 enum sdeb_defer_type defer_t)
5187 {
5188 if (defer_t == SDEB_DEFER_HRT) {
5189 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5190
5191 switch (res) {
5192 case 0: /* Not active, it must have already run */
5193 case -1: /* it's currently executing the callback */
5194 return false;
5195 case 1: /* Was active, we've now cancelled */
5196 default:
5197 return true;
5198 }
5199 } else if (defer_t == SDEB_DEFER_WQ) {
5200 /* Cancel if pending */
5201 if (cancel_work_sync(&sd_dp->ew.work))
5202 return true;
5203 /* Was not pending, so it must have run */
5204 return false;
5205 } else if (defer_t == SDEB_DEFER_POLL) {
5206 return true;
5207 }
5208
5209 return false;
5210 }
5211
5212
5213 static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
5214 {
5215 enum sdeb_defer_type l_defer_t;
5216 struct sdebug_defer *sd_dp;
5217 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5218 struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
5219
5220 lockdep_assert_held(&sdsc->lock);
5221
5222 if (!sqcp)
5223 return false;
5224 sd_dp = &sqcp->sd_dp;
5225 l_defer_t = READ_ONCE(sd_dp->defer_t);
5226 ASSIGN_QUEUED_CMD(cmnd, NULL);
5227
5228 if (stop_qc_helper(sd_dp, l_defer_t))
5229 sdebug_free_queued_cmd(sqcp);
5230
5231 return true;
5232 }
5233
5234 /*
5235 * Called from scsi_debug_abort() only, which is for timed-out cmd.
5236 */
5237 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5238 {
5239 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5240 unsigned long flags;
5241 bool res;
5242
5243 spin_lock_irqsave(&sdsc->lock, flags);
5244 res = scsi_debug_stop_cmnd(cmnd);
5245 spin_unlock_irqrestore(&sdsc->lock, flags);
5246
5247 return res;
5248 }
5249
5250 /*
5251 * All we can do is set the cmnd as internally aborted and wait for it to
5252 * finish. We cannot call scsi_done() as normal completion path may do that.
5253 */
5254 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5255 {
5256 scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5257
5258 return true;
5259 }
5260
5261 /* Deletes (stops) timers or work queues of all queued commands */
5262 static void stop_all_queued(void)
5263 {
5264 struct sdebug_host_info *sdhp;
5265
5266 mutex_lock(&sdebug_host_list_mutex);
5267 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5268 struct Scsi_Host *shost = sdhp->shost;
5269
5270 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5271 }
5272 mutex_unlock(&sdebug_host_list_mutex);
5273 }
5274
5275 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5276 {
5277 bool ok = scsi_debug_abort_cmnd(SCpnt);
5278
5279 ++num_aborts;
5280
5281 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5282 sdev_printk(KERN_INFO, SCpnt->device,
5283 "%s: command%s found\n", __func__,
5284 ok ? "" : " not");
5285
5286 return SUCCESS;
5287 }
5288
5289 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5290 {
5291 struct scsi_device *sdp = data;
5292 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5293
5294 if (scmd->device == sdp)
5295 scsi_debug_abort_cmnd(scmd);
5296
5297 return true;
5298 }
5299
5300 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5301 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5302 {
5303 struct Scsi_Host *shost = sdp->host;
5304
5305 blk_mq_tagset_busy_iter(&shost->tag_set,
5306 scsi_debug_stop_all_queued_iter, sdp);
5307 }
5308
5309 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5310 {
5311 struct scsi_device *sdp = SCpnt->device;
5312 struct sdebug_dev_info *devip = sdp->hostdata;
5313
5314 ++num_dev_resets;
5315
5316 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5317 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5318
5319 scsi_debug_stop_all_queued(sdp);
5320 if (devip)
5321 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5322
5323 return SUCCESS;
5324 }
5325
5326 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5327 {
5328 struct scsi_device *sdp = SCpnt->device;
5329 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5330 struct sdebug_dev_info *devip;
5331 int k = 0;
5332
5333 ++num_target_resets;
5334 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5335 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5336
5337 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5338 if (devip->target == sdp->id) {
5339 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5340 ++k;
5341 }
5342 }
5343
5344 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5345 sdev_printk(KERN_INFO, sdp,
5346 "%s: %d device(s) found in target\n", __func__, k);
5347
5348 return SUCCESS;
5349 }
5350
5351 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5352 {
5353 struct scsi_device *sdp = SCpnt->device;
5354 struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
5355 struct sdebug_dev_info *devip;
5356 int k = 0;
5357
5358 ++num_bus_resets;
5359
5360 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5361 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5362
5363 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5364 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5365 ++k;
5366 }
5367
5368 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5369 sdev_printk(KERN_INFO, sdp,
5370 "%s: %d device(s) found in host\n", __func__, k);
5371 return SUCCESS;
5372 }
5373
5374 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5375 {
5376 struct sdebug_host_info *sdbg_host;
5377 struct sdebug_dev_info *devip;
5378 int k = 0;
5379
5380 ++num_host_resets;
5381 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5382 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5383 mutex_lock(&sdebug_host_list_mutex);
5384 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5385 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5386 dev_list) {
5387 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5388 ++k;
5389 }
5390 }
5391 mutex_unlock(&sdebug_host_list_mutex);
5392 stop_all_queued();
5393 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5394 sdev_printk(KERN_INFO, SCpnt->device,
5395 "%s: %d device(s) found\n", __func__, k);
5396 return SUCCESS;
5397 }
5398
5399 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5400 {
5401 struct msdos_partition *pp;
5402 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5403 int sectors_per_part, num_sectors, k;
5404 int heads_by_sects, start_sec, end_sec;
5405
5406 /* assume partition table already zeroed */
5407 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5408 return;
5409 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5410 sdebug_num_parts = SDEBUG_MAX_PARTS;
5411 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5412 }
5413 num_sectors = (int)get_sdebug_capacity();
5414 sectors_per_part = (num_sectors - sdebug_sectors_per)
5415 / sdebug_num_parts;
5416 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5417 starts[0] = sdebug_sectors_per;
5418 max_part_secs = sectors_per_part;
5419 for (k = 1; k < sdebug_num_parts; ++k) {
5420 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5421 * heads_by_sects;
5422 if (starts[k] - starts[k - 1] < max_part_secs)
5423 max_part_secs = starts[k] - starts[k - 1];
5424 }
5425 starts[sdebug_num_parts] = num_sectors;
5426 starts[sdebug_num_parts + 1] = 0;
5427
5428 ramp[510] = 0x55; /* magic partition markings */
5429 ramp[511] = 0xAA;
5430 pp = (struct msdos_partition *)(ramp + 0x1be);
5431 for (k = 0; starts[k + 1]; ++k, ++pp) {
5432 start_sec = starts[k];
5433 end_sec = starts[k] + max_part_secs - 1;
5434 pp->boot_ind = 0;
5435
5436 pp->cyl = start_sec / heads_by_sects;
5437 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5438 / sdebug_sectors_per;
5439 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5440
5441 pp->end_cyl = end_sec / heads_by_sects;
5442 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5443 / sdebug_sectors_per;
5444 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5445
5446 pp->start_sect = cpu_to_le32(start_sec);
5447 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5448 pp->sys_ind = 0x83; /* plain Linux partition */
5449 }
5450 }
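
/*
 * CHS encoding example (illustrative geometry, not necessarily the
 * computed defaults): with sdebug_heads = 16 and sdebug_sectors_per =
 * 32, heads_by_sects = 512. A partition starting at LBA 1024 is then
 * written as cyl = 1024 / 512 = 2, head = (1024 - 2 * 512) / 32 = 0
 * and sector = (1024 % 32) + 1 = 1 in its msdos_partition entry.
 */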
5451
5452 static void block_unblock_all_queues(bool block)
5453 {
5454 struct sdebug_host_info *sdhp;
5455
5456 lockdep_assert_held(&sdebug_host_list_mutex);
5457
5458 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5459 struct Scsi_Host *shost = sdhp->shost;
5460
5461 if (block)
5462 scsi_block_requests(shost);
5463 else
5464 scsi_unblock_requests(shost);
5465 }
5466 }
5467
5468 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5469 * commands will be processed normally before triggers occur.
5470 */
5471 static void tweak_cmnd_count(void)
5472 {
5473 int count, modulo;
5474
5475 modulo = abs(sdebug_every_nth);
5476 if (modulo < 2)
5477 return;
5478
5479 mutex_lock(&sdebug_host_list_mutex);
5480 block_unblock_all_queues(true);
5481 count = atomic_read(&sdebug_cmnd_count);
5482 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5483 block_unblock_all_queues(false);
5484 mutex_unlock(&sdebug_host_list_mutex);
5485 }
5486
5487 static void clear_queue_stats(void)
5488 {
5489 atomic_set(&sdebug_cmnd_count, 0);
5490 atomic_set(&sdebug_completions, 0);
5491 atomic_set(&sdebug_miss_cpus, 0);
5492 atomic_set(&sdebug_a_tsf, 0);
5493 }
5494
5495 static bool inject_on_this_cmd(void)
5496 {
5497 if (sdebug_every_nth == 0)
5498 return false;
5499 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5500 }
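
/*
 * Example of the every_nth machinery (derived from the code above):
 * with every_nth=100, inject_on_this_cmd() fires whenever
 * sdebug_cmnd_count is a multiple of 100. tweak_cmnd_count() rounds
 * the count down to such a multiple, so after a change to opts or
 * every_nth the next 99 commands are processed normally before the
 * next trigger.
 */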
5501
5502 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5503
5504
5505 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
5506 {
5507 if (sqcp)
5508 kmem_cache_free(queued_cmd_cache, sqcp);
5509 }
5510
5511 static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
5512 {
5513 struct sdebug_queued_cmd *sqcp;
5514 struct sdebug_defer *sd_dp;
5515
5516 sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
5517 if (!sqcp)
5518 return NULL;
5519
5520 sd_dp = &sqcp->sd_dp;
5521
5522 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5523 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5524 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5525
5526 sqcp->scmd = scmd;
5527
5528 return sqcp;
5529 }
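
/*
 * Note: the queued-command object is allocated with GFP_ATOMIC above
 * because the allocation happens on the command submission path,
 * where sleeping is not permitted.
 */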
5530
5531 /* Complete the processing of the thread that queued a SCSI command to this
5532 * driver. It either completes the command by calling scsi_done() or
5533 * schedules a hr timer or work queue then returns 0. Returns
5534 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5535 */
5536 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5537 int scsi_result,
5538 int (*pfp)(struct scsi_cmnd *,
5539 struct sdebug_dev_info *),
5540 int delta_jiff, int ndelay)
5541 {
5542 struct request *rq = scsi_cmd_to_rq(cmnd);
5543 bool polled = rq->cmd_flags & REQ_POLLED;
5544 struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5545 unsigned long flags;
5546 u64 ns_from_boot = 0;
5547 struct sdebug_queued_cmd *sqcp;
5548 struct scsi_device *sdp;
5549 struct sdebug_defer *sd_dp;
5550
5551 if (unlikely(devip == NULL)) {
5552 if (scsi_result == 0)
5553 scsi_result = DID_NO_CONNECT << 16;
5554 goto respond_in_thread;
5555 }
5556 sdp = cmnd->device;
5557
5558 if (delta_jiff == 0)
5559 goto respond_in_thread;
5560
5561
5562 if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5563 (scsi_result == 0))) {
5564 int num_in_q = scsi_device_busy(sdp);
5565 int qdepth = cmnd->device->queue_depth;
5566
5567 if ((num_in_q == qdepth) &&
5568 (atomic_inc_return(&sdebug_a_tsf) >=
5569 abs(sdebug_every_nth))) {
5570 atomic_set(&sdebug_a_tsf, 0);
5571 scsi_result = device_qfull_result;
5572
5573 if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
5574 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
5575 __func__, num_in_q);
5576 }
5577 }
5578
5579 sqcp = sdebug_alloc_queued_cmd(cmnd);
5580 if (!sqcp) {
5581 pr_err("%s no alloc\n", __func__);
5582 return SCSI_MLQUEUE_HOST_BUSY;
5583 }
5584 sd_dp = &sqcp->sd_dp;
5585
5586 if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
5587 ns_from_boot = ktime_get_boottime_ns();
5588
5589 /* one of the resp_*() response functions is called here */
5590 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5591 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5592 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5593 delta_jiff = ndelay = 0;
5594 }
5595 if (cmnd->result == 0 && scsi_result != 0)
5596 cmnd->result = scsi_result;
5597 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5598 if (atomic_read(&sdeb_inject_pending)) {
5599 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5600 atomic_set(&sdeb_inject_pending, 0);
5601 cmnd->result = check_condition_result;
5602 }
5603 }
5604
5605 if (unlikely(sdebug_verbose && cmnd->result))
5606 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5607 __func__, cmnd->result);
5608
5609 if (delta_jiff > 0 || ndelay > 0) {
5610 ktime_t kt;
5611
5612 if (delta_jiff > 0) {
5613 u64 ns = jiffies_to_nsecs(delta_jiff);
5614
5615 if (sdebug_random && ns < U32_MAX) {
5616 ns = get_random_u32_below((u32)ns);
5617 } else if (sdebug_random) {
5618 ns >>= 12; /* scale to 4 usec precision */
5619 if (ns < U32_MAX) /* over 4 hours max */
5620 ns = get_random_u32_below((u32)ns);
5621 ns <<= 12;
5622 }
5623 kt = ns_to_ktime(ns);
5624 } else { /* ndelay has a 4.2 second max */
5625 kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
5626 (u32)ndelay;
5627 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5628 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5629
5630 if (kt <= d) { /* elapsed duration >= kt */
5631 /* call scsi_done() from this thread */
5632 sdebug_free_queued_cmd(sqcp);
5633 scsi_done(cmnd);
5634 return 0;
5635 }
5636 /* otherwise reduce kt by elapsed time */
5637 kt -= d;
5638 }
5639 }
5640 if (sdebug_statistics)
5641 sd_dp->issuing_cpu = raw_smp_processor_id();
5642 if (polled) {
5643 spin_lock_irqsave(&sdsc->lock, flags);
5644 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5645 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5646 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5647 spin_unlock_irqrestore(&sdsc->lock, flags);
5648 } else {
5649 /* schedule the invocation of scsi_done() for a later time */
5650 spin_lock_irqsave(&sdsc->lock, flags);
5651 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5652 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
5653 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5654 /*
5655 * The completion handler will try to grab sdsc->lock,
5656 * so there is no chance that the completion handler
5657 * will call scsi_done() until we release the lock
5658 * here (so ok to keep referencing sdsc).
5659 */
5660 spin_unlock_irqrestore(&sdsc->lock, flags);
5661 }
5662 } else { /* delta_jiff < 0, use work queue */
5663 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5664 atomic_read(&sdeb_inject_pending))) {
5665 sd_dp->aborted = true;
5666 atomic_set(&sdeb_inject_pending, 0);
5667 sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
5668 blk_mq_unique_tag_to_tag(get_tag(cmnd)));
5669 }
5670
5671 if (sdebug_statistics)
5672 sd_dp->issuing_cpu = raw_smp_processor_id();
5673 if (polled) {
5674 spin_lock_irqsave(&sdsc->lock, flags);
5675 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5676 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5677 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
5678 spin_unlock_irqrestore(&sdsc->lock, flags);
5679 } else {
5680 spin_lock_irqsave(&sdsc->lock, flags);
5681 ASSIGN_QUEUED_CMD(cmnd, sqcp);
5682 WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
5683 schedule_work(&sd_dp->ew.work);
5684 spin_unlock_irqrestore(&sdsc->lock, flags);
5685 }
5686 }
5687
5688 return 0;
5689
5690 respond_in_thread: /* call back to mid-layer using invocation thread */
5691 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5692 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5693 if (cmnd->result == 0 && scsi_result != 0)
5694 cmnd->result = scsi_result;
5695 scsi_done(cmnd);
5696 return 0;
5697 }
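
/*
 * Summary of the deferral choices in schedule_resp(), restated here
 * for the reader: delta_jiff == 0 responds inline from the submitting
 * thread; a positive delta_jiff or ndelay arms a pinned hrtimer
 * (SDEB_DEFER_HRT), except that a sub-millisecond ndelay which has
 * already elapsed completes inline; a negative delta_jiff defers to
 * the system workqueue (SDEB_DEFER_WQ); and REQ_POLLED requests are
 * parked as SDEB_DEFER_POLL for the driver's mq_poll handler to reap.
 */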
5698
5699 /* Note: The following macros create attribute files in the
5700 /sys/module/scsi_debug/parameters directory. Unfortunately this
5701 driver is unaware of a change and cannot trigger auxiliary actions
5702 as it can when the corresponding attribute in the
5703 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5704 */
5705 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5706 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5707 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5708 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5709 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5710 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5711 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5712 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5713 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5714 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5715 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5716 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5717 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5718 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5719 module_param_string(inq_product, sdebug_inq_product_id,
5720 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5721 module_param_string(inq_rev, sdebug_inq_product_rev,
5722 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5723 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5724 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5725 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5726 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5727 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5728 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5729 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5730 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5731 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5732 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5733 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5734 S_IRUGO | S_IWUSR);
5735 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5736 S_IRUGO | S_IWUSR);
5737 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5738 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5739 module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
5740 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5741 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5742 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5743 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5744 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5745 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5746 module_param_named(per_host_store, sdebug_per_host_store, bool,
5747 S_IRUGO | S_IWUSR);
5748 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5749 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5750 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5751 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5752 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5753 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5754 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5755 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5756 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5757 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5758 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5759 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5760 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5761 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5762 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5763 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5764 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5765 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5766 S_IRUGO | S_IWUSR);
5767 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5768 module_param_named(write_same_length, sdebug_write_same_length, int,
5769 S_IRUGO | S_IWUSR);
5770 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5771 module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
5772 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5773 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5774 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5775
5776 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5777 MODULE_DESCRIPTION("SCSI debug adapter driver");
5778 MODULE_LICENSE("GPL");
5779 MODULE_VERSION(SDEBUG_VERSION);
5780
5781 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5782 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5783 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5784 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5785 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5786 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5787 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5788 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5789 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
5790 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
5791 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5792 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5793 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5794 MODULE_PARM_DESC(host_max_queue,
5795 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5796 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5797 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5798 SDEBUG_VERSION "\")");
5799 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5800 MODULE_PARM_DESC(lbprz,
5801 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5802 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5803 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5804 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5805 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5806 MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
5807 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
5808 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5809 MODULE_PARM_DESC(medium_error_count, "count of consecutive sectors, starting at medium_error_start, that return MEDIUM error");
5810 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5811 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5812 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5813 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
5814 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
5815 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
5816 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
5817 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5818 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5819 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5820 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5821 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5822 MODULE_PARM_DESC(poll_queues, "support for io_uring iopoll queues (1 to max(submit_queues - 1))");
5823 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0 [disk])");
5824 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5825 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5826 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7 [SPC-5])");
5827 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5828 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5829 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5830 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5831 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5832 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5833 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5834 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5835 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5836 MODULE_PARM_DESC(uuid_ctl,
5837 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5838 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5839 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5840 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5841 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5842 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5843 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
5844 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5845 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5846 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
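
/*
 * Illustrative invocation using the parameters declared above (values
 * chosen for the example only):
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=2 \
 *            zbc=managed zone_size_mb=16 zone_nr_conv=1 delay=1
 *
 * This creates two targets with two LUNs each on one host, backed by
 * a shared 256 MiB ram store and presented as host-managed zoned
 * disks with 16 MiB zones, the first of them conventional.
 */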
5847
5848 #define SDEBUG_INFO_LEN 256
5849 static char sdebug_info[SDEBUG_INFO_LEN];
5850
5851 static const char *scsi_debug_info(struct Scsi_Host *shp)
5852 {
5853 int k;
5854
5855 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5856 my_name, SDEBUG_VERSION, sdebug_version_date);
5857 if (k >= (SDEBUG_INFO_LEN - 1))
5858 return sdebug_info;
5859 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5860 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5861 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5862 "statistics", (int)sdebug_statistics);
5863 return sdebug_info;
5864 }
5865
5866 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5867 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5868 int length)
5869 {
5870 char arr[16];
5871 int opts;
5872 int minLen = length > 15 ? 15 : length;
5873
5874 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5875 return -EACCES;
5876 memcpy(arr, buffer, minLen);
5877 arr[minLen] = '\0';
5878 if (1 != sscanf(arr, "%d", &opts))
5879 return -EINVAL;
5880 sdebug_opts = opts;
5881 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5882 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5883 if (sdebug_every_nth != 0)
5884 tweak_cmnd_count();
5885 return length;
5886 }
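
/*
 * Example (illustrative): 'echo 1 > /proc/scsi/scsi_debug/<host_id>'
 * sets opts=1 (noise), enabling verbose logging. Note this path only
 * parses a plain decimal via sscanf("%d"); the sysfs opts attribute
 * below also accepts a 0x-prefixed hex value.
 */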
5887
5888 struct sdebug_submit_queue_data {
5889 int *first;
5890 int *last;
5891 int queue_num;
5892 };
5893
5894 static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
5895 {
5896 struct sdebug_submit_queue_data *data = opaque;
5897 u32 unique_tag = blk_mq_unique_tag(rq);
5898 u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
5899 u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
5900 int queue_num = data->queue_num;
5901
5902 if (hwq != queue_num)
5903 return true;
5904
5905 /* Rely on iter'ing in ascending tag order */
5906 if (*data->first == -1)
5907 *data->first = *data->last = tag;
5908 else
5909 *data->last = tag;
5910
5911 return true;
5912 }
5913
5914 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5915 * same for each scsi_debug host (if more than one). Some of the counters
5916 * output are not atomic, so they may be inaccurate on a busy system. */
5917 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5918 {
5919 struct sdebug_host_info *sdhp;
5920 int j;
5921
5922 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5923 SDEBUG_VERSION, sdebug_version_date);
5924 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5925 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5926 sdebug_opts, sdebug_every_nth);
5927 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5928 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5929 sdebug_sector_size, "bytes");
5930 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5931 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5932 num_aborts);
5933 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5934 num_dev_resets, num_target_resets, num_bus_resets,
5935 num_host_resets);
5936 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5937 dix_reads, dix_writes, dif_errors);
5938 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5939 sdebug_statistics);
5940 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5941 atomic_read(&sdebug_cmnd_count),
5942 atomic_read(&sdebug_completions),
5943 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5944 atomic_read(&sdebug_a_tsf),
5945 atomic_read(&sdeb_mq_poll_count));
5946
5947 seq_printf(m, "submit_queues=%d\n", submit_queues);
5948 for (j = 0; j < submit_queues; ++j) {
5949 int f = -1, l = -1;
5950 struct sdebug_submit_queue_data data = {
5951 .queue_num = j,
5952 .first = &f,
5953 .last = &l,
5954 };
5955 seq_printf(m, " queue %d:\n", j);
5956 blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
5957 &data);
5958 if (f >= 0) {
5959 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5960 "first,last bits", f, l);
5961 }
5962 }
5963
5964 seq_printf(m, "this host_no=%d\n", host->host_no);
5965 if (!xa_empty(per_store_ap)) {
5966 bool niu;
5967 int idx;
5968 unsigned long l_idx;
5969 struct sdeb_store_info *sip;
5970
5971 seq_puts(m, "\nhost list:\n");
5972 j = 0;
5973 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5974 idx = sdhp->si_idx;
5975 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5976 sdhp->shost->host_no, idx);
5977 ++j;
5978 }
5979 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5980 sdeb_most_recent_idx);
5981 j = 0;
5982 xa_for_each(per_store_ap, l_idx, sip) {
5983 niu = xa_get_mark(per_store_ap, l_idx,
5984 SDEB_XA_NOT_IN_USE);
5985 idx = (int)l_idx;
5986 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5987 (niu ? " not_in_use" : ""));
5988 ++j;
5989 }
5990 }
5991 return 0;
5992 }
5993
5994 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5995 {
5996 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5997 }
5998 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5999 * of delay is jiffies.
6000 */
6001 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6002 size_t count)
6003 {
6004 int jdelay, res;
6005
6006 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6007 res = count;
6008 if (sdebug_jdelay != jdelay) {
6009 struct sdebug_host_info *sdhp;
6010
6011 mutex_lock(&sdebug_host_list_mutex);
6012 block_unblock_all_queues(true);
6013
6014 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6015 struct Scsi_Host *shost = sdhp->shost;
6016
6017 if (scsi_host_busy(shost)) {
6018 res = -EBUSY; /* queued commands */
6019 break;
6020 }
6021 }
6022 if (res > 0) {
6023 sdebug_jdelay = jdelay;
6024 sdebug_ndelay = 0;
6025 }
6026 block_unblock_all_queues(false);
6027 mutex_unlock(&sdebug_host_list_mutex);
6028 }
6029 return res;
6030 }
6031 return -EINVAL;
6032 }
6033 static DRIVER_ATTR_RW(delay);
6034
6035 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6036 {
6037 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6038 }
6039 /* Returns -EBUSY if ndelay is being changed and commands are queued */
6040 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
6041 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6042 size_t count)
6043 {
6044 int ndelay, res;
6045
6046 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6047 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6048 res = count;
6049 if (sdebug_ndelay != ndelay) {
6050 struct sdebug_host_info *sdhp;
6051
6052 mutex_lock(&sdebug_host_list_mutex);
6053 block_unblock_all_queues(true);
6054
6055 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6056 struct Scsi_Host *shost = sdhp->shost;
6057
6058 if (scsi_host_busy(shost)) {
6059 res = -EBUSY; /* queued commands */
6060 break;
6061 }
6062 }
6063
6064 if (res > 0) {
6065 sdebug_ndelay = ndelay;
6066 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6067 : DEF_JDELAY;
6068 }
6069 block_unblock_all_queues(false);
6070 mutex_unlock(&sdebug_host_list_mutex);
6071 }
6072 return res;
6073 }
6074 return -EINVAL;
6075 }
6076 static DRIVER_ATTR_RW(ndelay);
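
/*
 * Usage sketch for the two delay attributes (illustrative):
 *
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay         # immediate
 *   echo 1000000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay  # 1 ms
 *
 * A nonzero ndelay sets sdebug_jdelay to JDELAY_OVERRIDDEN; both
 * stores return -EBUSY while any host still has queued commands.
 */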
6077
6078 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6079 {
6080 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6081 }
6082
6083 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6084 size_t count)
6085 {
6086 int opts;
6087 char work[20];
6088
6089 if (sscanf(buf, "%10s", work) == 1) {
6090 if (strncasecmp(work, "0x", 2) == 0) {
6091 if (kstrtoint(work + 2, 16, &opts) == 0)
6092 goto opts_done;
6093 } else {
6094 if (kstrtoint(work, 10, &opts) == 0)
6095 goto opts_done;
6096 }
6097 }
6098 return -EINVAL;
6099 opts_done:
6100 sdebug_opts = opts;
6101 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6102 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6103 tweak_cmnd_count();
6104 return count;
6105 }
6106 static DRIVER_ATTR_RW(opts);
6107
6108 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6109 {
6110 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6111 }
6112 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6113 size_t count)
6114 {
6115 int n;
6116
6117 /* Cannot change from or to TYPE_ZBC with sysfs */
6118 if (sdebug_ptype == TYPE_ZBC)
6119 return -EINVAL;
6120
6121 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6122 if (n == TYPE_ZBC)
6123 return -EINVAL;
6124 sdebug_ptype = n;
6125 return count;
6126 }
6127 return -EINVAL;
6128 }
6129 static DRIVER_ATTR_RW(ptype);
6130
6131 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6132 {
6133 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6134 }
6135 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6136 size_t count)
6137 {
6138 int n;
6139
6140 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6141 sdebug_dsense = n;
6142 return count;
6143 }
6144 return -EINVAL;
6145 }
6146 static DRIVER_ATTR_RW(dsense);
6147
6148 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6149 {
6150 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6151 }
6152 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6153 size_t count)
6154 {
6155 int n, idx;
6156
6157 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6158 bool want_store = (n == 0);
6159 struct sdebug_host_info *sdhp;
6160
6161 n = (n > 0);
6162 sdebug_fake_rw = (sdebug_fake_rw > 0);
6163 if (sdebug_fake_rw == n)
6164 return count; /* not transitioning so do nothing */
6165
6166 if (want_store) { /* 1 --> 0 transition, set up store */
6167 if (sdeb_first_idx < 0) {
6168 idx = sdebug_add_store();
6169 if (idx < 0)
6170 return idx;
6171 } else {
6172 idx = sdeb_first_idx;
6173 xa_clear_mark(per_store_ap, idx,
6174 SDEB_XA_NOT_IN_USE);
6175 }
6176 /* make all hosts use same store */
6177 list_for_each_entry(sdhp, &sdebug_host_list,
6178 host_list) {
6179 if (sdhp->si_idx != idx) {
6180 xa_set_mark(per_store_ap, sdhp->si_idx,
6181 SDEB_XA_NOT_IN_USE);
6182 sdhp->si_idx = idx;
6183 }
6184 }
6185 sdeb_most_recent_idx = idx;
6186 } else { /* 0 --> 1 transition is trigger for shrink */
6187 sdebug_erase_all_stores(true /* apart from first */);
6188 }
6189 sdebug_fake_rw = n;
6190 return count;
6191 }
6192 return -EINVAL;
6193 }
6194 static DRIVER_ATTR_RW(fake_rw);
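
/*
 * Transition notes for fake_rw (derived from the code above): writing
 * 0 when fake_rw was 1 sets up (or revives) a store and points every
 * host at it; writing 1 when it was 0 erases all stores apart from
 * the first; writing the current value is a no-op that still returns
 * count.
 */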
6195
6196 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6197 {
6198 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6199 }
6200 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6201 size_t count)
6202 {
6203 int n;
6204
6205 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6206 sdebug_no_lun_0 = n;
6207 return count;
6208 }
6209 return -EINVAL;
6210 }
6211 static DRIVER_ATTR_RW(no_lun_0);
6212
6213 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6214 {
6215 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6216 }
6217 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6218 size_t count)
6219 {
6220 int n;
6221
6222 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6223 sdebug_num_tgts = n;
6224 sdebug_max_tgts_luns();
6225 return count;
6226 }
6227 return -EINVAL;
6228 }
6229 static DRIVER_ATTR_RW(num_tgts);
6230
6231 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6232 {
6233 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6234 }
6235 static DRIVER_ATTR_RO(dev_size_mb);
6236
6237 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6238 {
6239 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6240 }
6241
6242 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6243 size_t count)
6244 {
6245 bool v;
6246
6247 if (kstrtobool(buf, &v))
6248 return -EINVAL;
6249
6250 sdebug_per_host_store = v;
6251 return count;
6252 }
6253 static DRIVER_ATTR_RW(per_host_store);
6254
6255 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6256 {
6257 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6258 }
6259 static DRIVER_ATTR_RO(num_parts);
6260
6261 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6262 {
6263 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6264 }
6265 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6266 size_t count)
6267 {
6268 int nth;
6269 char work[20];
6270
6271 if (sscanf(buf, "%10s", work) == 1) {
6272 if (strncasecmp(work, "0x", 2) == 0) {
6273 if (kstrtoint(work + 2, 16, &nth) == 0)
6274 goto every_nth_done;
6275 } else {
6276 if (kstrtoint(work, 10, &nth) == 0)
6277 goto every_nth_done;
6278 }
6279 }
6280 return -EINVAL;
6281
6282 every_nth_done:
6283 sdebug_every_nth = nth;
6284 if (nth && !sdebug_statistics) {
6285 pr_info("every_nth needs statistics=1, set it\n");
6286 sdebug_statistics = true;
6287 }
6288 tweak_cmnd_count();
6289 return count;
6290 }
6291 static DRIVER_ATTR_RW(every_nth);
6292
6293 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6294 {
6295 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6296 }
6297 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6298 size_t count)
6299 {
6300 int n;
6301 bool changed;
6302
6303 if (kstrtoint(buf, 0, &n))
6304 return -EINVAL;
6305 if (n >= 0) {
6306 if (n > (int)SAM_LUN_AM_FLAT) {
6307 pr_warn("only LUN address methods 0 and 1 are supported\n");
6308 return -EINVAL;
6309 }
6310 changed = ((int)sdebug_lun_am != n);
6311 sdebug_lun_am = n;
6312 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6313 struct sdebug_host_info *sdhp;
6314 struct sdebug_dev_info *dp;
6315
6316 mutex_lock(&sdebug_host_list_mutex);
6317 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6318 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6319 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6320 }
6321 }
6322 mutex_unlock(&sdebug_host_list_mutex);
6323 }
6324 return count;
6325 }
6326 return -EINVAL;
6327 }
6328 static DRIVER_ATTR_RW(lun_format);
6329
6330 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6331 {
6332 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6333 }
6334 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6335 size_t count)
6336 {
6337 int n;
6338 bool changed;
6339
6340 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6341 if (n > 256) {
6342 pr_warn("max_luns can be no more than 256\n");
6343 return -EINVAL;
6344 }
6345 changed = (sdebug_max_luns != n);
6346 sdebug_max_luns = n;
6347 sdebug_max_tgts_luns();
6348 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6349 struct sdebug_host_info *sdhp;
6350 struct sdebug_dev_info *dp;
6351
6352 mutex_lock(&sdebug_host_list_mutex);
6353 list_for_each_entry(sdhp, &sdebug_host_list,
6354 host_list) {
6355 list_for_each_entry(dp, &sdhp->dev_info_list,
6356 dev_list) {
6357 set_bit(SDEBUG_UA_LUNS_CHANGED,
6358 dp->uas_bm);
6359 }
6360 }
6361 mutex_unlock(&sdebug_host_list_mutex);
6362 }
6363 return count;
6364 }
6365 return -EINVAL;
6366 }
6367 static DRIVER_ATTR_RW(max_luns);
6368
6369 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6370 {
6371 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6372 }
6373 /* N.B. max_queue can be changed while there are queued commands. In flight
6374 * commands beyond the new max_queue will be completed. */
6375 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6376 size_t count)
6377 {
6378 int n;
6379
6380 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6381 (n <= SDEBUG_CANQUEUE) &&
6382 (sdebug_host_max_queue == 0)) {
6383 mutex_lock(&sdebug_host_list_mutex);
6384
6385 /* We may only change sdebug_max_queue when we have no shosts */
6386 if (list_empty(&sdebug_host_list))
6387 sdebug_max_queue = n;
6388 else
6389 count = -EBUSY;
6390 mutex_unlock(&sdebug_host_list_mutex);
6391 return count;
6392 }
6393 return -EINVAL;
6394 }
6395 static DRIVER_ATTR_RW(max_queue);
6396
6397 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6398 {
6399 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6400 }
6401
6402 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6403 {
6404 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6405 }
6406
6407 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6408 {
6409 bool v;
6410
6411 if (kstrtobool(buf, &v))
6412 return -EINVAL;
6413
6414 sdebug_no_rwlock = v;
6415 return count;
6416 }
6417 static DRIVER_ATTR_RW(no_rwlock);
6418
6419 /*
6420 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6421 * in range [0, sdebug_host_max_queue), we can't change it.
6422 */
6423 static DRIVER_ATTR_RO(host_max_queue);
6424
6425 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6426 {
6427 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6428 }
6429 static DRIVER_ATTR_RO(no_uld);
6430
6431 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6432 {
6433 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6434 }
6435 static DRIVER_ATTR_RO(scsi_level);
6436
6437 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6438 {
6439 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6440 }
6441 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6442 size_t count)
6443 {
6444 int n;
6445 bool changed;
6446
6447 /* Ignore capacity change for ZBC drives for now */
6448 if (sdeb_zbc_in_use)
6449 return -ENOTSUPP;
6450
6451 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6452 changed = (sdebug_virtual_gb != n);
6453 sdebug_virtual_gb = n;
6454 sdebug_capacity = get_sdebug_capacity();
6455 if (changed) {
6456 struct sdebug_host_info *sdhp;
6457 struct sdebug_dev_info *dp;
6458
6459 mutex_lock(&sdebug_host_list_mutex);
6460 list_for_each_entry(sdhp, &sdebug_host_list,
6461 host_list) {
6462 list_for_each_entry(dp, &sdhp->dev_info_list,
6463 dev_list) {
6464 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6465 dp->uas_bm);
6466 }
6467 }
6468 mutex_unlock(&sdebug_host_list_mutex);
6469 }
6470 return count;
6471 }
6472 return -EINVAL;
6473 }
6474 static DRIVER_ATTR_RW(virtual_gb);
6475
6476 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6477 {
6478 /* absolute number of hosts currently active is what is shown */
6479 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6480 }
6481
6482 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6483 size_t count)
6484 {
6485 bool found;
6486 unsigned long idx;
6487 struct sdeb_store_info *sip;
6488 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6489 int delta_hosts;
6490
6491 if (sscanf(buf, "%d", &delta_hosts) != 1)
6492 return -EINVAL;
6493 if (delta_hosts > 0) {
6494 do {
6495 found = false;
6496 if (want_phs) {
6497 xa_for_each_marked(per_store_ap, idx, sip,
6498 SDEB_XA_NOT_IN_USE) {
6499 sdeb_most_recent_idx = (int)idx;
6500 found = true;
6501 break;
6502 }
6503 if (found) /* re-use case */
6504 sdebug_add_host_helper((int)idx);
6505 else
6506 sdebug_do_add_host(true);
6507 } else {
6508 sdebug_do_add_host(false);
6509 }
6510 } while (--delta_hosts);
6511 } else if (delta_hosts < 0) {
6512 do {
6513 sdebug_do_remove_host(false);
6514 } while (++delta_hosts);
6515 }
6516 return count;
6517 }
6518 static DRIVER_ATTR_RW(add_host);
6519
6520 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6521 {
6522 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6523 }
6524 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6525 size_t count)
6526 {
6527 int n;
6528
6529 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6530 sdebug_vpd_use_hostno = n;
6531 return count;
6532 }
6533 return -EINVAL;
6534 }
6535 static DRIVER_ATTR_RW(vpd_use_hostno);
6536
6537 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6538 {
6539 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6540 }
6541 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6542 size_t count)
6543 {
6544 int n;
6545
6546 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6547 if (n > 0)
6548 sdebug_statistics = true;
6549 else {
6550 clear_queue_stats();
6551 sdebug_statistics = false;
6552 }
6553 return count;
6554 }
6555 return -EINVAL;
6556 }
6557 static DRIVER_ATTR_RW(statistics);
6558
sector_size_show(struct device_driver * ddp,char * buf)6559 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6560 {
6561 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6562 }
6563 static DRIVER_ATTR_RO(sector_size);
6564
submit_queues_show(struct device_driver * ddp,char * buf)6565 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6566 {
6567 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6568 }
6569 static DRIVER_ATTR_RO(submit_queues);
6570
dix_show(struct device_driver * ddp,char * buf)6571 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6572 {
6573 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6574 }
6575 static DRIVER_ATTR_RO(dix);
6576
dif_show(struct device_driver * ddp,char * buf)6577 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6578 {
6579 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6580 }
6581 static DRIVER_ATTR_RO(dif);
6582
guard_show(struct device_driver * ddp,char * buf)6583 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6584 {
6585 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6586 }
6587 static DRIVER_ATTR_RO(guard);
6588
ato_show(struct device_driver * ddp,char * buf)6589 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6590 {
6591 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6592 }
6593 static DRIVER_ATTR_RO(ato);
6594
map_show(struct device_driver * ddp,char * buf)6595 static ssize_t map_show(struct device_driver *ddp, char *buf)
6596 {
6597 ssize_t count = 0;
6598
6599 if (!scsi_debug_lbp())
6600 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6601 sdebug_store_sectors);
6602
6603 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6604 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6605
6606 if (sip)
6607 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6608 (int)map_size, sip->map_storep);
6609 }
6610 buf[count++] = '\n';
6611 buf[count] = '\0';
6612
6613 return count;
6614 }
6615 static DRIVER_ATTR_RO(map);
6616
random_show(struct device_driver * ddp,char * buf)6617 static ssize_t random_show(struct device_driver *ddp, char *buf)
6618 {
6619 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6620 }
6621
random_store(struct device_driver * ddp,const char * buf,size_t count)6622 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6623 size_t count)
6624 {
6625 bool v;
6626
6627 if (kstrtobool(buf, &v))
6628 return -EINVAL;
6629
6630 sdebug_random = v;
6631 return count;
6632 }
6633 static DRIVER_ATTR_RW(random);
6634
removable_show(struct device_driver * ddp,char * buf)6635 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6636 {
6637 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6638 }
removable_store(struct device_driver * ddp,const char * buf,size_t count)6639 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6640 size_t count)
6641 {
6642 int n;
6643
6644 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6645 sdebug_removable = (n > 0);
6646 return count;
6647 }
6648 return -EINVAL;
6649 }
6650 static DRIVER_ATTR_RW(removable);
6651
host_lock_show(struct device_driver * ddp,char * buf)6652 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6653 {
6654 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6655 }
6656 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
host_lock_store(struct device_driver * ddp,const char * buf,size_t count)6657 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6658 size_t count)
6659 {
6660 int n;
6661
6662 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6663 sdebug_host_lock = (n > 0);
6664 return count;
6665 }
6666 return -EINVAL;
6667 }
6668 static DRIVER_ATTR_RW(host_lock);
6669
strict_show(struct device_driver * ddp,char * buf)6670 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6671 {
6672 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6673 }
strict_store(struct device_driver * ddp,const char * buf,size_t count)6674 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6675 size_t count)
6676 {
6677 int n;
6678
6679 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6680 sdebug_strict = (n > 0);
6681 return count;
6682 }
6683 return -EINVAL;
6684 }
6685 static DRIVER_ATTR_RW(strict);
6686
uuid_ctl_show(struct device_driver * ddp,char * buf)6687 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6688 {
6689 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6690 }
6691 static DRIVER_ATTR_RO(uuid_ctl);
6692
cdb_len_show(struct device_driver * ddp,char * buf)6693 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6694 {
6695 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6696 }
cdb_len_store(struct device_driver * ddp,const char * buf,size_t count)6697 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6698 size_t count)
6699 {
6700 int ret, n;
6701
6702 ret = kstrtoint(buf, 0, &n);
6703 if (ret)
6704 return ret;
6705 sdebug_cdb_len = n;
6706 all_config_cdb_len();
6707 return count;
6708 }
6709 static DRIVER_ATTR_RW(cdb_len);
6710
6711 static const char * const zbc_model_strs_a[] = {
6712 [BLK_ZONED_NONE] = "none",
6713 [BLK_ZONED_HA] = "host-aware",
6714 [BLK_ZONED_HM] = "host-managed",
6715 };
6716
6717 static const char * const zbc_model_strs_b[] = {
6718 [BLK_ZONED_NONE] = "no",
6719 [BLK_ZONED_HA] = "aware",
6720 [BLK_ZONED_HM] = "managed",
6721 };
6722
6723 static const char * const zbc_model_strs_c[] = {
6724 [BLK_ZONED_NONE] = "0",
6725 [BLK_ZONED_HA] = "1",
6726 [BLK_ZONED_HM] = "2",
6727 };
6728
sdeb_zbc_model_str(const char * cp)6729 static int sdeb_zbc_model_str(const char *cp)
6730 {
6731 int res = sysfs_match_string(zbc_model_strs_a, cp);
6732
6733 if (res < 0) {
6734 res = sysfs_match_string(zbc_model_strs_b, cp);
6735 if (res < 0) {
6736 res = sysfs_match_string(zbc_model_strs_c, cp);
6737 if (res < 0)
6738 return -EINVAL;
6739 }
6740 }
6741 return res;
6742 }
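
/*
 * The three tables above mean the zbc= module parameter accepts several
 * spellings for each model: for example "host-managed", "managed" and "2"
 * all resolve to BLK_ZONED_HM, while "none", "no" and "0" all resolve to
 * BLK_ZONED_NONE.
 */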

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);

/*
 * Note: the following array creates attribute files in the
 * /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
 * files (over those found in the /sys/module/scsi_debug/parameters
 * directory) is that auxiliary actions can be triggered when an attribute
 * is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
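
/*
 * ATTRIBUTE_GROUPS() expands the array above into sdebug_drv_groups, which
 * is plugged into pseudo_lld_bus.drv_groups further down, so the files
 * appear as soon as the pseudo bus driver is registered.
 */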

static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, using default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0, %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;	/* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
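
	/*
	 * Worked example of the heuristic above: a 256 MB ramdisk with
	 * 512-byte sectors has 524288 sectors, so heads=64 and
	 * sectors_per=32 give 524288 / (32 * 64) = 256 cylinders, well
	 * under the 1024 limit; only ramdisks of roughly 1 GB or more
	 * fall back to the 255/63 geometry.
	 */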
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
	return ret;
}

static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	for (; k; k--)
		sdebug_do_remove_host(true);
	kmem_cache_destroy(queued_cmd_cache);
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = dev_to_sdebug_host(dev);
	kfree(sdbg_host);
}

/* idx must be valid; if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}

/* Assume apart_from_first==false only in the shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}

/*
 * Returns the new store's xarray element index (idx) if >= 0, else a
 * negated errno. Limits the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map the first 1 KB for the partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
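
/*
 * Sizing example for the allocations above (for example dev_size_mb=8 and
 * sector_size=512): storep is an 8 MiB vzalloc'd ramdisk of 16384 sectors;
 * with dix enabled, dif_storep adds 16384 * sizeof(struct t10_pi_tuple) =
 * 128 KiB of protection information, memset to 0xff (presumably so stale
 * tags read as the T10 all-ones escape values until real PI is written).
 */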

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	struct sdebug_dev_info *devip = sdev->hostdata;

	if (!devip)
		return -ENODEV;

	mutex_lock(&sdebug_host_list_mutex);
	block_unblock_all_queues(true);

	if (qdepth > SDEBUG_CANQUEUE) {
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n",
			__func__, qdepth, SDEBUG_CANQUEUE);
		qdepth = SDEBUG_CANQUEUE;
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	block_unblock_all_queues(false);
	mutex_unlock(&sdebug_host_list_mutex);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);

	return sdev->queue_depth;
}

static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true;	/* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true;	/* time out reads and writes */
	}
	return false;
}
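
/*
 * Example: with every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every
 * 100th command (as counted by sdebug_cmnd_count) is silently ignored,
 * which exercises the mid-level's timeout and abort handling on demand.
 * Values below -1 are clamped to -1 after the first hit.
 */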

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer has expired */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
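
/*
 * Example of the sense data produced above (a hypothetical timeline,
 * assuming tur_ms_to_ready=5000): a TEST UNIT READY issued 2 s after the
 * device was created fails with NOT READY / 0x4,0x1 and carries 3000 in
 * its sense-data INFORMATION field, i.e. the milliseconds remaining until
 * the unit becomes ready, per T10 proposal 20-061r2.
 */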

static void sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
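
/*
 * Example mapping (hypothetical module load with submit_queues=4 and
 * poll_queues=1): HCTX_TYPE_DEFAULT gets 3 hardware queues at offset 0,
 * HCTX_TYPE_READ gets none, and HCTX_TYPE_POLL gets 1 queue at offset 3,
 * so polled I/O lands on the last hardware queue.
 */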

struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;
	int *num_entries;
};

/*
 * We don't handle aborted commands here, but it does not seem possible to
 * have aborted polled commands from schedule_resp().
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
	struct sdebug_blk_mq_poll_data *data = opaque;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
	struct sdebug_defer *sd_dp;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	struct sdebug_queued_cmd *sqcp;
	unsigned long flags;
	int queue_num = data->queue_num;
	ktime_t time;

	/* We're only interested in one queue for this iteration */
	if (hwq != queue_num)
		return true;

	/* Subsequent checks would fail if this failed, but check anyway */
	if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
		return true;

	time = ktime_get_boottime();

	spin_lock_irqsave(&sdsc->lock, flags);
	sqcp = TO_QUEUED_CMD(cmd);
	if (!sqcp) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	sd_dp = &sqcp->sd_dp;
	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	if (time < sd_dp->cmpl_ts) {
		spin_unlock_irqrestore(&sdsc->lock, flags);
		return true;
	}

	ASSIGN_QUEUED_CMD(cmd, NULL);
	spin_unlock_irqrestore(&sdsc->lock, flags);

	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}

	sdebug_free_queued_cmd(sqcp);

	scsi_done(cmd);	/* callback to mid level */
	(*data->num_entries)++;
	return true;
}

static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	int num_entries = 0;
	struct sdebug_blk_mq_poll_data data = {
		.queue_num = queue_num,
		.num_entries = &num_entries,
	};

	blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
				&data);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {	/* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Opcode 0x%x not supported for wlun\n",
				    my_name, opcode);
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay > 1 want a long delay of that many
		 * seconds. For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
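
/*
 * Worked example for the fini: label above (assuming sdebug_jdelay=4):
 * commands flagged F_DELAY_OVERR (e.g. INQUIRY) respond as soon as
 * possible; START STOP UNIT takes the F_LONG_DELAY branch and waits
 * mult_frac(USER_HZ * 4, HZ, USER_HZ) = 4 * HZ jiffies, i.e. about 4
 * seconds; SYNCHRONIZE CACHE (F_SYNC_DELAY) divides that by 20, so about
 * 200 ms. Everything else uses the plain jdelay/ndelay module settings.
 */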

static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);

	spin_lock_init(&sdsc->lock);

	return 0;
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell the scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O. If that condition is not met,
	 * trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;

	sdbg_host->shost = hpnt;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = dev_to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};