// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992  Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
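/*
 * Illustrative usage sketch (an assumption based on the parameter names
 * defined in this file; the web page cited above is the authoritative
 * documentation):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 *
 * should yield 1 host with 2 targets (ids 0 and 1), each presenting 4
 * logical units backed by a 256 MiB ram store, instead of the 1 host,
 * 1 target, 1 lun default described above.
 */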
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT   0
#define DEF_DEV_SIZE_MB   8
#define DEF_ZBC_DEV_SIZE_MB   128
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE   0
#define DEF_EVERY_NTH   0
#define DEF_FAKE_RW	0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY   0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0   0
#define DEF_NUM_PARTS   0
#define DEF_OPTS   0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE   TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB   0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB	128
#define DEF_ZBC_MAX_OPEN_ZONES	8
#define DEF_ZBC_NR_CONV_ZONES	1

#define SDEBUG_LUN_0_VAL 0

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA.
 */
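/*
 * Example of the ordering described above: if both SDEBUG_UA_POR (bit 0)
 * and SDEBUG_UA_LUNS_CHANGED (bit 4) are pending in a device's uas_bm,
 * make_ua() further below scans with find_first_bit() and therefore
 * reports the power-on-reset UA first.
 */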
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7

/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM    10	/* number of consecutive medium errs */

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY	(F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
	unsigned int z_size;
	sector_t z_start;
	sector_t z_wp;
};

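/*
 * Note: the zone condition values above are deliberately non-contiguous;
 * they carry the raw ZBC encodings (hence ZC6_READ_ONLY being 0xd while
 * ZC5_FULL is 0xe), consistent with the zbcr05 table cited above.
 */
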
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time since boot to complete this cmd */
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool init_poll;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;	/* bumped when mq_poll returns > 0 */

struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
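/*
 * Reading aid for the tables that follow: each opcode_info_t is a node in
 * a one-level tree. A leaf has num_attached == 0; a preferred variant with
 * num_attached > 0 points through arrp at that many sibling variants that
 * share the same SDEB_I_* index, and num_attached == 0xff marks the table
 * terminator, as the field comments above state.
 */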
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};


static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	    0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

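/*
 * Worked example of the mapping above: a READ(10) cdb starts with 0x28,
 * and opcode_ind_arr[0x28] is SDEB_I_READ (9). opcode_info_arr[9], defined
 * further below, is the READ(16) entry whose read_iarr overflow array
 * carries the READ(6)/(10)/(12) variants; all of them are serviced by
 * resp_read_dt0().
 */
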
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

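/*
 * Illustrative note, not a spec: SDEG_RES_IMMED_MASK above is bit 30 of
 * the result int, clear of both the SAM status byte (bits 7:0) and the
 * DID_* host byte (bits 23:16), so a handler for a cdb that carries an
 * IMMED bit (e.g. START STOP UNIT) can OR it onto a normal result such
 * as check_condition_result without clobbering either field.
 */
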
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} },	/* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

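/*
 * Reading aid for the len_mask initialisers in these tables (see the
 * opcode_info_t field comments above): byte 0 is the cdb length and the
 * remaining bytes flag which bits of each cdb byte are meaningful. In
 * reserve_iarr just above, {6, 0x1f, ...} describes RESERVE(6): a 6 byte
 * cdb in which only bits 4..0 of byte 1 are significant, with 0xc7
 * masking the trailing CONTROL byte.
 */
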
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, /* REQUEST SENSE */
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
			  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;	/* io_uring iopoll interface */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

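/*
 * Worked numbers for the queue sizing above, assuming a 64-bit build:
 * SDEBUG_CANQUEUE = SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG = 3 * 64 = 192,
 * so each element of sdebug_q_arr can track at most 192 in-flight commands
 * in its three-long in_use_bm bitmap (96 on a 32-bit build).
 */
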
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;


/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;
	if (c_d)
		sks[0] |= 0x40;
	if (in_bit >= 0) {
		sks[0] |= 0x8;
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
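/*
 * Example of the helper above: the resp_inquiry() call
 * mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1) (made for an unsupported
 * VPD page) builds ILLEGAL REQUEST with asc INVALID_FIELD_IN_CDB (0x24)
 * and a sense-key specific field of c0 00 02: SKSV and C/D set, byte
 * pointer 2, no bit pointer.
 */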
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	if (!scp->sense_buffer) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense(scp, sdebug_dsense, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}

static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

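/*
 * Worked example for fill_from_dev_buffer() above: if scsi_bufflen(scp)
 * is 512 but the prepared response array holds only 96 bytes, then 96
 * bytes are copied and resid becomes 512 - 96 = 416, telling the
 * mid-level how much of the data-in buffer was left untouched.
 */
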
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}


static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}


static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}


static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
arr) 1571 return DID_REQUEUE << 16; 1572 is_disk = (sdebug_ptype == TYPE_DISK); 1573 is_zbc = (devip->zmodel != BLK_ZONED_NONE); 1574 is_disk_zbc = (is_disk || is_zbc); 1575 have_wlun = scsi_is_wlun(scp->device->lun); 1576 if (have_wlun) 1577 pq_pdt = TYPE_WLUN; /* present, wlun */ 1578 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) 1579 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ 1580 else 1581 pq_pdt = (sdebug_ptype & 0x1f); 1582 arr[0] = pq_pdt; 1583 if (0x2 & cmd[1]) { /* CMDDT bit set */ 1584 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); 1585 kfree(arr); 1586 return check_condition_result; 1587 } else if (0x1 & cmd[1]) { /* EVPD bit set */ 1588 int lu_id_num, port_group_id, target_dev_id, len; 1589 char lu_id_str[6]; 1590 int host_no = devip->sdbg_host->shost->host_no; 1591 1592 port_group_id = (((host_no + 1) & 0x7f) << 8) + 1593 (devip->channel & 0x7f); 1594 if (sdebug_vpd_use_hostno == 0) 1595 host_no = 0; 1596 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + 1597 (devip->target * 1000) + devip->lun); 1598 target_dev_id = ((host_no + 1) * 2000) + 1599 (devip->target * 1000) - 3; 1600 len = scnprintf(lu_id_str, 6, "%d", lu_id_num); 1601 if (0 == cmd[2]) { /* supported vital product data pages */ 1602 arr[1] = cmd[2]; /*sanity */ 1603 n = 4; 1604 arr[n++] = 0x0; /* this page */ 1605 arr[n++] = 0x80; /* unit serial number */ 1606 arr[n++] = 0x83; /* device identification */ 1607 arr[n++] = 0x84; /* software interface ident. */ 1608 arr[n++] = 0x85; /* management network addresses */ 1609 arr[n++] = 0x86; /* extended inquiry */ 1610 arr[n++] = 0x87; /* mode page policy */ 1611 arr[n++] = 0x88; /* SCSI ports */ 1612 if (is_disk_zbc) { /* SBC or ZBC */ 1613 arr[n++] = 0x89; /* ATA information */ 1614 arr[n++] = 0xb0; /* Block limits */ 1615 arr[n++] = 0xb1; /* Block characteristics */ 1616 if (is_disk) 1617 arr[n++] = 0xb2; /* LB Provisioning */ 1618 if (is_zbc) 1619 arr[n++] = 0xb6; /* ZB dev. char. */ 1620 } 1621 arr[3] = n - 4; /* number of supported VPD pages */ 1622 } else if (0x80 == cmd[2]) { /* unit serial number */ 1623 arr[1] = cmd[2]; /*sanity */ 1624 arr[3] = len; 1625 memcpy(&arr[4], lu_id_str, len); 1626 } else if (0x83 == cmd[2]) { /* device identification */ 1627 arr[1] = cmd[2]; /*sanity */ 1628 arr[3] = inquiry_vpd_83(&arr[4], port_group_id, 1629 target_dev_id, lu_id_num, 1630 lu_id_str, len, 1631 &devip->lu_name); 1632 } else if (0x84 == cmd[2]) { /* Software interface ident. 
*/ 1633 arr[1] = cmd[2]; /*sanity */ 1634 arr[3] = inquiry_vpd_84(&arr[4]); 1635 } else if (0x85 == cmd[2]) { /* Management network addresses */ 1636 arr[1] = cmd[2]; /*sanity */ 1637 arr[3] = inquiry_vpd_85(&arr[4]); 1638 } else if (0x86 == cmd[2]) { /* extended inquiry */ 1639 arr[1] = cmd[2]; /*sanity */ 1640 arr[3] = 0x3c; /* number of following entries */ 1641 if (sdebug_dif == T10_PI_TYPE3_PROTECTION) 1642 arr[4] = 0x4; /* SPT: GRD_CHK:1 */ 1643 else if (have_dif_prot) 1644 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ 1645 else 1646 arr[4] = 0x0; /* no protection stuff */ 1647 arr[5] = 0x7; /* head of q, ordered + simple q's */ 1648 } else if (0x87 == cmd[2]) { /* mode page policy */ 1649 arr[1] = cmd[2]; /*sanity */ 1650 arr[3] = 0x8; /* number of following entries */ 1651 arr[4] = 0x2; /* disconnect-reconnect mp */ 1652 arr[6] = 0x80; /* mlus, shared */ 1653 arr[8] = 0x18; /* protocol specific lu */ 1654 arr[10] = 0x82; /* mlus, per initiator port */ 1655 } else if (0x88 == cmd[2]) { /* SCSI Ports */ 1656 arr[1] = cmd[2]; /*sanity */ 1657 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id); 1658 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */ 1659 arr[1] = cmd[2]; /*sanity */ 1660 n = inquiry_vpd_89(&arr[4]); 1661 put_unaligned_be16(n, arr + 2); 1662 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */ 1663 arr[1] = cmd[2]; /*sanity */ 1664 arr[3] = inquiry_vpd_b0(&arr[4]); 1665 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */ 1666 arr[1] = cmd[2]; /*sanity */ 1667 arr[3] = inquiry_vpd_b1(devip, &arr[4]); 1668 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */ 1669 arr[1] = cmd[2]; /*sanity */ 1670 arr[3] = inquiry_vpd_b2(&arr[4]); 1671 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */ 1672 arr[1] = cmd[2]; /*sanity */ 1673 arr[3] = inquiry_vpd_b6(devip, &arr[4]); 1674 } else { 1675 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); 1676 kfree(arr); 1677 return check_condition_result; 1678 } 1679 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); 1680 ret = fill_from_dev_buffer(scp, arr, 1681 min(len, SDEBUG_MAX_INQ_ARR_SZ)); 1682 kfree(arr); 1683 return ret; 1684 } 1685 /* drops through here for a standard inquiry */ 1686 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */ 1687 arr[2] = sdebug_scsi_level; 1688 arr[3] = 2; /* response_data_format==2 */ 1689 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1690 arr[5] = (int)have_dif_prot; /* PROTECT bit */ 1691 if (sdebug_vpd_use_hostno == 0) 1692 arr[5] |= 0x10; /* claim: implicit TPGS */ 1693 arr[6] = 0x10; /* claim: MultiP */ 1694 /* arr[6] |= 0x40; ... 
claim: EncServ (enclosure services) */
	arr[7] = 0xa;	/* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}

/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	int alloc_len = cmd[4];
	int len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;	/* NOT_READY in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ?
0x1 : 0x2; 1750 } 1751 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { 1752 /* Information exceptions control mode page: TEST=1, MRIE=6 */ 1753 if (dsense) { 1754 arr[0] = 0x72; 1755 arr[1] = 0x0; /* NO_SENSE in sense_key */ 1756 arr[2] = THRESHOLD_EXCEEDED; 1757 arr[3] = 0xff; /* Failure prediction(false) */ 1758 len = 8; 1759 } else { 1760 arr[0] = 0x70; 1761 arr[2] = 0x0; /* NO_SENSE in sense_key */ 1762 arr[7] = 0xa; /* 18 byte sense buffer */ 1763 arr[12] = THRESHOLD_EXCEEDED; 1764 arr[13] = 0xff; /* Failure prediction(false) */ 1765 } 1766 } else { /* nothing to report */ 1767 if (dsense) { 1768 len = 8; 1769 memset(arr, 0, len); 1770 arr[0] = 0x72; 1771 } else { 1772 memset(arr, 0, len); 1773 arr[0] = 0x70; 1774 arr[7] = 0xa; 1775 } 1776 } 1777 return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len)); 1778 } 1779 1780 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 1781 { 1782 unsigned char *cmd = scp->cmnd; 1783 int power_cond, want_stop, stopped_state; 1784 bool changing; 1785 1786 power_cond = (cmd[4] & 0xf0) >> 4; 1787 if (power_cond) { 1788 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); 1789 return check_condition_result; 1790 } 1791 want_stop = !(cmd[4] & 1); 1792 stopped_state = atomic_read(&devip->stopped); 1793 if (stopped_state == 2) { 1794 ktime_t now_ts = ktime_get_boottime(); 1795 1796 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) { 1797 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts)); 1798 1799 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) { 1800 /* tur_ms_to_ready timer extinguished */ 1801 atomic_set(&devip->stopped, 0); 1802 stopped_state = 0; 1803 } 1804 } 1805 if (stopped_state == 2) { 1806 if (want_stop) { 1807 stopped_state = 1; /* dummy up success */ 1808 } else { /* Disallow tur_ms_to_ready delay to be overridden */ 1809 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */); 1810 return check_condition_result; 1811 } 1812 } 1813 } 1814 changing = (stopped_state != want_stop); 1815 if (changing) 1816 atomic_xchg(&devip->stopped, want_stop); 1817 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */ 1818 return SDEG_RES_IMMED_MASK; 1819 else 1820 return 0; 1821 } 1822 1823 static sector_t get_sdebug_capacity(void) 1824 { 1825 static const unsigned int gibibyte = 1073741824; 1826 1827 if (sdebug_virtual_gb > 0) 1828 return (sector_t)sdebug_virtual_gb * 1829 (gibibyte / sdebug_sector_size); 1830 else 1831 return sdebug_store_sectors; 1832 } 1833 1834 #define SDEBUG_READCAP_ARR_SZ 8 1835 static int resp_readcap(struct scsi_cmnd *scp, 1836 struct sdebug_dev_info *devip) 1837 { 1838 unsigned char arr[SDEBUG_READCAP_ARR_SZ]; 1839 unsigned int capac; 1840 1841 /* following just in case virtual_gb changed */ 1842 sdebug_capacity = get_sdebug_capacity(); 1843 memset(arr, 0, SDEBUG_READCAP_ARR_SZ); 1844 if (sdebug_capacity < 0xffffffff) { 1845 capac = (unsigned int)sdebug_capacity - 1; 1846 put_unaligned_be32(capac, arr + 0); 1847 } else 1848 put_unaligned_be32(0xffffffff, arr + 0); 1849 put_unaligned_be16(sdebug_sector_size, arr + 6); 1850 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); 1851 } 1852 1853 #define SDEBUG_READCAP16_ARR_SZ 32 1854 static int resp_readcap16(struct scsi_cmnd *scp, 1855 struct sdebug_dev_info *devip) 1856 { 1857 unsigned char *cmd = scp->cmnd; 1858 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1859 u32 alloc_len; 1860 1861 alloc_len = get_unaligned_be32(cmd + 10); 1862 /* following just in case virtual_gb changed 
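 * (get_sdebug_capacity() recomputes the capacity from sdebug_virtual_gb
 * on every call, so a runtime change to that module parameter shows up
 * in the next READ CAPACITY(16) response without re-attaching the device)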
 */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}

#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host
	 * number (host_no).
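	 * With sdebug_vpd_use_hostno == 0 the value (host_no % 3) below
	 * reports this group as active/optimized (0x0), active/non-optimized
	 * (0x1) or standby (0x2), so multipath setups that span several
	 * simulated hosts exercise different ALUA states.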
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3;	/* Asymm access state */
		arr[n++] = 0x0F;	/* claim: all states are supported */
	} else {
		arr[n++] = 0x0;		/* Active/Optimized path */
		arr[n++] = 0x01;	/* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;	/* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest of:
	 * - the allocation length from the cdb
	 * - the constructed response length
	 * - the maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}

static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ?
20 : 8; 2012 put_unaligned_be32(count * bump, arr); 2013 for (offset = 4, oip = opcode_info_arr; 2014 oip->num_attached != 0xff && offset < a_len; ++oip) { 2015 if (F_INV_OP & oip->flags) 2016 continue; 2017 na = oip->num_attached; 2018 arr[offset] = oip->opcode; 2019 put_unaligned_be16(oip->sa, arr + offset + 2); 2020 if (rctd) 2021 arr[offset + 5] |= 0x2; 2022 if (FF_SA & oip->flags) 2023 arr[offset + 5] |= 0x1; 2024 put_unaligned_be16(oip->len_mask[0], arr + offset + 6); 2025 if (rctd) 2026 put_unaligned_be16(0xa, arr + offset + 8); 2027 r_oip = oip; 2028 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { 2029 if (F_INV_OP & oip->flags) 2030 continue; 2031 offset += bump; 2032 arr[offset] = oip->opcode; 2033 put_unaligned_be16(oip->sa, arr + offset + 2); 2034 if (rctd) 2035 arr[offset + 5] |= 0x2; 2036 if (FF_SA & oip->flags) 2037 arr[offset + 5] |= 0x1; 2038 put_unaligned_be16(oip->len_mask[0], 2039 arr + offset + 6); 2040 if (rctd) 2041 put_unaligned_be16(0xa, 2042 arr + offset + 8); 2043 } 2044 oip = r_oip; 2045 offset += bump; 2046 } 2047 break; 2048 case 1: /* one command: opcode only */ 2049 case 2: /* one command: opcode plus service action */ 2050 case 3: /* one command: if sa==0 then opcode only else opcode+sa */ 2051 sdeb_i = opcode_ind_arr[req_opcode]; 2052 oip = &opcode_info_arr[sdeb_i]; 2053 if (F_INV_OP & oip->flags) { 2054 supp = 1; 2055 offset = 4; 2056 } else { 2057 if (1 == reporting_opts) { 2058 if (FF_SA & oip->flags) { 2059 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2060 2, 2); 2061 kfree(arr); 2062 return check_condition_result; 2063 } 2064 req_sa = 0; 2065 } else if (2 == reporting_opts && 2066 0 == (FF_SA & oip->flags)) { 2067 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); 2068 kfree(arr); /* point at requested sa */ 2069 return check_condition_result; 2070 } 2071 if (0 == (FF_SA & oip->flags) && 2072 req_opcode == oip->opcode) 2073 supp = 3; 2074 else if (0 == (FF_SA & oip->flags)) { 2075 na = oip->num_attached; 2076 for (k = 0, oip = oip->arrp; k < na; 2077 ++k, ++oip) { 2078 if (req_opcode == oip->opcode) 2079 break; 2080 } 2081 supp = (k >= na) ? 1 : 3; 2082 } else if (req_sa != oip->sa) { 2083 na = oip->num_attached; 2084 for (k = 0, oip = oip->arrp; k < na; 2085 ++k, ++oip) { 2086 if (req_sa == oip->sa) 2087 break; 2088 } 2089 supp = (k >= na) ? 1 : 3; 2090 } else 2091 supp = 3; 2092 if (3 == supp) { 2093 u = oip->len_mask[0]; 2094 put_unaligned_be16(u, arr + 2); 2095 arr[4] = oip->opcode; 2096 for (k = 1; k < u; ++k) 2097 arr[4 + k] = (k < 16) ? 2098 oip->len_mask[k] : 0xff; 2099 offset = 4 + u; 2100 } else 2101 offset = 4; 2102 } 2103 arr[1] = (rctd ? 0x80 : 0) | supp; 2104 if (rctd) { 2105 put_unaligned_be16(0xa, arr + offset); 2106 offset += 12; 2107 } 2108 break; 2109 default: 2110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2); 2111 kfree(arr); 2112 return check_condition_result; 2113 } 2114 offset = (offset < a_len) ? offset : a_len; 2115 len = (offset < alloc_len) ? 
offset : alloc_len; 2116 errsts = fill_from_dev_buffer(scp, arr, len); 2117 kfree(arr); 2118 return errsts; 2119 } 2120 2121 static int resp_rsup_tmfs(struct scsi_cmnd *scp, 2122 struct sdebug_dev_info *devip) 2123 { 2124 bool repd; 2125 u32 alloc_len, len; 2126 u8 arr[16]; 2127 u8 *cmd = scp->cmnd; 2128 2129 memset(arr, 0, sizeof(arr)); 2130 repd = !!(cmd[2] & 0x80); 2131 alloc_len = get_unaligned_be32(cmd + 6); 2132 if (alloc_len < 4) { 2133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); 2134 return check_condition_result; 2135 } 2136 arr[0] = 0xc8; /* ATS | ATSS | LURS */ 2137 arr[1] = 0x1; /* ITNRS */ 2138 if (repd) { 2139 arr[3] = 0xc; 2140 len = 16; 2141 } else 2142 len = 4; 2143 2144 len = (len < alloc_len) ? len : alloc_len; 2145 return fill_from_dev_buffer(scp, arr, len); 2146 } 2147 2148 /* <<Following mode page info copied from ST318451LW>> */ 2149 2150 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target) 2151 { /* Read-Write Error Recovery page for mode_sense */ 2152 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 2153 5, 0, 0xff, 0xff}; 2154 2155 memcpy(p, err_recov_pg, sizeof(err_recov_pg)); 2156 if (1 == pcontrol) 2157 memset(p + 2, 0, sizeof(err_recov_pg) - 2); 2158 return sizeof(err_recov_pg); 2159 } 2160 2161 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target) 2162 { /* Disconnect-Reconnect page for mode_sense */ 2163 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0, 2164 0, 0, 0, 0, 0, 0, 0, 0}; 2165 2166 memcpy(p, disconnect_pg, sizeof(disconnect_pg)); 2167 if (1 == pcontrol) 2168 memset(p + 2, 0, sizeof(disconnect_pg) - 2); 2169 return sizeof(disconnect_pg); 2170 } 2171 2172 static int resp_format_pg(unsigned char *p, int pcontrol, int target) 2173 { /* Format device page for mode_sense */ 2174 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0, 2175 0, 0, 0, 0, 0, 0, 0, 0, 2176 0, 0, 0, 0, 0x40, 0, 0, 0}; 2177 2178 memcpy(p, format_pg, sizeof(format_pg)); 2179 put_unaligned_be16(sdebug_sectors_per, p + 10); 2180 put_unaligned_be16(sdebug_sector_size, p + 12); 2181 if (sdebug_removable) 2182 p[20] |= 0x20; /* should agree with INQUIRY */ 2183 if (1 == pcontrol) 2184 memset(p + 2, 0, sizeof(format_pg) - 2); 2185 return sizeof(format_pg); 2186 } 2187 2188 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 2189 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 2190 0, 0, 0, 0}; 2191 2192 static int resp_caching_pg(unsigned char *p, int pcontrol, int target) 2193 { /* Caching page for mode_sense */ 2194 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, 2195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 2196 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 2197 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; 2198 2199 if (SDEBUG_OPT_N_WCE & sdebug_opts) 2200 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ 2201 memcpy(p, caching_pg, sizeof(caching_pg)); 2202 if (1 == pcontrol) 2203 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg)); 2204 else if (2 == pcontrol) 2205 memcpy(p, d_caching_pg, sizeof(d_caching_pg)); 2206 return sizeof(caching_pg); 2207 } 2208 2209 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 2210 0, 0, 0x2, 0x4b}; 2211 2212 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target) 2213 { /* Control mode page for mode_sense */ 2214 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 2215 0, 0, 0, 0}; 2216 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 2217 0, 0, 0x2, 
0x4b}; 2218 2219 if (sdebug_dsense) 2220 ctrl_m_pg[2] |= 0x4; 2221 else 2222 ctrl_m_pg[2] &= ~0x4; 2223 2224 if (sdebug_ato) 2225 ctrl_m_pg[5] |= 0x80; /* ATO=1 */ 2226 2227 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); 2228 if (1 == pcontrol) 2229 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg)); 2230 else if (2 == pcontrol) 2231 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg)); 2232 return sizeof(ctrl_m_pg); 2233 } 2234 2235 2236 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target) 2237 { /* Informational Exceptions control mode page for mode_sense */ 2238 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0, 2239 0, 0, 0x0, 0x0}; 2240 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 2241 0, 0, 0x0, 0x0}; 2242 2243 memcpy(p, iec_m_pg, sizeof(iec_m_pg)); 2244 if (1 == pcontrol) 2245 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg)); 2246 else if (2 == pcontrol) 2247 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg)); 2248 return sizeof(iec_m_pg); 2249 } 2250 2251 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target) 2252 { /* SAS SSP mode page - short format for mode_sense */ 2253 unsigned char sas_sf_m_pg[] = {0x19, 0x6, 2254 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0}; 2255 2256 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg)); 2257 if (1 == pcontrol) 2258 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2); 2259 return sizeof(sas_sf_m_pg); 2260 } 2261 2262 2263 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target, 2264 int target_dev_id) 2265 { /* SAS phy control and discover mode page for mode_sense */ 2266 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, 2267 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, 2268 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 2270 0x2, 0, 0, 0, 0, 0, 0, 0, 2271 0x88, 0x99, 0, 0, 0, 0, 0, 0, 2272 0, 0, 0, 0, 0, 0, 0, 0, 2273 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, 2274 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 2275 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 2276 0x3, 0, 0, 0, 0, 0, 0, 0, 2277 0x88, 0x99, 0, 0, 0, 0, 0, 0, 2278 0, 0, 0, 0, 0, 0, 0, 0, 2279 }; 2280 int port_a, port_b; 2281 2282 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16); 2283 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24); 2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64); 2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72); 2286 port_a = target_dev_id + 1; 2287 port_b = port_a + 1; 2288 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); 2289 put_unaligned_be32(port_a, p + 20); 2290 put_unaligned_be32(port_b, p + 48 + 20); 2291 if (1 == pcontrol) 2292 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); 2293 return sizeof(sas_pcd_m_pg); 2294 } 2295 2296 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol) 2297 { /* SAS SSP shared protocol specific port mode subpage */ 2298 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0, 2299 0, 0, 0, 0, 0, 0, 0, 0, 2300 }; 2301 2302 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg)); 2303 if (1 == pcontrol) 2304 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4); 2305 return sizeof(sas_sha_m_pg); 2306 } 2307 2308 #define SDEBUG_MAX_MSENSE_SZ 256 2309 2310 static int resp_mode_sense(struct scsi_cmnd *scp, 2311 struct sdebug_dev_info *devip) 2312 { 2313 int pcontrol, pcode, subpcode, bd_len; 2314 unsigned char dev_spec; 2315 int alloc_len, offset, len, target_dev_id; 2316 int target = scp->device->id; 2317 unsigned char *ap; 2318 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 2319 unsigned char *cmd = scp->cmnd; 2320 bool dbd, llbaa, msense_6, 
is_disk, is_zbc, bad_pcode; 2321 2322 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ 2323 pcontrol = (cmd[2] & 0xc0) >> 6; 2324 pcode = cmd[2] & 0x3f; 2325 subpcode = cmd[3]; 2326 msense_6 = (MODE_SENSE == cmd[0]); 2327 llbaa = msense_6 ? false : !!(cmd[1] & 0x10); 2328 is_disk = (sdebug_ptype == TYPE_DISK); 2329 is_zbc = (devip->zmodel != BLK_ZONED_NONE); 2330 if ((is_disk || is_zbc) && !dbd) 2331 bd_len = llbaa ? 16 : 8; 2332 else 2333 bd_len = 0; 2334 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); 2335 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 2336 if (0x3 == pcontrol) { /* Saving values not supported */ 2337 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); 2338 return check_condition_result; 2339 } 2340 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 2341 (devip->target * 1000) - 3; 2342 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */ 2343 if (is_disk || is_zbc) { 2344 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ 2345 if (sdebug_wp) 2346 dev_spec |= 0x80; 2347 } else 2348 dev_spec = 0x0; 2349 if (msense_6) { 2350 arr[2] = dev_spec; 2351 arr[3] = bd_len; 2352 offset = 4; 2353 } else { 2354 arr[3] = dev_spec; 2355 if (16 == bd_len) 2356 arr[4] = 0x1; /* set LONGLBA bit */ 2357 arr[7] = bd_len; /* assume 255 or less */ 2358 offset = 8; 2359 } 2360 ap = arr + offset; 2361 if ((bd_len > 0) && (!sdebug_capacity)) 2362 sdebug_capacity = get_sdebug_capacity(); 2363 2364 if (8 == bd_len) { 2365 if (sdebug_capacity > 0xfffffffe) 2366 put_unaligned_be32(0xffffffff, ap + 0); 2367 else 2368 put_unaligned_be32(sdebug_capacity, ap + 0); 2369 put_unaligned_be16(sdebug_sector_size, ap + 6); 2370 offset += bd_len; 2371 ap = arr + offset; 2372 } else if (16 == bd_len) { 2373 put_unaligned_be64((u64)sdebug_capacity, ap + 0); 2374 put_unaligned_be32(sdebug_sector_size, ap + 12); 2375 offset += bd_len; 2376 ap = arr + offset; 2377 } 2378 2379 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { 2380 /* TODO: Control Extension page */ 2381 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2382 return check_condition_result; 2383 } 2384 bad_pcode = false; 2385 2386 switch (pcode) { 2387 case 0x1: /* Read-Write error recovery page, direct access */ 2388 len = resp_err_recov_pg(ap, pcontrol, target); 2389 offset += len; 2390 break; 2391 case 0x2: /* Disconnect-Reconnect page, all devices */ 2392 len = resp_disconnect_pg(ap, pcontrol, target); 2393 offset += len; 2394 break; 2395 case 0x3: /* Format device page, direct access */ 2396 if (is_disk) { 2397 len = resp_format_pg(ap, pcontrol, target); 2398 offset += len; 2399 } else 2400 bad_pcode = true; 2401 break; 2402 case 0x8: /* Caching page, direct access */ 2403 if (is_disk || is_zbc) { 2404 len = resp_caching_pg(ap, pcontrol, target); 2405 offset += len; 2406 } else 2407 bad_pcode = true; 2408 break; 2409 case 0xa: /* Control Mode page, all devices */ 2410 len = resp_ctrl_m_pg(ap, pcontrol, target); 2411 offset += len; 2412 break; 2413 case 0x19: /* if spc==1 then sas phy, control+discover */ 2414 if ((subpcode > 0x2) && (subpcode < 0xff)) { 2415 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2416 return check_condition_result; 2417 } 2418 len = 0; 2419 if ((0x0 == subpcode) || (0xff == subpcode)) 2420 len += resp_sas_sf_m_pg(ap + len, pcontrol, target); 2421 if ((0x1 == subpcode) || (0xff == subpcode)) 2422 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target, 2423 target_dev_id); 2424 if ((0x2 == subpcode) || (0xff == subpcode)) 2425 len += resp_sas_sha_m_spg(ap + len, 
pcontrol); 2426 offset += len; 2427 break; 2428 case 0x1c: /* Informational Exceptions Mode page, all devices */ 2429 len = resp_iec_m_pg(ap, pcontrol, target); 2430 offset += len; 2431 break; 2432 case 0x3f: /* Read all Mode pages */ 2433 if ((0 == subpcode) || (0xff == subpcode)) { 2434 len = resp_err_recov_pg(ap, pcontrol, target); 2435 len += resp_disconnect_pg(ap + len, pcontrol, target); 2436 if (is_disk) { 2437 len += resp_format_pg(ap + len, pcontrol, 2438 target); 2439 len += resp_caching_pg(ap + len, pcontrol, 2440 target); 2441 } else if (is_zbc) { 2442 len += resp_caching_pg(ap + len, pcontrol, 2443 target); 2444 } 2445 len += resp_ctrl_m_pg(ap + len, pcontrol, target); 2446 len += resp_sas_sf_m_pg(ap + len, pcontrol, target); 2447 if (0xff == subpcode) { 2448 len += resp_sas_pcd_m_spg(ap + len, pcontrol, 2449 target, target_dev_id); 2450 len += resp_sas_sha_m_spg(ap + len, pcontrol); 2451 } 2452 len += resp_iec_m_pg(ap + len, pcontrol, target); 2453 offset += len; 2454 } else { 2455 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2456 return check_condition_result; 2457 } 2458 break; 2459 default: 2460 bad_pcode = true; 2461 break; 2462 } 2463 if (bad_pcode) { 2464 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); 2465 return check_condition_result; 2466 } 2467 if (msense_6) 2468 arr[0] = offset - 1; 2469 else 2470 put_unaligned_be16((offset - 2), arr + 0); 2471 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset)); 2472 } 2473 2474 #define SDEBUG_MAX_MSELECT_SZ 512 2475 2476 static int resp_mode_select(struct scsi_cmnd *scp, 2477 struct sdebug_dev_info *devip) 2478 { 2479 int pf, sp, ps, md_len, bd_len, off, spf, pg_len; 2480 int param_len, res, mpage; 2481 unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; 2482 unsigned char *cmd = scp->cmnd; 2483 int mselect6 = (MODE_SELECT == cmd[0]); 2484 2485 memset(arr, 0, sizeof(arr)); 2486 pf = cmd[1] & 0x10; 2487 sp = cmd[1] & 0x1; 2488 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7); 2489 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { 2490 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); 2491 return check_condition_result; 2492 } 2493 res = fetch_to_dev_buffer(scp, arr, param_len); 2494 if (-1 == res) 2495 return DID_ERROR << 16; 2496 else if (sdebug_verbose && (res < param_len)) 2497 sdev_printk(KERN_INFO, scp->device, 2498 "%s: cdb indicated=%d, IO sent=%d bytes\n", 2499 __func__, param_len, res); 2500 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); 2501 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); 2502 if (md_len > 2) { 2503 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); 2504 return check_condition_result; 2505 } 2506 off = bd_len + (mselect6 ? 4 : 8); 2507 mpage = arr[off] & 0x3f; 2508 ps = !!(arr[off] & 0x80); 2509 if (ps) { 2510 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7); 2511 return check_condition_result; 2512 } 2513 spf = !!(arr[off] & 0x40); 2514 pg_len = spf ? 
(get_unaligned_be16(arr + off + 2) + 4) : 2515 (arr[off + 1] + 2); 2516 if ((pg_len + off) > param_len) { 2517 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2518 PARAMETER_LIST_LENGTH_ERR, 0); 2519 return check_condition_result; 2520 } 2521 switch (mpage) { 2522 case 0x8: /* Caching Mode page */ 2523 if (caching_pg[1] == arr[off + 1]) { 2524 memcpy(caching_pg + 2, arr + off + 2, 2525 sizeof(caching_pg) - 2); 2526 goto set_mode_changed_ua; 2527 } 2528 break; 2529 case 0xa: /* Control Mode page */ 2530 if (ctrl_m_pg[1] == arr[off + 1]) { 2531 memcpy(ctrl_m_pg + 2, arr + off + 2, 2532 sizeof(ctrl_m_pg) - 2); 2533 if (ctrl_m_pg[4] & 0x8) 2534 sdebug_wp = true; 2535 else 2536 sdebug_wp = false; 2537 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); 2538 goto set_mode_changed_ua; 2539 } 2540 break; 2541 case 0x1c: /* Informational Exceptions Mode page */ 2542 if (iec_m_pg[1] == arr[off + 1]) { 2543 memcpy(iec_m_pg + 2, arr + off + 2, 2544 sizeof(iec_m_pg) - 2); 2545 goto set_mode_changed_ua; 2546 } 2547 break; 2548 default: 2549 break; 2550 } 2551 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5); 2552 return check_condition_result; 2553 set_mode_changed_ua: 2554 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); 2555 return 0; 2556 } 2557 2558 static int resp_temp_l_pg(unsigned char *arr) 2559 { 2560 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, 2561 0x0, 0x1, 0x3, 0x2, 0x0, 65, 2562 }; 2563 2564 memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); 2565 return sizeof(temp_l_pg); 2566 } 2567 2568 static int resp_ie_l_pg(unsigned char *arr) 2569 { 2570 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, 2571 }; 2572 2573 memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); 2574 if (iec_m_pg[2] & 0x4) { /* TEST bit set */ 2575 arr[4] = THRESHOLD_EXCEEDED; 2576 arr[5] = 0xff; 2577 } 2578 return sizeof(ie_l_pg); 2579 } 2580 2581 #define SDEBUG_MAX_LSENSE_SZ 512 2582 2583 static int resp_log_sense(struct scsi_cmnd *scp, 2584 struct sdebug_dev_info *devip) 2585 { 2586 int ppc, sp, pcode, subpcode, alloc_len, len, n; 2587 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 2588 unsigned char *cmd = scp->cmnd; 2589 2590 memset(arr, 0, sizeof(arr)); 2591 ppc = cmd[1] & 0x2; 2592 sp = cmd[1] & 0x1; 2593 if (ppc || sp) { 2594 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 
1 : 0); 2595 return check_condition_result; 2596 } 2597 pcode = cmd[2] & 0x3f; 2598 subpcode = cmd[3] & 0xff; 2599 alloc_len = get_unaligned_be16(cmd + 7); 2600 arr[0] = pcode; 2601 if (0 == subpcode) { 2602 switch (pcode) { 2603 case 0x0: /* Supported log pages log page */ 2604 n = 4; 2605 arr[n++] = 0x0; /* this page */ 2606 arr[n++] = 0xd; /* Temperature */ 2607 arr[n++] = 0x2f; /* Informational exceptions */ 2608 arr[3] = n - 4; 2609 break; 2610 case 0xd: /* Temperature log page */ 2611 arr[3] = resp_temp_l_pg(arr + 4); 2612 break; 2613 case 0x2f: /* Informational exceptions log page */ 2614 arr[3] = resp_ie_l_pg(arr + 4); 2615 break; 2616 default: 2617 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); 2618 return check_condition_result; 2619 } 2620 } else if (0xff == subpcode) { 2621 arr[0] |= 0x40; 2622 arr[1] = subpcode; 2623 switch (pcode) { 2624 case 0x0: /* Supported log pages and subpages log page */ 2625 n = 4; 2626 arr[n++] = 0x0; 2627 arr[n++] = 0x0; /* 0,0 page */ 2628 arr[n++] = 0x0; 2629 arr[n++] = 0xff; /* this page */ 2630 arr[n++] = 0xd; 2631 arr[n++] = 0x0; /* Temperature */ 2632 arr[n++] = 0x2f; 2633 arr[n++] = 0x0; /* Informational exceptions */ 2634 arr[3] = n - 4; 2635 break; 2636 case 0xd: /* Temperature subpages */ 2637 n = 4; 2638 arr[n++] = 0xd; 2639 arr[n++] = 0x0; /* Temperature */ 2640 arr[3] = n - 4; 2641 break; 2642 case 0x2f: /* Informational exceptions subpages */ 2643 n = 4; 2644 arr[n++] = 0x2f; 2645 arr[n++] = 0x0; /* Informational exceptions */ 2646 arr[3] = n - 4; 2647 break; 2648 default: 2649 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); 2650 return check_condition_result; 2651 } 2652 } else { 2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2654 return check_condition_result; 2655 } 2656 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len); 2657 return fill_from_dev_buffer(scp, arr, 2658 min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ)); 2659 } 2660 2661 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) 2662 { 2663 return devip->nr_zones != 0; 2664 } 2665 2666 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip, 2667 unsigned long long lba) 2668 { 2669 return &devip->zstate[lba >> devip->zsize_shift]; 2670 } 2671 2672 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp) 2673 { 2674 return zsp->z_type == ZBC_ZONE_TYPE_CNV; 2675 } 2676 2677 static void zbc_close_zone(struct sdebug_dev_info *devip, 2678 struct sdeb_zone_state *zsp) 2679 { 2680 enum sdebug_z_cond zc; 2681 2682 if (zbc_zone_is_conv(zsp)) 2683 return; 2684 2685 zc = zsp->z_cond; 2686 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)) 2687 return; 2688 2689 if (zc == ZC2_IMPLICIT_OPEN) 2690 devip->nr_imp_open--; 2691 else 2692 devip->nr_exp_open--; 2693 2694 if (zsp->z_wp == zsp->z_start) { 2695 zsp->z_cond = ZC1_EMPTY; 2696 } else { 2697 zsp->z_cond = ZC4_CLOSED; 2698 devip->nr_closed++; 2699 } 2700 } 2701 2702 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip) 2703 { 2704 struct sdeb_zone_state *zsp = &devip->zstate[0]; 2705 unsigned int i; 2706 2707 for (i = 0; i < devip->nr_zones; i++, zsp++) { 2708 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) { 2709 zbc_close_zone(devip, zsp); 2710 return; 2711 } 2712 } 2713 } 2714 2715 static void zbc_open_zone(struct sdebug_dev_info *devip, 2716 struct sdeb_zone_state *zsp, bool explicit) 2717 { 2718 enum sdebug_z_cond zc; 2719 2720 if (zbc_zone_is_conv(zsp)) 2721 return; 2722 2723 zc = zsp->z_cond; 2724 if ((explicit && zc == ZC3_EXPLICIT_OPEN) || 2725 (!explicit && zc == 
ZC2_IMPLICIT_OPEN)) 2726 return; 2727 2728 /* Close an implicit open zone if necessary */ 2729 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN) 2730 zbc_close_zone(devip, zsp); 2731 else if (devip->max_open && 2732 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open) 2733 zbc_close_imp_open_zone(devip); 2734 2735 if (zsp->z_cond == ZC4_CLOSED) 2736 devip->nr_closed--; 2737 if (explicit) { 2738 zsp->z_cond = ZC3_EXPLICIT_OPEN; 2739 devip->nr_exp_open++; 2740 } else { 2741 zsp->z_cond = ZC2_IMPLICIT_OPEN; 2742 devip->nr_imp_open++; 2743 } 2744 } 2745 2746 static void zbc_inc_wp(struct sdebug_dev_info *devip, 2747 unsigned long long lba, unsigned int num) 2748 { 2749 struct sdeb_zone_state *zsp = zbc_zone(devip, lba); 2750 unsigned long long n, end, zend = zsp->z_start + zsp->z_size; 2751 2752 if (zbc_zone_is_conv(zsp)) 2753 return; 2754 2755 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) { 2756 zsp->z_wp += num; 2757 if (zsp->z_wp >= zend) 2758 zsp->z_cond = ZC5_FULL; 2759 return; 2760 } 2761 2762 while (num) { 2763 if (lba != zsp->z_wp) 2764 zsp->z_non_seq_resource = true; 2765 2766 end = lba + num; 2767 if (end >= zend) { 2768 n = zend - lba; 2769 zsp->z_wp = zend; 2770 } else if (end > zsp->z_wp) { 2771 n = num; 2772 zsp->z_wp = end; 2773 } else { 2774 n = num; 2775 } 2776 if (zsp->z_wp >= zend) 2777 zsp->z_cond = ZC5_FULL; 2778 2779 num -= n; 2780 lba += n; 2781 if (num) { 2782 zsp++; 2783 zend = zsp->z_start + zsp->z_size; 2784 } 2785 } 2786 } 2787 2788 static int check_zbc_access_params(struct scsi_cmnd *scp, 2789 unsigned long long lba, unsigned int num, bool write) 2790 { 2791 struct scsi_device *sdp = scp->device; 2792 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; 2793 struct sdeb_zone_state *zsp = zbc_zone(devip, lba); 2794 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1); 2795 2796 if (!write) { 2797 if (devip->zmodel == BLK_ZONED_HA) 2798 return 0; 2799 /* For host-managed, reads cannot cross zone types boundaries */ 2800 if (zsp_end != zsp && 2801 zbc_zone_is_conv(zsp) && 2802 !zbc_zone_is_conv(zsp_end)) { 2803 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2804 LBA_OUT_OF_RANGE, 2805 READ_INVDATA_ASCQ); 2806 return check_condition_result; 2807 } 2808 return 0; 2809 } 2810 2811 /* No restrictions for writes within conventional zones */ 2812 if (zbc_zone_is_conv(zsp)) { 2813 if (!zbc_zone_is_conv(zsp_end)) { 2814 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2815 LBA_OUT_OF_RANGE, 2816 WRITE_BOUNDARY_ASCQ); 2817 return check_condition_result; 2818 } 2819 return 0; 2820 } 2821 2822 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) { 2823 /* Writes cannot cross sequential zone boundaries */ 2824 if (zsp_end != zsp) { 2825 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2826 LBA_OUT_OF_RANGE, 2827 WRITE_BOUNDARY_ASCQ); 2828 return check_condition_result; 2829 } 2830 /* Cannot write full zones */ 2831 if (zsp->z_cond == ZC5_FULL) { 2832 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2833 INVALID_FIELD_IN_CDB, 0); 2834 return check_condition_result; 2835 } 2836 /* Writes must be aligned to the zone WP */ 2837 if (lba != zsp->z_wp) { 2838 mk_sense_buffer(scp, ILLEGAL_REQUEST, 2839 LBA_OUT_OF_RANGE, 2840 UNALIGNED_WRITE_ASCQ); 2841 return check_condition_result; 2842 } 2843 } 2844 2845 /* Handle implicit open of closed and empty zones */ 2846 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) { 2847 if (devip->max_open && 2848 devip->nr_exp_open >= devip->max_open) { 2849 mk_sense_buffer(scp, DATA_PROTECT, 2850 INSUFF_RES_ASC, 2851 INSUFF_ZONE_ASCQ); 2852 return 
check_condition_result; 2853 } 2854 zbc_open_zone(devip, zsp, false); 2855 } 2856 2857 return 0; 2858 } 2859 2860 static inline int check_device_access_params 2861 (struct scsi_cmnd *scp, unsigned long long lba, 2862 unsigned int num, bool write) 2863 { 2864 struct scsi_device *sdp = scp->device; 2865 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; 2866 2867 if (lba + num > sdebug_capacity) { 2868 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 2869 return check_condition_result; 2870 } 2871 /* transfer length excessive (tie in to block limits VPD page) */ 2872 if (num > sdebug_store_sectors) { 2873 /* needs work to find which cdb byte 'num' comes from */ 2874 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 2875 return check_condition_result; 2876 } 2877 if (write && unlikely(sdebug_wp)) { 2878 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2); 2879 return check_condition_result; 2880 } 2881 if (sdebug_dev_is_zoned(devip)) 2882 return check_zbc_access_params(scp, lba, num, write); 2883 2884 return 0; 2885 } 2886 2887 /* 2888 * Note: if BUG_ON() fires it usually indicates a problem with the parser 2889 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions 2890 * that access any of the "stores" in struct sdeb_store_info should call this 2891 * function with bug_if_fake_rw set to true. 2892 */ 2893 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip, 2894 bool bug_if_fake_rw) 2895 { 2896 if (sdebug_fake_rw) { 2897 BUG_ON(bug_if_fake_rw); /* See note above */ 2898 return NULL; 2899 } 2900 return xa_load(per_store_ap, devip->sdbg_host->si_idx); 2901 } 2902 2903 /* Returns number of bytes copied or -1 if error. */ 2904 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp, 2905 u32 sg_skip, u64 lba, u32 num, bool do_write) 2906 { 2907 int ret; 2908 u64 block, rest = 0; 2909 enum dma_data_direction dir; 2910 struct scsi_data_buffer *sdb = &scp->sdb; 2911 u8 *fsp; 2912 2913 if (do_write) { 2914 dir = DMA_TO_DEVICE; 2915 write_since_sync = true; 2916 } else { 2917 dir = DMA_FROM_DEVICE; 2918 } 2919 2920 if (!sdb->length || !sip) 2921 return 0; 2922 if (scp->sc_data_direction != dir) 2923 return -1; 2924 fsp = sip->storep; 2925 2926 block = do_div(lba, sdebug_store_sectors); 2927 if (block + num > sdebug_store_sectors) 2928 rest = block + num - sdebug_store_sectors; 2929 2930 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2931 fsp + (block * sdebug_sector_size), 2932 (num - rest) * sdebug_sector_size, sg_skip, do_write); 2933 if (ret != (num - rest) * sdebug_sector_size) 2934 return ret; 2935 2936 if (rest) { 2937 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2938 fsp, rest * sdebug_sector_size, 2939 sg_skip + ((num - rest) * sdebug_sector_size), 2940 do_write); 2941 } 2942 2943 return ret; 2944 } 2945 2946 /* Returns number of bytes copied or -1 if error. */ 2947 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp) 2948 { 2949 struct scsi_data_buffer *sdb = &scp->sdb; 2950 2951 if (!sdb->length) 2952 return 0; 2953 if (scp->sc_data_direction != DMA_TO_DEVICE) 2954 return -1; 2955 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp, 2956 num * sdebug_sector_size, 0, true); 2957 } 2958 2959 /* If sip->storep+lba compares equal to arr(num), then copy top half of 2960 * arr into sip->storep+lba and return true. If comparison fails then 2961 * return false. 
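 * Note: arr must hold 2 * num logical blocks: the first num blocks carry
 * the compare (verify) data and the second num blocks carry the data to
 * be written on a match, as supplied by COMPARE AND WRITE. When
 * compare_only is set the write-back half is not used (VERIFY w/ BYTCHK).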
 */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	/* compare the wrapped remainder at the start of the store */
	if (rest)
		res = !memcmp(fsp, arr + ((num - rest) * lb_size),
			      rest * lb_size);
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}

static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}

static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}

static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ?
SG_MITER_TO_SG : SG_MITER_FROM_SG)); 3048 3049 while (sg_miter_next(&miter) && resid > 0) { 3050 size_t len = min_t(size_t, miter.length, resid); 3051 void *start = dif_store(sip, sector); 3052 size_t rest = 0; 3053 3054 if (dif_store_end < start + len) 3055 rest = start + len - dif_store_end; 3056 3057 paddr = miter.addr; 3058 3059 if (read) 3060 memcpy(paddr, start, len - rest); 3061 else 3062 memcpy(start, paddr, len - rest); 3063 3064 if (rest) { 3065 if (read) 3066 memcpy(paddr + len - rest, dif_storep, rest); 3067 else 3068 memcpy(dif_storep, paddr + len - rest, rest); 3069 } 3070 3071 sector += len / sizeof(*dif_storep); 3072 resid -= len; 3073 } 3074 sg_miter_stop(&miter); 3075 } 3076 3077 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, 3078 unsigned int sectors, u32 ei_lba) 3079 { 3080 int ret = 0; 3081 unsigned int i; 3082 sector_t sector; 3083 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *) 3084 scp->device->hostdata, true); 3085 struct t10_pi_tuple *sdt; 3086 3087 for (i = 0; i < sectors; i++, ei_lba++) { 3088 sector = start_sec + i; 3089 sdt = dif_store(sip, sector); 3090 3091 if (sdt->app_tag == cpu_to_be16(0xffff)) 3092 continue; 3093 3094 /* 3095 * Because scsi_debug acts as both initiator and 3096 * target we proceed to verify the PI even if 3097 * RDPROTECT=3. This is done so the "initiator" knows 3098 * which type of error to return. Otherwise we would 3099 * have to iterate over the PI twice. 3100 */ 3101 if (scp->cmnd[1] >> 5) { /* RDPROTECT */ 3102 ret = dif_verify(sdt, lba2fake_store(sip, sector), 3103 sector, ei_lba); 3104 if (ret) { 3105 dif_errors++; 3106 break; 3107 } 3108 } 3109 } 3110 3111 dif_copy_prot(scp, start_sec, sectors, true); 3112 dix_reads++; 3113 3114 return ret; 3115 } 3116 3117 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) 3118 { 3119 bool check_prot; 3120 u32 num; 3121 u32 ei_lba; 3122 int ret; 3123 u64 lba; 3124 struct sdeb_store_info *sip = devip2sip(devip, true); 3125 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck; 3126 u8 *cmd = scp->cmnd; 3127 3128 switch (cmd[0]) { 3129 case READ_16: 3130 ei_lba = 0; 3131 lba = get_unaligned_be64(cmd + 2); 3132 num = get_unaligned_be32(cmd + 10); 3133 check_prot = true; 3134 break; 3135 case READ_10: 3136 ei_lba = 0; 3137 lba = get_unaligned_be32(cmd + 2); 3138 num = get_unaligned_be16(cmd + 7); 3139 check_prot = true; 3140 break; 3141 case READ_6: 3142 ei_lba = 0; 3143 lba = (u32)cmd[3] | (u32)cmd[2] << 8 | 3144 (u32)(cmd[1] & 0x1f) << 16; 3145 num = (0 == cmd[4]) ? 
256 : cmd[4]; 3146 check_prot = true; 3147 break; 3148 case READ_12: 3149 ei_lba = 0; 3150 lba = get_unaligned_be32(cmd + 2); 3151 num = get_unaligned_be32(cmd + 6); 3152 check_prot = true; 3153 break; 3154 case XDWRITEREAD_10: 3155 ei_lba = 0; 3156 lba = get_unaligned_be32(cmd + 2); 3157 num = get_unaligned_be16(cmd + 7); 3158 check_prot = false; 3159 break; 3160 default: /* assume READ(32) */ 3161 lba = get_unaligned_be64(cmd + 12); 3162 ei_lba = get_unaligned_be32(cmd + 20); 3163 num = get_unaligned_be32(cmd + 28); 3164 check_prot = false; 3165 break; 3166 } 3167 if (unlikely(have_dif_prot && check_prot)) { 3168 if (sdebug_dif == T10_PI_TYPE2_PROTECTION && 3169 (cmd[1] & 0xe0)) { 3170 mk_sense_invalid_opcode(scp); 3171 return check_condition_result; 3172 } 3173 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION || 3174 sdebug_dif == T10_PI_TYPE3_PROTECTION) && 3175 (cmd[1] & 0xe0) == 0) 3176 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " 3177 "to DIF device\n"); 3178 } 3179 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) && 3180 atomic_read(&sdeb_inject_pending))) { 3181 num /= 2; 3182 atomic_set(&sdeb_inject_pending, 0); 3183 } 3184 3185 ret = check_device_access_params(scp, lba, num, false); 3186 if (ret) 3187 return ret; 3188 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && 3189 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && 3190 ((lba + num) > sdebug_medium_error_start))) { 3191 /* claim unrecoverable read error */ 3192 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); 3193 /* set info field and valid bit for fixed descriptor */ 3194 if (0x70 == (scp->sense_buffer[0] & 0x7f)) { 3195 scp->sense_buffer[0] |= 0x80; /* Valid bit */ 3196 ret = (lba < OPT_MEDIUM_ERR_ADDR) 3197 ? OPT_MEDIUM_ERR_ADDR : (int)lba; 3198 put_unaligned_be32(ret, scp->sense_buffer + 3); 3199 } 3200 scsi_set_resid(scp, scsi_bufflen(scp)); 3201 return check_condition_result; 3202 } 3203 3204 read_lock(macc_lckp); 3205 3206 /* DIX + T10 DIF */ 3207 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { 3208 switch (prot_verify_read(scp, lba, num, ei_lba)) { 3209 case 1: /* Guard tag error */ 3210 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ 3211 read_unlock(macc_lckp); 3212 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); 3213 return check_condition_result; 3214 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { 3215 read_unlock(macc_lckp); 3216 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); 3217 return illegal_condition_result; 3218 } 3219 break; 3220 case 3: /* Reference tag error */ 3221 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ 3222 read_unlock(macc_lckp); 3223 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); 3224 return check_condition_result; 3225 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { 3226 read_unlock(macc_lckp); 3227 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); 3228 return illegal_condition_result; 3229 } 3230 break; 3231 } 3232 } 3233 3234 ret = do_device_access(sip, scp, 0, lba, num, false); 3235 read_unlock(macc_lckp); 3236 if (unlikely(ret == -1)) 3237 return DID_ERROR << 16; 3238 3239 scsi_set_resid(scp, scsi_bufflen(scp) - ret); 3240 3241 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) && 3242 atomic_read(&sdeb_inject_pending))) { 3243 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) { 3244 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); 3245 atomic_set(&sdeb_inject_pending, 0); 3246 return check_condition_result; 3247 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) { 3248 /* Logical block guard check failed */ 3249 
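			/*
			 * ASC 0x10, ASCQ 0x1 decodes to LOGICAL BLOCK GUARD
			 * CHECK FAILED, i.e. the same sense data that
			 * dif_verify() produces for a real guard tag mismatch.
			 */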
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); 3250 atomic_set(&sdeb_inject_pending, 0); 3251 return illegal_condition_result; 3252 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) { 3253 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); 3254 atomic_set(&sdeb_inject_pending, 0); 3255 return illegal_condition_result; 3256 } 3257 } 3258 return 0; 3259 } 3260 3261 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, 3262 unsigned int sectors, u32 ei_lba) 3263 { 3264 int ret; 3265 struct t10_pi_tuple *sdt; 3266 void *daddr; 3267 sector_t sector = start_sec; 3268 int ppage_offset; 3269 int dpage_offset; 3270 struct sg_mapping_iter diter; 3271 struct sg_mapping_iter piter; 3272 3273 BUG_ON(scsi_sg_count(SCpnt) == 0); 3274 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); 3275 3276 sg_miter_start(&piter, scsi_prot_sglist(SCpnt), 3277 scsi_prot_sg_count(SCpnt), 3278 SG_MITER_ATOMIC | SG_MITER_FROM_SG); 3279 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt), 3280 SG_MITER_ATOMIC | SG_MITER_FROM_SG); 3281 3282 /* For each protection page */ 3283 while (sg_miter_next(&piter)) { 3284 dpage_offset = 0; 3285 if (WARN_ON(!sg_miter_next(&diter))) { 3286 ret = 0x01; 3287 goto out; 3288 } 3289 3290 for (ppage_offset = 0; ppage_offset < piter.length; 3291 ppage_offset += sizeof(struct t10_pi_tuple)) { 3292 /* If we're at the end of the current 3293 * data page advance to the next one 3294 */ 3295 if (dpage_offset >= diter.length) { 3296 if (WARN_ON(!sg_miter_next(&diter))) { 3297 ret = 0x01; 3298 goto out; 3299 } 3300 dpage_offset = 0; 3301 } 3302 3303 sdt = piter.addr + ppage_offset; 3304 daddr = diter.addr + dpage_offset; 3305 3306 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ 3307 ret = dif_verify(sdt, daddr, sector, ei_lba); 3308 if (ret) 3309 goto out; 3310 } 3311 3312 sector++; 3313 ei_lba++; 3314 dpage_offset += sdebug_sector_size; 3315 } 3316 diter.consumed = dpage_offset; 3317 sg_miter_stop(&diter); 3318 } 3319 sg_miter_stop(&piter); 3320 3321 dif_copy_prot(SCpnt, start_sec, sectors, false); 3322 dix_writes++; 3323 3324 return 0; 3325 3326 out: 3327 dif_errors++; 3328 sg_miter_stop(&diter); 3329 sg_miter_stop(&piter); 3330 return ret; 3331 } 3332 3333 static unsigned long lba_to_map_index(sector_t lba) 3334 { 3335 if (sdebug_unmap_alignment) 3336 lba += sdebug_unmap_granularity - sdebug_unmap_alignment; 3337 sector_div(lba, sdebug_unmap_granularity); 3338 return lba; 3339 } 3340 3341 static sector_t map_index_to_lba(unsigned long index) 3342 { 3343 sector_t lba = index * sdebug_unmap_granularity; 3344 3345 if (sdebug_unmap_alignment) 3346 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; 3347 return lba; 3348 } 3349 3350 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba, 3351 unsigned int *num) 3352 { 3353 sector_t end; 3354 unsigned int mapped; 3355 unsigned long index; 3356 unsigned long next; 3357 3358 index = lba_to_map_index(lba); 3359 mapped = test_bit(index, sip->map_storep); 3360 3361 if (mapped) 3362 next = find_next_zero_bit(sip->map_storep, map_size, index); 3363 else 3364 next = find_next_bit(sip->map_storep, map_size, index); 3365 3366 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); 3367 *num = end - lba; 3368 return mapped; 3369 } 3370 3371 static void map_region(struct sdeb_store_info *sip, sector_t lba, 3372 unsigned int len) 3373 { 3374 sector_t end = lba + len; 3375 3376 while (lba < end) { 3377 unsigned long index = lba_to_map_index(lba); 3378 3379 if (index < map_size) 3380 set_bit(index, sip->map_storep); 
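		/*
		 * Advance to the first LBA covered by the next map bit:
		 * with granularity G = sdebug_unmap_granularity and
		 * alignment A = sdebug_unmap_alignment that is
		 * (index + 1) * G - (G - A), e.g. G=4, A=1 gives 1, 5, 9, ...
		 */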
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, sip->map_storep);

		lba = map_index_to_lba(index + 1);
	}
}

static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {	/* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
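/*
 * Worked example (illustrative): with granularity 8 and alignment 0,
 * unmap_region() over LBAs 4..19 deallocates only the fully covered
 * grain (LBAs 8..15); the partial grains at either end stay mapped.
 * If sdebug_lbprz is 1 the freed sectors then read back as zeroes,
 * while an even LBPRZ value (sdebug_lbprz & 1 == 0) fills them with
 * 0xff bytes instead.
 */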
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				write_unlock(macc_lckp);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
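/*
 * Worked example (illustrative): WRITE(6) packs a 21-bit LBA into the
 * low 5 bits of byte 1 plus bytes 2 and 3, with a one-byte transfer
 * length where 0 means 256 blocks. The CDB 0a 01 02 03 00 00 therefore
 * decodes above to lba = 0x10203 and num = 256.
 */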
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			    wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LB Data Offset field bad\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: LBA range descriptors don't fit\n",
				    my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			    my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
				    my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
					    "%s: %s: sum of blocks > data provided\n",
					    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
				    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
				    my_name, num_by, ret);

		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
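/*
 * Worked example (illustrative): with 512-byte blocks, a WRITE
 * SCATTERED data-out starting with lbdof = 1 carries a 512-byte header
 * area (the 32-byte parameter list header plus up to fifteen 32-byte
 * LBA range descriptors) followed by the write data itself, so sg_off
 * above starts at lbdof_blen = 512 and advances by num * 512 bytes for
 * each descriptor processed.
 */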
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fs1p;
	u8 *fsp;

	write_lock(macc_lckp);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock(macc_lckp);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	write_unlock(macc_lckp);

	return 0;
}

static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)	/* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
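/*
 * Usage sketch (assumes the sg3_utils package is installed; option
 * spellings may differ between versions): something like
 * "sg_write_same --16 --unmap --lba=0x1000 --num=8 /dev/sg1" issues a
 * WRITE SAME(16) with the UNMAP bit set (cmd[1] & 0x8 above), which
 * this driver services by calling unmap_region() instead of
 * replicating the data-out block across the range.
 */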
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}

static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];	/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
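/*
 * Worked example (illustrative): a COMPARE AND WRITE with num = 4 and
 * 512-byte blocks expects dnum * lb_size = 4096 bytes of data-out:
 * the first 2048 bytes are compared against the current contents of
 * LBA..LBA+3 and, only if they all match, the second 2048 bytes are
 * written there; a mismatch returns MISCOMPARE sense instead.
 */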
struct unmap_block_desc {
	__be64 lba;
	__be32 blocks;
	__be32 __reserved;
};

static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}
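/*
 * Worked example (illustrative): an UNMAP parameter list with a single
 * descriptor is 24 bytes: bytes 0..1 hold the UNMAP data length (22,
 * i.e. payload_len - 2), bytes 2..3 the block descriptor data length
 * (16), bytes 4..7 are reserved, and the 16-byte descriptor at offset
 * 8 holds a big-endian 8-byte LBA, a 4-byte block count and 4 reserved
 * bytes, exactly the layout of struct unmap_block_desc above.
 */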
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;	/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}

static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || (cmd[1] & 0x2))
		res = SDEG_RES_IMMED_MASK;
	else	/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}
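/*
 * Worked example (illustrative): a GET LBA STATUS response from
 * resp_get_lba_status() above for a mapped run of 64 blocks starting
 * at LBA 0x800 carries a parameter data length of 20 at offset 0
 * (24 meaningful bytes in the 32-byte buffer), LBA 0x800 at offset 8,
 * block count 64 at offset 16 and provisioning status 0 (mapped) in
 * byte 20.
 */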
/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will return
 * CONDITION MET if the specified blocks will/have fitted in the cache, and
 * a GOOD status otherwise. Model a disk with a big cache and yield
 * CONDITION MET. Actually tries to bring range in main memory into the
 * cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
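/*
 * Worked example (illustrative): the response is built in 64-byte
 * buckets of RL_BUCKET_ELEMS (8) eight-byte slots. With 10 normal LUNs
 * and one W-LUN, rlen = 11 * 8 = 88; the first bucket carries the
 * header plus LUNs 0..6, the second LUNs 7..9 and the W-LUN, so the
 * whole transfer is 8 + 88 = 96 bytes.
 */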
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:	/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
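/*
 * Worked example (illustrative): VERIFY(16) with BYTCHK=3 and vnum =
 * 1000 fetches a single 512-byte block from the initiator (a_num = 1),
 * replicates it across the 1000-block scratch buffer with the memcpy
 * loop above, then compares the whole run against the store; any
 * difference yields MISCOMPARE sense.
 */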
#define RZONES_DESC_HD 64

/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	unsigned int i, max_zones, rep_max_zones, nrz = 0;
	int ret = 0;
	u32 alloc_len, rep_opts, rep_len;
	bool partial;
	u64 lba, zs_lba;
	u8 *arr = NULL, *desc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	zs_lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);
	if (alloc_len == 0)
		return 0;	/* not an error */
	rep_opts = cmd[14] & 0x3f;
	partial = cmd[14] & 0x80;

	if (zs_lba >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
			    max_zones);

	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	read_lock(macc_lckp);

	desc = arr + 64;
	for (i = 0; i < max_zones; i++) {
		lba = zs_lba + devip->zsize * i;
		if (lba > sdebug_capacity)
			break;
		zsp = zbc_zone(devip, lba);
		switch (rep_opts) {
		case 0x00:
			/* All zones */
			break;
		case 0x01:
			/* Empty zones */
			if (zsp->z_cond != ZC1_EMPTY)
				continue;
			break;
		case 0x02:
			/* Implicit open zones */
			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
				continue;
			break;
		case 0x03:
			/* Explicit open zones */
			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
				continue;
			break;
		case 0x04:
			/* Closed zones */
			if (zsp->z_cond != ZC4_CLOSED)
				continue;
			break;
		case 0x05:
			/* Full zones */
			if (zsp->z_cond != ZC5_FULL)
				continue;
			break;
		case 0x06:
		case 0x07:
		case 0x10:
			/*
			 * Read-only, offline, reset WP recommended are
			 * not emulated: no zones to report.
			 */
			continue;
		case 0x11:
			/* non-seq-resource set */
			if (!zsp->z_non_seq_resource)
				continue;
			break;
		case 0x3f:
			/* Not write pointer (conventional) zones */
			if (!zbc_zone_is_conv(zsp))
				continue;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			ret = check_condition_result;
			goto fini;
		}

		if (nrz < rep_max_zones) {
			/* Fill zone descriptor */
			desc[0] = zsp->z_type;
			desc[1] = zsp->z_cond << 4;
			if (zsp->z_non_seq_resource)
				desc[1] |= 1 << 1;
			put_unaligned_be64((u64)zsp->z_size, desc + 8);
			put_unaligned_be64((u64)zsp->z_start, desc + 16);
			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
			desc += 64;
		}

		if (partial && nrz >= rep_max_zones)
			break;

		nrz++;
	}

	/* Report header */
	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
	put_unaligned_be64(sdebug_capacity - 1, arr + 8);

	rep_len = (unsigned long)desc - (unsigned long)arr;
	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));

fini:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
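/*
 * Worked example (illustrative): with alloc_len = 512 the response can
 * carry rep_max_zones = (512 - 64) >> 6 = 7 zone descriptors after the
 * 64-byte report header. Each 64-byte descriptor holds the zone type
 * in byte 0, the zone condition in the high nibble of byte 1, and the
 * zone length, start LBA and write pointer as big-endian 8-byte fields
 * at offsets 8, 16 and 24.
 */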
/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC4_CLOSED)
			zbc_open_zone(devip, &devip->zstate[i], true);
	}
}

static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be opened */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
static void zbc_close_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_close_zone(devip, &devip->zstate[i]);
}

static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}

static void zbc_finish_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_finish_zone(devip, &devip->zstate[i], false);
}

static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}

static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}

static void zbc_rwp_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_rwp_zone(devip, &devip->zstate[i]);
}

static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}

static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
	u16 hwq;
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));

	hwq = blk_mq_unique_tag_to_hwq(tag);

	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
	if (WARN_ON_ONCE(hwq >= submit_queues))
		hwq = 0;

	return sdebug_q_arr + hwq;
}

static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
}
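/*
 * Worked example (illustrative): blk_mq_unique_tag() encodes the
 * hardware queue index in the upper 16 bits and the per-queue tag in
 * the lower 16, so a unique tag of 0x00030007 maps to hwq 3, tag 7,
 * and get_queue() returns sdebug_q_arr + 3.
 */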
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sd_dp->defer_t = SDEB_DEFER_NONE;
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scsi_done(scp);	/* callback to mid level */
}

/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}

/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}

static bool got_shared_uuid;
static uuid_t shared_uuid;
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
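/*
 * Worked example (illustrative): with 512-byte sectors and
 * sdeb_zbc_zone_size_mb=4, each zone is (4 * SZ_1M) >> 9 = 8192
 * sectors, so a 102 MiB device (208896 sectors) gets nr_zones =
 * (208896 + 8191) >> 13 = 26 zones, the last one covering only the
 * remaining 4096 sectors.
 */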
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zmodel = sdeb_zbc_model;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zmodel = BLK_ZONED_NONE;
		}
		devip->create_ts = ktime_get_boottime();
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}

static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *open_devip = NULL;
	struct sdebug_dev_info *devip;

	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		pr_err("Host info NULL\n");
		return NULL;
	}

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			pr_err("out of memory at line %d\n", __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	atomic_set(&open_devip->num_in_q, 0);
	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
	open_devip->used = true;
	return open_devip;
}

static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);
	return 0;
}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = false;
		sdp->hostdata = NULL;
	}
}

static void stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (!sd_dp)
		return;
	if (defer_t == SDEB_DEFER_HRT)
		hrtimer_cancel(&sd_dp->hrt);
	else if (defer_t == SDEB_DEFER_WQ)
		cancel_work_sync(&sd_dp->ew.work);
}
/* If @cmnd is found, deletes its timer or work queue and returns true;
 * else returns false.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}

/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}

/* Free queued command memory on heap */
static void free_all_queued(void)
{
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			sqcp = &sqp->qc_arr[k];
			kfree(sqcp->sd_dp);
			sqcp->sd_dp = NULL;
		}
	}
}

static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
	bool ok;

	++num_aborts;
	if (SCpnt) {
		ok = stop_queued_cmnd(SCpnt);
		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
			sdev_printk(KERN_INFO, SCpnt->device,
				    "%s: command%s found\n", __func__,
				    ok ? "" : " not");
	}
	return SUCCESS;
}
"" : " not"); 5170 } 5171 return SUCCESS; 5172 } 5173 5174 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) 5175 { 5176 ++num_dev_resets; 5177 if (SCpnt && SCpnt->device) { 5178 struct scsi_device *sdp = SCpnt->device; 5179 struct sdebug_dev_info *devip = 5180 (struct sdebug_dev_info *)sdp->hostdata; 5181 5182 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5183 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5184 if (devip) 5185 set_bit(SDEBUG_UA_POR, devip->uas_bm); 5186 } 5187 return SUCCESS; 5188 } 5189 5190 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) 5191 { 5192 struct sdebug_host_info *sdbg_host; 5193 struct sdebug_dev_info *devip; 5194 struct scsi_device *sdp; 5195 struct Scsi_Host *hp; 5196 int k = 0; 5197 5198 ++num_target_resets; 5199 if (!SCpnt) 5200 goto lie; 5201 sdp = SCpnt->device; 5202 if (!sdp) 5203 goto lie; 5204 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5205 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5206 hp = sdp->host; 5207 if (!hp) 5208 goto lie; 5209 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); 5210 if (sdbg_host) { 5211 list_for_each_entry(devip, 5212 &sdbg_host->dev_info_list, 5213 dev_list) 5214 if (devip->target == sdp->id) { 5215 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5216 ++k; 5217 } 5218 } 5219 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 5220 sdev_printk(KERN_INFO, sdp, 5221 "%s: %d device(s) found in target\n", __func__, k); 5222 lie: 5223 return SUCCESS; 5224 } 5225 5226 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) 5227 { 5228 struct sdebug_host_info *sdbg_host; 5229 struct sdebug_dev_info *devip; 5230 struct scsi_device *sdp; 5231 struct Scsi_Host *hp; 5232 int k = 0; 5233 5234 ++num_bus_resets; 5235 if (!(SCpnt && SCpnt->device)) 5236 goto lie; 5237 sdp = SCpnt->device; 5238 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) 5239 sdev_printk(KERN_INFO, sdp, "%s\n", __func__); 5240 hp = sdp->host; 5241 if (hp) { 5242 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); 5243 if (sdbg_host) { 5244 list_for_each_entry(devip, 5245 &sdbg_host->dev_info_list, 5246 dev_list) { 5247 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5248 ++k; 5249 } 5250 } 5251 } 5252 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 5253 sdev_printk(KERN_INFO, sdp, 5254 "%s: %d device(s) found in host\n", __func__, k); 5255 lie: 5256 return SUCCESS; 5257 } 5258 5259 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) 5260 { 5261 struct sdebug_host_info *sdbg_host; 5262 struct sdebug_dev_info *devip; 5263 int k = 0; 5264 5265 ++num_host_resets; 5266 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 5267 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); 5268 spin_lock(&sdebug_host_list_lock); 5269 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { 5270 list_for_each_entry(devip, &sdbg_host->dev_info_list, 5271 dev_list) { 5272 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 5273 ++k; 5274 } 5275 } 5276 spin_unlock(&sdebug_host_list_lock); 5277 stop_all_queued(); 5278 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 5279 sdev_printk(KERN_INFO, SCpnt->device, 5280 "%s: %d device(s) found\n", __func__, k); 5281 return SUCCESS; 5282 } 5283 5284 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size) 5285 { 5286 struct msdos_partition *pp; 5287 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs; 5288 int sectors_per_part, num_sectors, k; 5289 int heads_by_sects, start_sec, end_sec; 5290 5291 /* assume partition table already zeroed */ 5292 if ((sdebug_num_parts < 1) || (store_size < 1048576)) 5293 return; 5294 if 
(sdebug_num_parts > SDEBUG_MAX_PARTS) { 5295 sdebug_num_parts = SDEBUG_MAX_PARTS; 5296 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); 5297 } 5298 num_sectors = (int)get_sdebug_capacity(); 5299 sectors_per_part = (num_sectors - sdebug_sectors_per) 5300 / sdebug_num_parts; 5301 heads_by_sects = sdebug_heads * sdebug_sectors_per; 5302 starts[0] = sdebug_sectors_per; 5303 max_part_secs = sectors_per_part; 5304 for (k = 1; k < sdebug_num_parts; ++k) { 5305 starts[k] = ((k * sectors_per_part) / heads_by_sects) 5306 * heads_by_sects; 5307 if (starts[k] - starts[k - 1] < max_part_secs) 5308 max_part_secs = starts[k] - starts[k - 1]; 5309 } 5310 starts[sdebug_num_parts] = num_sectors; 5311 starts[sdebug_num_parts + 1] = 0; 5312 5313 ramp[510] = 0x55; /* magic partition markings */ 5314 ramp[511] = 0xAA; 5315 pp = (struct msdos_partition *)(ramp + 0x1be); 5316 for (k = 0; starts[k + 1]; ++k, ++pp) { 5317 start_sec = starts[k]; 5318 end_sec = starts[k] + max_part_secs - 1; 5319 pp->boot_ind = 0; 5320 5321 pp->cyl = start_sec / heads_by_sects; 5322 pp->head = (start_sec - (pp->cyl * heads_by_sects)) 5323 / sdebug_sectors_per; 5324 pp->sector = (start_sec % sdebug_sectors_per) + 1; 5325 5326 pp->end_cyl = end_sec / heads_by_sects; 5327 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) 5328 / sdebug_sectors_per; 5329 pp->end_sector = (end_sec % sdebug_sectors_per) + 1; 5330 5331 pp->start_sect = cpu_to_le32(start_sec); 5332 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); 5333 pp->sys_ind = 0x83; /* plain Linux partition */ 5334 } 5335 } 5336 5337 static void block_unblock_all_queues(bool block) 5338 { 5339 int j; 5340 struct sdebug_queue *sqp; 5341 5342 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) 5343 atomic_set(&sqp->blocked, (int)block); 5344 } 5345 5346 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 5347 * commands will be processed normally before triggers occur. 5348 */ 5349 static void tweak_cmnd_count(void) 5350 { 5351 int count, modulo; 5352 5353 modulo = abs(sdebug_every_nth); 5354 if (modulo < 2) 5355 return; 5356 block_unblock_all_queues(true); 5357 count = atomic_read(&sdebug_cmnd_count); 5358 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); 5359 block_unblock_all_queues(false); 5360 } 5361 5362 static void clear_queue_stats(void) 5363 { 5364 atomic_set(&sdebug_cmnd_count, 0); 5365 atomic_set(&sdebug_completions, 0); 5366 atomic_set(&sdebug_miss_cpus, 0); 5367 atomic_set(&sdebug_a_tsf, 0); 5368 } 5369 5370 static bool inject_on_this_cmd(void) 5371 { 5372 if (sdebug_every_nth == 0) 5373 return false; 5374 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0; 5375 } 5376 5377 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */ 5378 5379 /* Complete the processing of the thread that queued a SCSI command to this 5380 * driver. It either completes the command by calling cmnd_done() or 5381 * schedules a hr timer or work queue then returns 0. Returns 5382 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. 
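 * Delay selection (a summary of the logic below): delta_jiff == 0 responds
 * in the invoking thread; delta_jiff > 0 or ndelay > 0 arms a high
 * resolution timer (or marks the command for polling when REQ_HIPRI is
 * set); a negative delta_jiff hands completion to a work queue.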
5383 */ 5384 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, 5385 int scsi_result, 5386 int (*pfp)(struct scsi_cmnd *, 5387 struct sdebug_dev_info *), 5388 int delta_jiff, int ndelay) 5389 { 5390 bool new_sd_dp; 5391 bool inject = false; 5392 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI; 5393 int k, num_in_q, qdepth; 5394 unsigned long iflags; 5395 u64 ns_from_boot = 0; 5396 struct sdebug_queue *sqp; 5397 struct sdebug_queued_cmd *sqcp; 5398 struct scsi_device *sdp; 5399 struct sdebug_defer *sd_dp; 5400 5401 if (unlikely(devip == NULL)) { 5402 if (scsi_result == 0) 5403 scsi_result = DID_NO_CONNECT << 16; 5404 goto respond_in_thread; 5405 } 5406 sdp = cmnd->device; 5407 5408 if (delta_jiff == 0) 5409 goto respond_in_thread; 5410 5411 sqp = get_queue(cmnd); 5412 spin_lock_irqsave(&sqp->qc_lock, iflags); 5413 if (unlikely(atomic_read(&sqp->blocked))) { 5414 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5415 return SCSI_MLQUEUE_HOST_BUSY; 5416 } 5417 num_in_q = atomic_read(&devip->num_in_q); 5418 qdepth = cmnd->device->queue_depth; 5419 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { 5420 if (scsi_result) { 5421 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5422 goto respond_in_thread; 5423 } else 5424 scsi_result = device_qfull_result; 5425 } else if (unlikely(sdebug_every_nth && 5426 (SDEBUG_OPT_RARE_TSF & sdebug_opts) && 5427 (scsi_result == 0))) { 5428 if ((num_in_q == (qdepth - 1)) && 5429 (atomic_inc_return(&sdebug_a_tsf) >= 5430 abs(sdebug_every_nth))) { 5431 atomic_set(&sdebug_a_tsf, 0); 5432 inject = true; 5433 scsi_result = device_qfull_result; 5434 } 5435 } 5436 5437 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue); 5438 if (unlikely(k >= sdebug_max_queue)) { 5439 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5440 if (scsi_result) 5441 goto respond_in_thread; 5442 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts) 5443 scsi_result = device_qfull_result; 5444 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) 5445 sdev_printk(KERN_INFO, sdp, 5446 "%s: max_queue=%d exceeded, %s\n", 5447 __func__, sdebug_max_queue, 5448 (scsi_result ? "status: TASK SET FULL" : 5449 "report: host busy")); 5450 if (scsi_result) 5451 goto respond_in_thread; 5452 else 5453 return SCSI_MLQUEUE_HOST_BUSY; 5454 } 5455 set_bit(k, sqp->in_use_bm); 5456 atomic_inc(&devip->num_in_q); 5457 sqcp = &sqp->qc_arr[k]; 5458 sqcp->a_cmnd = cmnd; 5459 cmnd->host_scribble = (unsigned char *)sqcp; 5460 sd_dp = sqcp->sd_dp; 5461 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5462 5463 if (!sd_dp) { 5464 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 5465 if (!sd_dp) { 5466 atomic_dec(&devip->num_in_q); 5467 clear_bit(k, sqp->in_use_bm); 5468 return SCSI_MLQUEUE_HOST_BUSY; 5469 } 5470 new_sd_dp = true; 5471 } else { 5472 new_sd_dp = false; 5473 } 5474 5475 /* Set the hostwide tag */ 5476 if (sdebug_host_max_queue) 5477 sd_dp->hc_idx = get_tag(cmnd); 5478 5479 if (hipri) 5480 ns_from_boot = ktime_get_boottime_ns(); 5481 5482 /* one of the resp_*() response functions is called here */ 5483 cmnd->result = pfp ? 
pfp(cmnd, devip) : 0; 5484 if (cmnd->result & SDEG_RES_IMMED_MASK) { 5485 cmnd->result &= ~SDEG_RES_IMMED_MASK; 5486 delta_jiff = ndelay = 0; 5487 } 5488 if (cmnd->result == 0 && scsi_result != 0) 5489 cmnd->result = scsi_result; 5490 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) { 5491 if (atomic_read(&sdeb_inject_pending)) { 5492 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO); 5493 atomic_set(&sdeb_inject_pending, 0); 5494 cmnd->result = check_condition_result; 5495 } 5496 } 5497 5498 if (unlikely(sdebug_verbose && cmnd->result)) 5499 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 5500 __func__, cmnd->result); 5501 5502 if (delta_jiff > 0 || ndelay > 0) { 5503 ktime_t kt; 5504 5505 if (delta_jiff > 0) { 5506 u64 ns = jiffies_to_nsecs(delta_jiff); 5507 5508 if (sdebug_random && ns < U32_MAX) { 5509 ns = prandom_u32_max((u32)ns); 5510 } else if (sdebug_random) { 5511 ns >>= 12; /* scale to 4 usec precision */ 5512 if (ns < U32_MAX) /* over 4 hours max */ 5513 ns = prandom_u32_max((u32)ns); 5514 ns <<= 12; 5515 } 5516 kt = ns_to_ktime(ns); 5517 } else { /* ndelay has a 4.2 second max */ 5518 kt = sdebug_random ? prandom_u32_max((u32)ndelay) : 5519 (u32)ndelay; 5520 if (ndelay < INCLUSIVE_TIMING_MAX_NS) { 5521 u64 d = ktime_get_boottime_ns() - ns_from_boot; 5522 5523 if (kt <= d) { /* elapsed duration >= kt */ 5524 spin_lock_irqsave(&sqp->qc_lock, iflags); 5525 sqcp->a_cmnd = NULL; 5526 atomic_dec(&devip->num_in_q); 5527 clear_bit(k, sqp->in_use_bm); 5528 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5529 if (new_sd_dp) 5530 kfree(sd_dp); 5531 /* call scsi_done() from this thread */ 5532 scsi_done(cmnd); 5533 return 0; 5534 } 5535 /* otherwise reduce kt by elapsed time */ 5536 kt -= d; 5537 } 5538 } 5539 if (hipri) { 5540 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); 5541 spin_lock_irqsave(&sqp->qc_lock, iflags); 5542 if (!sd_dp->init_poll) { 5543 sd_dp->init_poll = true; 5544 sqcp->sd_dp = sd_dp; 5545 sd_dp->sqa_idx = sqp - sdebug_q_arr; 5546 sd_dp->qc_idx = k; 5547 } 5548 sd_dp->defer_t = SDEB_DEFER_POLL; 5549 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5550 } else { 5551 if (!sd_dp->init_hrt) { 5552 sd_dp->init_hrt = true; 5553 sqcp->sd_dp = sd_dp; 5554 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, 5555 HRTIMER_MODE_REL_PINNED); 5556 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; 5557 sd_dp->sqa_idx = sqp - sdebug_q_arr; 5558 sd_dp->qc_idx = k; 5559 } 5560 sd_dp->defer_t = SDEB_DEFER_HRT; 5561 /* schedule the invocation of scsi_done() for a later time */ 5562 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); 5563 } 5564 if (sdebug_statistics) 5565 sd_dp->issuing_cpu = raw_smp_processor_id(); 5566 } else { /* jdelay < 0, use work queue */ 5567 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) && 5568 atomic_read(&sdeb_inject_pending))) 5569 sd_dp->aborted = true; 5570 if (hipri) { 5571 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); 5572 spin_lock_irqsave(&sqp->qc_lock, iflags); 5573 if (!sd_dp->init_poll) { 5574 sd_dp->init_poll = true; 5575 sqcp->sd_dp = sd_dp; 5576 sd_dp->sqa_idx = sqp - sdebug_q_arr; 5577 sd_dp->qc_idx = k; 5578 } 5579 sd_dp->defer_t = SDEB_DEFER_POLL; 5580 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 5581 } else { 5582 if (!sd_dp->init_wq) { 5583 sd_dp->init_wq = true; 5584 sqcp->sd_dp = sd_dp; 5585 sd_dp->sqa_idx = sqp - sdebug_q_arr; 5586 sd_dp->qc_idx = k; 5587 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); 5588 } 5589 sd_dp->defer_t = SDEB_DEFER_WQ; 5590 
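/* hand the deferred completion to the system workqueue */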
schedule_work(&sd_dp->ew.work); 5591 } 5592 if (sdebug_statistics) 5593 sd_dp->issuing_cpu = raw_smp_processor_id(); 5594 if (unlikely(sd_dp->aborted)) { 5595 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", 5596 scsi_cmd_to_rq(cmnd)->tag); 5597 blk_abort_request(scsi_cmd_to_rq(cmnd)); 5598 atomic_set(&sdeb_inject_pending, 0); 5599 sd_dp->aborted = false; 5600 } 5601 } 5602 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result)) 5603 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__, 5604 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL"); 5605 return 0; 5606 5607 respond_in_thread: /* call back to mid-layer using invocation thread */ 5608 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; 5609 cmnd->result &= ~SDEG_RES_IMMED_MASK; 5610 if (cmnd->result == 0 && scsi_result != 0) 5611 cmnd->result = scsi_result; 5612 scsi_done(cmnd); 5613 return 0; 5614 } 5615 5616 /* Note: The following macros create attribute files in the 5617 /sys/module/scsi_debug/parameters directory. Unfortunately this 5618 driver is unaware of a change and cannot trigger auxiliary actions 5619 as it can when the corresponding attribute in the 5620 /sys/bus/pseudo/drivers/scsi_debug directory is changed. 5621 */ 5622 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); 5623 module_param_named(ato, sdebug_ato, int, S_IRUGO); 5624 module_param_named(cdb_len, sdebug_cdb_len, int, 0644); 5625 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); 5626 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); 5627 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); 5628 module_param_named(dif, sdebug_dif, int, S_IRUGO); 5629 module_param_named(dix, sdebug_dix, int, S_IRUGO); 5630 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR); 5631 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR); 5632 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); 5633 module_param_named(guard, sdebug_guard, uint, S_IRUGO); 5634 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); 5635 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO); 5636 module_param_string(inq_product, sdebug_inq_product_id, 5637 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR); 5638 module_param_string(inq_rev, sdebug_inq_product_rev, 5639 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR); 5640 module_param_string(inq_vendor, sdebug_inq_vendor_id, 5641 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR); 5642 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); 5643 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); 5644 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); 5645 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); 5646 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); 5647 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR); 5648 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); 5649 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR); 5650 module_param_named(medium_error_count, sdebug_medium_error_count, int, 5651 S_IRUGO | S_IWUSR); 5652 module_param_named(medium_error_start, sdebug_medium_error_start, int, 5653 S_IRUGO | S_IWUSR); 5654 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR); 5655 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR); 5656 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO); 
5657 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO); 5658 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); 5659 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); 5660 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO); 5661 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); 5662 module_param_named(per_host_store, sdebug_per_host_store, bool, 5663 S_IRUGO | S_IWUSR); 5664 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); 5665 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); 5666 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR); 5667 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); 5668 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); 5669 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO); 5670 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR); 5671 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR); 5672 module_param_named(submit_queues, submit_queues, int, S_IRUGO); 5673 module_param_named(poll_queues, poll_queues, int, S_IRUGO); 5674 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO); 5675 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); 5676 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); 5677 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); 5678 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); 5679 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); 5680 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); 5681 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, 5682 S_IRUGO | S_IWUSR); 5683 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR); 5684 module_param_named(write_same_length, sdebug_write_same_length, int, 5685 S_IRUGO | S_IWUSR); 5686 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO); 5687 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO); 5688 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO); 5689 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO); 5690 5691 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 5692 MODULE_DESCRIPTION("SCSI debug adapter driver"); 5693 MODULE_LICENSE("GPL"); 5694 MODULE_VERSION(SDEBUG_VERSION); 5695 5696 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)"); 5697 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 5698 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)"); 5699 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); 5700 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); 5701 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)"); 5702 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 5703 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 5704 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); 5705 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); 5706 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); 5707 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 5708 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); 5709 MODULE_PARM_DESC(host_max_queue, 5710 "host max # of queued cmds (0 to max(def) [max_queue fixed equal 
for !0])"); 5711 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")"); 5712 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"" 5713 SDEBUG_VERSION "\")"); 5714 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")"); 5715 MODULE_PARM_DESC(lbprz, 5716 "unmapped LBs read as 0 when lbprz=1 (def); as 0xff when lbprz=2"); 5717 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); 5718 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 5719 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 5720 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 5721 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1->flat address method"); 5722 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)"); 5723 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); 5724 MODULE_PARM_DESC(medium_error_count, "count of sectors for which to return MEDIUM error"); 5725 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error"); 5726 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)"); 5727 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 5728 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)"); 5729 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)"); 5730 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)"); 5731 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)"); 5732 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)"); 5733 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
(def=0)"); 5734 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)"); 5735 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 5736 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))"); 5737 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 5738 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns"); 5739 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); 5740 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); 5741 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 5742 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)"); 5743 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); 5744 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)"); 5745 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)"); 5746 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 5747 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); 5748 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); 5749 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); 5750 MODULE_PARM_DESC(uuid_ctl, 5751 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); 5752 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); 5753 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 5754 MODULE_PARM_DESC(wp, "Write Protect (def=0)"); 5755 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); 5756 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix"); 5757 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)"); 5758 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)"); 5759 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)"); 5760 5761 #define SDEBUG_INFO_LEN 256 5762 static char sdebug_info[SDEBUG_INFO_LEN]; 5763 5764 static const char *scsi_debug_info(struct Scsi_Host *shp) 5765 { 5766 int k; 5767 5768 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n", 5769 my_name, SDEBUG_VERSION, sdebug_version_date); 5770 if (k >= (SDEBUG_INFO_LEN - 1)) 5771 return sdebug_info; 5772 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, 5773 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d", 5774 sdebug_dev_size_mb, sdebug_opts, submit_queues, 5775 "statistics", (int)sdebug_statistics); 5776 return sdebug_info; 5777 } 5778 5779 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ 5780 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, 5781 int length) 5782 { 5783 char arr[16]; 5784 int opts; 5785 int minLen = length > 15 ? 
15 : length; 5786 5787 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 5788 return -EACCES; 5789 memcpy(arr, buffer, minLen); 5790 arr[minLen] = '\0'; 5791 if (1 != sscanf(arr, "%d", &opts)) 5792 return -EINVAL; 5793 sdebug_opts = opts; 5794 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); 5795 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); 5796 if (sdebug_every_nth != 0) 5797 tweak_cmnd_count(); 5798 return length; 5799 } 5800 5801 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the 5802 * same for each scsi_debug host (if more than one). Some of the counters 5803 * output are not atomics so might be inaccurate in a busy system. */ 5804 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) 5805 { 5806 int f, j, l; 5807 struct sdebug_queue *sqp; 5808 struct sdebug_host_info *sdhp; 5809 5810 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n", 5811 SDEBUG_VERSION, sdebug_version_date); 5812 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n", 5813 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb, 5814 sdebug_opts, sdebug_every_nth); 5815 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n", 5816 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns, 5817 sdebug_sector_size, "bytes"); 5818 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n", 5819 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, 5820 num_aborts); 5821 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", 5822 num_dev_resets, num_target_resets, num_bus_resets, 5823 num_host_resets); 5824 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n", 5825 dix_reads, dix_writes, dif_errors); 5826 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000, 5827 sdebug_statistics); 5828 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n", 5829 atomic_read(&sdebug_cmnd_count), 5830 atomic_read(&sdebug_completions), 5831 "miss_cpus", atomic_read(&sdebug_miss_cpus), 5832 atomic_read(&sdebug_a_tsf), 5833 atomic_read(&sdeb_mq_poll_count)); 5834 5835 seq_printf(m, "submit_queues=%d\n", submit_queues); 5836 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { 5837 seq_printf(m, " queue %d:\n", j); 5838 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue); 5839 if (f != sdebug_max_queue) { 5840 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue); 5841 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", 5842 "first,last bits", f, l); 5843 } 5844 } 5845 5846 seq_printf(m, "this host_no=%d\n", host->host_no); 5847 if (!xa_empty(per_store_ap)) { 5848 bool niu; 5849 int idx; 5850 unsigned long l_idx; 5851 struct sdeb_store_info *sip; 5852 5853 seq_puts(m, "\nhost list:\n"); 5854 j = 0; 5855 list_for_each_entry(sdhp, &sdebug_host_list, host_list) { 5856 idx = sdhp->si_idx; 5857 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j, 5858 sdhp->shost->host_no, idx); 5859 ++j; 5860 } 5861 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n", 5862 sdeb_most_recent_idx); 5863 j = 0; 5864 xa_for_each(per_store_ap, l_idx, sip) { 5865 niu = xa_get_mark(per_store_ap, l_idx, 5866 SDEB_XA_NOT_IN_USE); 5867 idx = (int)l_idx; 5868 seq_printf(m, " %d: idx=%d%s\n", j, idx, 5869 (niu ? " not_in_use" : "")); 5870 ++j; 5871 } 5872 } 5873 return 0; 5874 } 5875 5876 static ssize_t delay_show(struct device_driver *ddp, char *buf) 5877 { 5878 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay); 5879 } 5880 /* Returns -EBUSY if jdelay is being changed and commands are queued. 
The unit 5881 * of delay is jiffies. 5882 */ 5883 static ssize_t delay_store(struct device_driver *ddp, const char *buf, 5884 size_t count) 5885 { 5886 int jdelay, res; 5887 5888 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) { 5889 res = count; 5890 if (sdebug_jdelay != jdelay) { 5891 int j, k; 5892 struct sdebug_queue *sqp; 5893 5894 block_unblock_all_queues(true); 5895 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 5896 ++j, ++sqp) { 5897 k = find_first_bit(sqp->in_use_bm, 5898 sdebug_max_queue); 5899 if (k != sdebug_max_queue) { 5900 res = -EBUSY; /* queued commands */ 5901 break; 5902 } 5903 } 5904 if (res > 0) { 5905 sdebug_jdelay = jdelay; 5906 sdebug_ndelay = 0; 5907 } 5908 block_unblock_all_queues(false); 5909 } 5910 return res; 5911 } 5912 return -EINVAL; 5913 } 5914 static DRIVER_ATTR_RW(delay); 5915 5916 static ssize_t ndelay_show(struct device_driver *ddp, char *buf) 5917 { 5918 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay); 5919 } 5920 /* Returns -EBUSY if ndelay is being changed and commands are queued */ 5921 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */ 5922 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, 5923 size_t count) 5924 { 5925 int ndelay, res; 5926 5927 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && 5928 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) { 5929 res = count; 5930 if (sdebug_ndelay != ndelay) { 5931 int j, k; 5932 struct sdebug_queue *sqp; 5933 5934 block_unblock_all_queues(true); 5935 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 5936 ++j, ++sqp) { 5937 k = find_first_bit(sqp->in_use_bm, 5938 sdebug_max_queue); 5939 if (k != sdebug_max_queue) { 5940 res = -EBUSY; /* queued commands */ 5941 break; 5942 } 5943 } 5944 if (res > 0) { 5945 sdebug_ndelay = ndelay; 5946 sdebug_jdelay = ndelay ? 
JDELAY_OVERRIDDEN 5947 : DEF_JDELAY; 5948 } 5949 block_unblock_all_queues(false); 5950 } 5951 return res; 5952 } 5953 return -EINVAL; 5954 } 5955 static DRIVER_ATTR_RW(ndelay); 5956 5957 static ssize_t opts_show(struct device_driver *ddp, char *buf) 5958 { 5959 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts); 5960 } 5961 5962 static ssize_t opts_store(struct device_driver *ddp, const char *buf, 5963 size_t count) 5964 { 5965 int opts; 5966 char work[20]; 5967 5968 if (sscanf(buf, "%10s", work) == 1) { 5969 if (strncasecmp(work, "0x", 2) == 0) { 5970 if (kstrtoint(work + 2, 16, &opts) == 0) 5971 goto opts_done; 5972 } else { 5973 if (kstrtoint(work, 10, &opts) == 0) 5974 goto opts_done; 5975 } 5976 } 5977 return -EINVAL; 5978 opts_done: 5979 sdebug_opts = opts; 5980 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); 5981 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); 5982 tweak_cmnd_count(); 5983 return count; 5984 } 5985 static DRIVER_ATTR_RW(opts); 5986 5987 static ssize_t ptype_show(struct device_driver *ddp, char *buf) 5988 { 5989 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype); 5990 } 5991 static ssize_t ptype_store(struct device_driver *ddp, const char *buf, 5992 size_t count) 5993 { 5994 int n; 5995 5996 /* Cannot change from or to TYPE_ZBC with sysfs */ 5997 if (sdebug_ptype == TYPE_ZBC) 5998 return -EINVAL; 5999 6000 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6001 if (n == TYPE_ZBC) 6002 return -EINVAL; 6003 sdebug_ptype = n; 6004 return count; 6005 } 6006 return -EINVAL; 6007 } 6008 static DRIVER_ATTR_RW(ptype); 6009 6010 static ssize_t dsense_show(struct device_driver *ddp, char *buf) 6011 { 6012 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense); 6013 } 6014 static ssize_t dsense_store(struct device_driver *ddp, const char *buf, 6015 size_t count) 6016 { 6017 int n; 6018 6019 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6020 sdebug_dsense = n; 6021 return count; 6022 } 6023 return -EINVAL; 6024 } 6025 static DRIVER_ATTR_RW(dsense); 6026 6027 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) 6028 { 6029 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw); 6030 } 6031 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, 6032 size_t count) 6033 { 6034 int n, idx; 6035 6036 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6037 bool want_store = (n == 0); 6038 struct sdebug_host_info *sdhp; 6039 6040 n = (n > 0); 6041 sdebug_fake_rw = (sdebug_fake_rw > 0); 6042 if (sdebug_fake_rw == n) 6043 return count; /* not transitioning so do nothing */ 6044 6045 if (want_store) { /* 1 --> 0 transition, set up store */ 6046 if (sdeb_first_idx < 0) { 6047 idx = sdebug_add_store(); 6048 if (idx < 0) 6049 return idx; 6050 } else { 6051 idx = sdeb_first_idx; 6052 xa_clear_mark(per_store_ap, idx, 6053 SDEB_XA_NOT_IN_USE); 6054 } 6055 /* make all hosts use same store */ 6056 list_for_each_entry(sdhp, &sdebug_host_list, 6057 host_list) { 6058 if (sdhp->si_idx != idx) { 6059 xa_set_mark(per_store_ap, sdhp->si_idx, 6060 SDEB_XA_NOT_IN_USE); 6061 sdhp->si_idx = idx; 6062 } 6063 } 6064 sdeb_most_recent_idx = idx; 6065 } else { /* 0 --> 1 transition is trigger for shrink */ 6066 sdebug_erase_all_stores(true /* apart from first */); 6067 } 6068 sdebug_fake_rw = n; 6069 return count; 6070 } 6071 return -EINVAL; 6072 } 6073 static DRIVER_ATTR_RW(fake_rw); 6074 6075 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) 6076 { 6077 return scnprintf(buf, PAGE_SIZE, "%d\n", 
sdebug_no_lun_0); 6078 } 6079 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, 6080 size_t count) 6081 { 6082 int n; 6083 6084 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6085 sdebug_no_lun_0 = n; 6086 return count; 6087 } 6088 return -EINVAL; 6089 } 6090 static DRIVER_ATTR_RW(no_lun_0); 6091 6092 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) 6093 { 6094 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts); 6095 } 6096 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, 6097 size_t count) 6098 { 6099 int n; 6100 6101 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6102 sdebug_num_tgts = n; 6103 sdebug_max_tgts_luns(); 6104 return count; 6105 } 6106 return -EINVAL; 6107 } 6108 static DRIVER_ATTR_RW(num_tgts); 6109 6110 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) 6111 { 6112 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb); 6113 } 6114 static DRIVER_ATTR_RO(dev_size_mb); 6115 6116 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf) 6117 { 6118 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store); 6119 } 6120 6121 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf, 6122 size_t count) 6123 { 6124 bool v; 6125 6126 if (kstrtobool(buf, &v)) 6127 return -EINVAL; 6128 6129 sdebug_per_host_store = v; 6130 return count; 6131 } 6132 static DRIVER_ATTR_RW(per_host_store); 6133 6134 static ssize_t num_parts_show(struct device_driver *ddp, char *buf) 6135 { 6136 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts); 6137 } 6138 static DRIVER_ATTR_RO(num_parts); 6139 6140 static ssize_t every_nth_show(struct device_driver *ddp, char *buf) 6141 { 6142 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth); 6143 } 6144 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, 6145 size_t count) 6146 { 6147 int nth; 6148 char work[20]; 6149 6150 if (sscanf(buf, "%10s", work) == 1) { 6151 if (strncasecmp(work, "0x", 2) == 0) { 6152 if (kstrtoint(work + 2, 16, &nth) == 0) 6153 goto every_nth_done; 6154 } else { 6155 if (kstrtoint(work, 10, &nth) == 0) 6156 goto every_nth_done; 6157 } 6158 } 6159 return -EINVAL; 6160 6161 every_nth_done: 6162 sdebug_every_nth = nth; 6163 if (nth && !sdebug_statistics) { 6164 pr_info("every_nth needs statistics=1, set it\n"); 6165 sdebug_statistics = true; 6166 } 6167 tweak_cmnd_count(); 6168 return count; 6169 } 6170 static DRIVER_ATTR_RW(every_nth); 6171 6172 static ssize_t lun_format_show(struct device_driver *ddp, char *buf) 6173 { 6174 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am); 6175 } 6176 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf, 6177 size_t count) 6178 { 6179 int n; 6180 bool changed; 6181 6182 if (kstrtoint(buf, 0, &n)) 6183 return -EINVAL; 6184 if (n >= 0) { 6185 if (n > (int)SAM_LUN_AM_FLAT) { 6186 pr_warn("only LUN address methods 0 and 1 are supported\n"); 6187 return -EINVAL; 6188 } 6189 changed = ((int)sdebug_lun_am != n); 6190 sdebug_lun_am = n; 6191 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */ 6192 struct sdebug_host_info *sdhp; 6193 struct sdebug_dev_info *dp; 6194 6195 spin_lock(&sdebug_host_list_lock); 6196 list_for_each_entry(sdhp, &sdebug_host_list, host_list) { 6197 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { 6198 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); 6199 } 6200 } 6201 spin_unlock(&sdebug_host_list_lock); 6202 } 6203 return count; 6204 } 6205 
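/* n < 0 is rejected */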
return -EINVAL; 6206 } 6207 static DRIVER_ATTR_RW(lun_format); 6208 6209 static ssize_t max_luns_show(struct device_driver *ddp, char *buf) 6210 { 6211 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns); 6212 } 6213 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, 6214 size_t count) 6215 { 6216 int n; 6217 bool changed; 6218 6219 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6220 if (n > 256) { 6221 pr_warn("max_luns can be no more than 256\n"); 6222 return -EINVAL; 6223 } 6224 changed = (sdebug_max_luns != n); 6225 sdebug_max_luns = n; 6226 sdebug_max_tgts_luns(); 6227 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ 6228 struct sdebug_host_info *sdhp; 6229 struct sdebug_dev_info *dp; 6230 6231 spin_lock(&sdebug_host_list_lock); 6232 list_for_each_entry(sdhp, &sdebug_host_list, 6233 host_list) { 6234 list_for_each_entry(dp, &sdhp->dev_info_list, 6235 dev_list) { 6236 set_bit(SDEBUG_UA_LUNS_CHANGED, 6237 dp->uas_bm); 6238 } 6239 } 6240 spin_unlock(&sdebug_host_list_lock); 6241 } 6242 return count; 6243 } 6244 return -EINVAL; 6245 } 6246 static DRIVER_ATTR_RW(max_luns); 6247 6248 static ssize_t max_queue_show(struct device_driver *ddp, char *buf) 6249 { 6250 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue); 6251 } 6252 /* N.B. max_queue can be changed while there are queued commands. In flight 6253 * commands beyond the new max_queue will be completed. */ 6254 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, 6255 size_t count) 6256 { 6257 int j, n, k, a; 6258 struct sdebug_queue *sqp; 6259 6260 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && 6261 (n <= SDEBUG_CANQUEUE) && 6262 (sdebug_host_max_queue == 0)) { 6263 block_unblock_all_queues(true); 6264 k = 0; 6265 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; 6266 ++j, ++sqp) { 6267 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE); 6268 if (a > k) 6269 k = a; 6270 } 6271 sdebug_max_queue = n; 6272 if (k == SDEBUG_CANQUEUE) 6273 atomic_set(&retired_max_queue, 0); 6274 else if (k >= n) 6275 atomic_set(&retired_max_queue, k + 1); 6276 else 6277 atomic_set(&retired_max_queue, 0); 6278 block_unblock_all_queues(false); 6279 return count; 6280 } 6281 return -EINVAL; 6282 } 6283 static DRIVER_ATTR_RW(max_queue); 6284 6285 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf) 6286 { 6287 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue); 6288 } 6289 6290 /* 6291 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap 6292 * in range [0, sdebug_host_max_queue), we can't change it. 
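 * The value can only be supplied at load time, e.g.
 * "modprobe scsi_debug host_max_queue=8" (the value 8 is only illustrative).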
6293 */ 6294 static DRIVER_ATTR_RO(host_max_queue); 6295 6296 static ssize_t no_uld_show(struct device_driver *ddp, char *buf) 6297 { 6298 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld); 6299 } 6300 static DRIVER_ATTR_RO(no_uld); 6301 6302 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) 6303 { 6304 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level); 6305 } 6306 static DRIVER_ATTR_RO(scsi_level); 6307 6308 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) 6309 { 6310 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb); 6311 } 6312 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, 6313 size_t count) 6314 { 6315 int n; 6316 bool changed; 6317 6318 /* Ignore capacity change for ZBC drives for now */ 6319 if (sdeb_zbc_in_use) 6320 return -ENOTSUPP; 6321 6322 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6323 changed = (sdebug_virtual_gb != n); 6324 sdebug_virtual_gb = n; 6325 sdebug_capacity = get_sdebug_capacity(); 6326 if (changed) { 6327 struct sdebug_host_info *sdhp; 6328 struct sdebug_dev_info *dp; 6329 6330 spin_lock(&sdebug_host_list_lock); 6331 list_for_each_entry(sdhp, &sdebug_host_list, 6332 host_list) { 6333 list_for_each_entry(dp, &sdhp->dev_info_list, 6334 dev_list) { 6335 set_bit(SDEBUG_UA_CAPACITY_CHANGED, 6336 dp->uas_bm); 6337 } 6338 } 6339 spin_unlock(&sdebug_host_list_lock); 6340 } 6341 return count; 6342 } 6343 return -EINVAL; 6344 } 6345 static DRIVER_ATTR_RW(virtual_gb); 6346 6347 static ssize_t add_host_show(struct device_driver *ddp, char *buf) 6348 { 6349 /* absolute number of hosts currently active is what is shown */ 6350 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts); 6351 } 6352 6353 static ssize_t add_host_store(struct device_driver *ddp, const char *buf, 6354 size_t count) 6355 { 6356 bool found; 6357 unsigned long idx; 6358 struct sdeb_store_info *sip; 6359 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store; 6360 int delta_hosts; 6361 6362 if (sscanf(buf, "%d", &delta_hosts) != 1) 6363 return -EINVAL; 6364 if (delta_hosts > 0) { 6365 do { 6366 found = false; 6367 if (want_phs) { 6368 xa_for_each_marked(per_store_ap, idx, sip, 6369 SDEB_XA_NOT_IN_USE) { 6370 sdeb_most_recent_idx = (int)idx; 6371 found = true; 6372 break; 6373 } 6374 if (found) /* re-use case */ 6375 sdebug_add_host_helper((int)idx); 6376 else 6377 sdebug_do_add_host(true); 6378 } else { 6379 sdebug_do_add_host(false); 6380 } 6381 } while (--delta_hosts); 6382 } else if (delta_hosts < 0) { 6383 do { 6384 sdebug_do_remove_host(false); 6385 } while (++delta_hosts); 6386 } 6387 return count; 6388 } 6389 static DRIVER_ATTR_RW(add_host); 6390 6391 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) 6392 { 6393 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno); 6394 } 6395 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, 6396 size_t count) 6397 { 6398 int n; 6399 6400 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6401 sdebug_vpd_use_hostno = n; 6402 return count; 6403 } 6404 return -EINVAL; 6405 } 6406 static DRIVER_ATTR_RW(vpd_use_hostno); 6407 6408 static ssize_t statistics_show(struct device_driver *ddp, char *buf) 6409 { 6410 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics); 6411 } 6412 static ssize_t statistics_store(struct device_driver *ddp, const char *buf, 6413 size_t count) 6414 { 6415 int n; 6416 6417 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { 
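/* n > 0 enables collection; n == 0 clears the counters and disables it */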
6418 if (n > 0) 6419 sdebug_statistics = true; 6420 else { 6421 clear_queue_stats(); 6422 sdebug_statistics = false; 6423 } 6424 return count; 6425 } 6426 return -EINVAL; 6427 } 6428 static DRIVER_ATTR_RW(statistics); 6429 6430 static ssize_t sector_size_show(struct device_driver *ddp, char *buf) 6431 { 6432 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size); 6433 } 6434 static DRIVER_ATTR_RO(sector_size); 6435 6436 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf) 6437 { 6438 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues); 6439 } 6440 static DRIVER_ATTR_RO(submit_queues); 6441 6442 static ssize_t dix_show(struct device_driver *ddp, char *buf) 6443 { 6444 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix); 6445 } 6446 static DRIVER_ATTR_RO(dix); 6447 6448 static ssize_t dif_show(struct device_driver *ddp, char *buf) 6449 { 6450 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif); 6451 } 6452 static DRIVER_ATTR_RO(dif); 6453 6454 static ssize_t guard_show(struct device_driver *ddp, char *buf) 6455 { 6456 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard); 6457 } 6458 static DRIVER_ATTR_RO(guard); 6459 6460 static ssize_t ato_show(struct device_driver *ddp, char *buf) 6461 { 6462 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato); 6463 } 6464 static DRIVER_ATTR_RO(ato); 6465 6466 static ssize_t map_show(struct device_driver *ddp, char *buf) 6467 { 6468 ssize_t count = 0; 6469 6470 if (!scsi_debug_lbp()) 6471 return scnprintf(buf, PAGE_SIZE, "0-%u\n", 6472 sdebug_store_sectors); 6473 6474 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) { 6475 struct sdeb_store_info *sip = xa_load(per_store_ap, 0); 6476 6477 if (sip) 6478 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", 6479 (int)map_size, sip->map_storep); 6480 } 6481 buf[count++] = '\n'; 6482 buf[count] = '\0'; 6483 6484 return count; 6485 } 6486 static DRIVER_ATTR_RO(map); 6487 6488 static ssize_t random_show(struct device_driver *ddp, char *buf) 6489 { 6490 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random); 6491 } 6492 6493 static ssize_t random_store(struct device_driver *ddp, const char *buf, 6494 size_t count) 6495 { 6496 bool v; 6497 6498 if (kstrtobool(buf, &v)) 6499 return -EINVAL; 6500 6501 sdebug_random = v; 6502 return count; 6503 } 6504 static DRIVER_ATTR_RW(random); 6505 6506 static ssize_t removable_show(struct device_driver *ddp, char *buf) 6507 { 6508 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0); 6509 } 6510 static ssize_t removable_store(struct device_driver *ddp, const char *buf, 6511 size_t count) 6512 { 6513 int n; 6514 6515 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6516 sdebug_removable = (n > 0); 6517 return count; 6518 } 6519 return -EINVAL; 6520 } 6521 static DRIVER_ATTR_RW(removable); 6522 6523 static ssize_t host_lock_show(struct device_driver *ddp, char *buf) 6524 { 6525 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock); 6526 } 6527 /* N.B. 
sdebug_host_lock does nothing, kept for backward compatibility */ 6528 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, 6529 size_t count) 6530 { 6531 int n; 6532 6533 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6534 sdebug_host_lock = (n > 0); 6535 return count; 6536 } 6537 return -EINVAL; 6538 } 6539 static DRIVER_ATTR_RW(host_lock); 6540 6541 static ssize_t strict_show(struct device_driver *ddp, char *buf) 6542 { 6543 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict); 6544 } 6545 static ssize_t strict_store(struct device_driver *ddp, const char *buf, 6546 size_t count) 6547 { 6548 int n; 6549 6550 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 6551 sdebug_strict = (n > 0); 6552 return count; 6553 } 6554 return -EINVAL; 6555 } 6556 static DRIVER_ATTR_RW(strict); 6557 6558 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf) 6559 { 6560 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl); 6561 } 6562 static DRIVER_ATTR_RO(uuid_ctl); 6563 6564 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf) 6565 { 6566 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len); 6567 } 6568 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf, 6569 size_t count) 6570 { 6571 int ret, n; 6572 6573 ret = kstrtoint(buf, 0, &n); 6574 if (ret) 6575 return ret; 6576 sdebug_cdb_len = n; 6577 all_config_cdb_len(); 6578 return count; 6579 } 6580 static DRIVER_ATTR_RW(cdb_len); 6581 6582 static const char * const zbc_model_strs_a[] = { 6583 [BLK_ZONED_NONE] = "none", 6584 [BLK_ZONED_HA] = "host-aware", 6585 [BLK_ZONED_HM] = "host-managed", 6586 }; 6587 6588 static const char * const zbc_model_strs_b[] = { 6589 [BLK_ZONED_NONE] = "no", 6590 [BLK_ZONED_HA] = "aware", 6591 [BLK_ZONED_HM] = "managed", 6592 }; 6593 6594 static const char * const zbc_model_strs_c[] = { 6595 [BLK_ZONED_NONE] = "0", 6596 [BLK_ZONED_HA] = "1", 6597 [BLK_ZONED_HM] = "2", 6598 }; 6599 6600 static int sdeb_zbc_model_str(const char *cp) 6601 { 6602 int res = sysfs_match_string(zbc_model_strs_a, cp); 6603 6604 if (res < 0) { 6605 res = sysfs_match_string(zbc_model_strs_b, cp); 6606 if (res < 0) { 6607 res = sysfs_match_string(zbc_model_strs_c, cp); 6608 if (res < 0) 6609 return -EINVAL; 6610 } 6611 } 6612 return res; 6613 } 6614 6615 static ssize_t zbc_show(struct device_driver *ddp, char *buf) 6616 { 6617 return scnprintf(buf, PAGE_SIZE, "%s\n", 6618 zbc_model_strs_a[sdeb_zbc_model]); 6619 } 6620 static DRIVER_ATTR_RO(zbc); 6621 6622 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf) 6623 { 6624 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready); 6625 } 6626 static DRIVER_ATTR_RO(tur_ms_to_ready); 6627 6628 /* Note: The following array creates attribute files in the 6629 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 6630 files (over those found in the /sys/module/scsi_debug/parameters 6631 directory) is that auxiliary actions can be triggered when an attribute 6632 is changed. For example see: add_host_store() above. 
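 * For example, "echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host" adds
 * one more host, while writing a negative value removes host(s).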
6633 */ 6634 6635 static struct attribute *sdebug_drv_attrs[] = { 6636 &driver_attr_delay.attr, 6637 &driver_attr_opts.attr, 6638 &driver_attr_ptype.attr, 6639 &driver_attr_dsense.attr, 6640 &driver_attr_fake_rw.attr, 6641 &driver_attr_host_max_queue.attr, 6642 &driver_attr_no_lun_0.attr, 6643 &driver_attr_num_tgts.attr, 6644 &driver_attr_dev_size_mb.attr, 6645 &driver_attr_num_parts.attr, 6646 &driver_attr_every_nth.attr, 6647 &driver_attr_lun_format.attr, 6648 &driver_attr_max_luns.attr, 6649 &driver_attr_max_queue.attr, 6650 &driver_attr_no_uld.attr, 6651 &driver_attr_scsi_level.attr, 6652 &driver_attr_virtual_gb.attr, 6653 &driver_attr_add_host.attr, 6654 &driver_attr_per_host_store.attr, 6655 &driver_attr_vpd_use_hostno.attr, 6656 &driver_attr_sector_size.attr, 6657 &driver_attr_statistics.attr, 6658 &driver_attr_submit_queues.attr, 6659 &driver_attr_dix.attr, 6660 &driver_attr_dif.attr, 6661 &driver_attr_guard.attr, 6662 &driver_attr_ato.attr, 6663 &driver_attr_map.attr, 6664 &driver_attr_random.attr, 6665 &driver_attr_removable.attr, 6666 &driver_attr_host_lock.attr, 6667 &driver_attr_ndelay.attr, 6668 &driver_attr_strict.attr, 6669 &driver_attr_uuid_ctl.attr, 6670 &driver_attr_cdb_len.attr, 6671 &driver_attr_tur_ms_to_ready.attr, 6672 &driver_attr_zbc.attr, 6673 NULL, 6674 }; 6675 ATTRIBUTE_GROUPS(sdebug_drv); 6676 6677 static struct device *pseudo_primary; 6678 6679 static int __init scsi_debug_init(void) 6680 { 6681 bool want_store = (sdebug_fake_rw == 0); 6682 unsigned long sz; 6683 int k, ret, hosts_to_add; 6684 int idx = -1; 6685 6686 ramdisk_lck_a[0] = &atomic_rw; 6687 ramdisk_lck_a[1] = &atomic_rw2; 6688 atomic_set(&retired_max_queue, 0); 6689 6690 if (sdebug_ndelay >= 1000 * 1000 * 1000) { 6691 pr_warn("ndelay must be less than 1 second, ignored\n"); 6692 sdebug_ndelay = 0; 6693 } else if (sdebug_ndelay > 0) 6694 sdebug_jdelay = JDELAY_OVERRIDDEN; 6695 6696 switch (sdebug_sector_size) { 6697 case 512: 6698 case 1024: 6699 case 2048: 6700 case 4096: 6701 break; 6702 default: 6703 pr_err("invalid sector_size %d\n", sdebug_sector_size); 6704 return -EINVAL; 6705 } 6706 6707 switch (sdebug_dif) { 6708 case T10_PI_TYPE0_PROTECTION: 6709 break; 6710 case T10_PI_TYPE1_PROTECTION: 6711 case T10_PI_TYPE2_PROTECTION: 6712 case T10_PI_TYPE3_PROTECTION: 6713 have_dif_prot = true; 6714 break; 6715 6716 default: 6717 pr_err("dif must be 0, 1, 2 or 3\n"); 6718 return -EINVAL; 6719 } 6720 6721 if (sdebug_num_tgts < 0) { 6722 pr_err("num_tgts must be >= 0\n"); 6723 return -EINVAL; 6724 } 6725 6726 if (sdebug_guard > 1) { 6727 pr_err("guard must be 0 or 1\n"); 6728 return -EINVAL; 6729 } 6730 6731 if (sdebug_ato > 1) { 6732 pr_err("ato must be 0 or 1\n"); 6733 return -EINVAL; 6734 } 6735 6736 if (sdebug_physblk_exp > 15) { 6737 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp); 6738 return -EINVAL; 6739 } 6740 6741 sdebug_lun_am = sdebug_lun_am_i; 6742 if (sdebug_lun_am > SAM_LUN_AM_FLAT) { 6743 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am); 6744 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL; 6745 } 6746 6747 if (sdebug_max_luns > 256) { 6748 if (sdebug_max_luns > 16384) { 6749 pr_warn("max_luns can be no more than 16384, use default\n"); 6750 sdebug_max_luns = DEF_MAX_LUNS; 6751 } 6752 sdebug_lun_am = SAM_LUN_AM_FLAT; 6753 } 6754 6755 if (sdebug_lowest_aligned > 0x3fff) { 6756 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned); 6757 return -EINVAL; 6758 } 6759 6760 if (submit_queues < 1) { 6761 pr_err("submit_queues must be 1 or more\n"); 6762 return -EINVAL; 
6763 } 6764 6765 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { 6766 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); 6767 return -EINVAL; 6768 } 6769 6770 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) || 6771 (sdebug_host_max_queue < 0)) { 6772 pr_err("host_max_queue must be in range [0 %d]\n", 6773 SDEBUG_CANQUEUE); 6774 return -EINVAL; 6775 } 6776 6777 if (sdebug_host_max_queue && 6778 (sdebug_max_queue != sdebug_host_max_queue)) { 6779 sdebug_max_queue = sdebug_host_max_queue; 6780 pr_warn("fixing max submit queue depth to host max queue depth, %d\n", 6781 sdebug_max_queue); 6782 } 6783 6784 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), 6785 GFP_KERNEL); 6786 if (sdebug_q_arr == NULL) 6787 return -ENOMEM; 6788 for (k = 0; k < submit_queues; ++k) 6789 spin_lock_init(&sdebug_q_arr[k].qc_lock); 6790 6791 /* 6792 * check for host managed zoned block device specified with 6793 * ptype=0x14 or zbc=XXX. 6794 */ 6795 if (sdebug_ptype == TYPE_ZBC) { 6796 sdeb_zbc_model = BLK_ZONED_HM; 6797 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) { 6798 k = sdeb_zbc_model_str(sdeb_zbc_model_s); 6799 if (k < 0) { 6800 ret = k; 6801 goto free_q_arr; 6802 } 6803 sdeb_zbc_model = k; 6804 switch (sdeb_zbc_model) { 6805 case BLK_ZONED_NONE: 6806 case BLK_ZONED_HA: 6807 sdebug_ptype = TYPE_DISK; 6808 break; 6809 case BLK_ZONED_HM: 6810 sdebug_ptype = TYPE_ZBC; 6811 break; 6812 default: 6813 pr_err("Invalid ZBC model\n"); 6814 ret = -EINVAL; 6815 goto free_q_arr; 6816 } 6817 } 6818 if (sdeb_zbc_model != BLK_ZONED_NONE) { 6819 sdeb_zbc_in_use = true; 6820 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT) 6821 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB; 6822 } 6823 6824 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT) 6825 sdebug_dev_size_mb = DEF_DEV_SIZE_MB; 6826 if (sdebug_dev_size_mb < 1) 6827 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ 6828 sz = (unsigned long)sdebug_dev_size_mb * 1048576; 6829 sdebug_store_sectors = sz / sdebug_sector_size; 6830 sdebug_capacity = get_sdebug_capacity(); 6831 6832 /* play around with geometry, don't waste too much on track 0 */ 6833 sdebug_heads = 8; 6834 sdebug_sectors_per = 32; 6835 if (sdebug_dev_size_mb >= 256) 6836 sdebug_heads = 64; 6837 else if (sdebug_dev_size_mb >= 16) 6838 sdebug_heads = 32; 6839 sdebug_cylinders_per = (unsigned long)sdebug_capacity / 6840 (sdebug_sectors_per * sdebug_heads); 6841 if (sdebug_cylinders_per >= 1024) { 6842 /* other LLDs do this; implies >= 1GB ram disk ... 
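(with 64 heads, 32 sectors per track and 512 byte blocks, 1024 cylinders is exactly 1 GiB) 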
*/ 6843 sdebug_heads = 255; 6844 sdebug_sectors_per = 63; 6845 sdebug_cylinders_per = (unsigned long)sdebug_capacity / 6846 (sdebug_sectors_per * sdebug_heads); 6847 } 6848 if (scsi_debug_lbp()) { 6849 sdebug_unmap_max_blocks = 6850 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU); 6851 6852 sdebug_unmap_max_desc = 6853 clamp(sdebug_unmap_max_desc, 0U, 256U); 6854 6855 sdebug_unmap_granularity = 6856 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU); 6857 6858 if (sdebug_unmap_alignment && 6859 sdebug_unmap_granularity <= 6860 sdebug_unmap_alignment) { 6861 pr_err("ERR: unmap_granularity <= unmap_alignment\n"); 6862 ret = -EINVAL; 6863 goto free_q_arr; 6864 } 6865 } 6866 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ); 6867 if (want_store) { 6868 idx = sdebug_add_store(); 6869 if (idx < 0) { 6870 ret = idx; 6871 goto free_q_arr; 6872 } 6873 } 6874 6875 pseudo_primary = root_device_register("pseudo_0"); 6876 if (IS_ERR(pseudo_primary)) { 6877 pr_warn("root_device_register() error\n"); 6878 ret = PTR_ERR(pseudo_primary); 6879 goto free_vm; 6880 } 6881 ret = bus_register(&pseudo_lld_bus); 6882 if (ret < 0) { 6883 pr_warn("bus_register error: %d\n", ret); 6884 goto dev_unreg; 6885 } 6886 ret = driver_register(&sdebug_driverfs_driver); 6887 if (ret < 0) { 6888 pr_warn("driver_register error: %d\n", ret); 6889 goto bus_unreg; 6890 } 6891 6892 hosts_to_add = sdebug_add_host; 6893 sdebug_add_host = 0; 6894 6895 for (k = 0; k < hosts_to_add; k++) { 6896 if (want_store && k == 0) { 6897 ret = sdebug_add_host_helper(idx); 6898 if (ret < 0) { 6899 pr_err("add_host_helper k=%d, error=%d\n", 6900 k, -ret); 6901 break; 6902 } 6903 } else { 6904 ret = sdebug_do_add_host(want_store && 6905 sdebug_per_host_store); 6906 if (ret < 0) { 6907 pr_err("add_host k=%d error=%d\n", k, -ret); 6908 break; 6909 } 6910 } 6911 } 6912 if (sdebug_verbose) 6913 pr_info("built %d host(s)\n", sdebug_num_hosts); 6914 6915 return 0; 6916 6917 bus_unreg: 6918 bus_unregister(&pseudo_lld_bus); 6919 dev_unreg: 6920 root_device_unregister(pseudo_primary); 6921 free_vm: 6922 sdebug_erase_store(idx, NULL); 6923 free_q_arr: 6924 kfree(sdebug_q_arr); 6925 return ret; 6926 } 6927 6928 static void __exit scsi_debug_exit(void) 6929 { 6930 int k = sdebug_num_hosts; 6931 6932 stop_all_queued(); 6933 for (; k; k--) 6934 sdebug_do_remove_host(true); 6935 free_all_queued(); 6936 driver_unregister(&sdebug_driverfs_driver); 6937 bus_unregister(&pseudo_lld_bus); 6938 root_device_unregister(pseudo_primary); 6939 6940 sdebug_erase_all_stores(false); 6941 xa_destroy(per_store_ap); 6942 kfree(sdebug_q_arr); 6943 } 6944 6945 device_initcall(scsi_debug_init); 6946 module_exit(scsi_debug_exit); 6947 6948 static void sdebug_release_adapter(struct device *dev) 6949 { 6950 struct sdebug_host_info *sdbg_host; 6951 6952 sdbg_host = to_sdebug_host(dev); 6953 kfree(sdbg_host); 6954 } 6955 6956 /* idx must be valid, if sip is NULL then it will be obtained using idx */ 6957 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip) 6958 { 6959 if (idx < 0) 6960 return; 6961 if (!sip) { 6962 if (xa_empty(per_store_ap)) 6963 return; 6964 sip = xa_load(per_store_ap, idx); 6965 if (!sip) 6966 return; 6967 } 6968 vfree(sip->map_storep); 6969 vfree(sip->dif_storep); 6970 vfree(sip->storep); 6971 xa_erase(per_store_ap, idx); 6972 kfree(sip); 6973 } 6974 6975 /* Assume apart_from_first==false only in shutdown case. 
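When true (the fake_rw 0 -> 1 shrink triggered in fake_rw_store()) the first store is kept and all others are erased. 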
*/ 6976 static void sdebug_erase_all_stores(bool apart_from_first) 6977 { 6978 unsigned long idx; 6979 struct sdeb_store_info *sip = NULL; 6980 6981 xa_for_each(per_store_ap, idx, sip) { 6982 if (apart_from_first) 6983 apart_from_first = false; 6984 else 6985 sdebug_erase_store(idx, sip); 6986 } 6987 if (apart_from_first) 6988 sdeb_most_recent_idx = sdeb_first_idx; 6989 } 6990 6991 /* 6992 * Returns store xarray new element index (idx) if >=0 else negated errno. 6993 * Limit the number of stores to 65536. 6994 */ 6995 static int sdebug_add_store(void) 6996 { 6997 int res; 6998 u32 n_idx; 6999 unsigned long iflags; 7000 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576; 7001 struct sdeb_store_info *sip = NULL; 7002 struct xa_limit xal = { .max = 1 << 16, .min = 0 }; 7003 7004 sip = kzalloc(sizeof(*sip), GFP_KERNEL); 7005 if (!sip) 7006 return -ENOMEM; 7007 7008 xa_lock_irqsave(per_store_ap, iflags); 7009 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC); 7010 if (unlikely(res < 0)) { 7011 xa_unlock_irqrestore(per_store_ap, iflags); 7012 kfree(sip); 7013 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res); 7014 return res; 7015 } 7016 sdeb_most_recent_idx = n_idx; 7017 if (sdeb_first_idx < 0) 7018 sdeb_first_idx = n_idx; 7019 xa_unlock_irqrestore(per_store_ap, iflags); 7020 7021 res = -ENOMEM; 7022 sip->storep = vzalloc(sz); 7023 if (!sip->storep) { 7024 pr_err("user data oom\n"); 7025 goto err; 7026 } 7027 if (sdebug_num_parts > 0) 7028 sdebug_build_parts(sip->storep, sz); 7029 7030 /* DIF/DIX: what T10 calls Protection Information (PI) */ 7031 if (sdebug_dix) { 7032 int dif_size; 7033 7034 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple); 7035 sip->dif_storep = vmalloc(dif_size); 7036 7037 pr_info("dif_storep %u bytes @ %pK\n", dif_size, 7038 sip->dif_storep); 7039 7040 if (!sip->dif_storep) { 7041 pr_err("DIX oom\n"); 7042 goto err; 7043 } 7044 memset(sip->dif_storep, 0xff, dif_size); 7045 } 7046 /* Logical Block Provisioning */ 7047 if (scsi_debug_lbp()) { 7048 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; 7049 sip->map_storep = vmalloc(array_size(sizeof(long), 7050 BITS_TO_LONGS(map_size))); 7051 7052 pr_info("%lu provisioning blocks\n", map_size); 7053 7054 if (!sip->map_storep) { 7055 pr_err("LBP map oom\n"); 7056 goto err; 7057 } 7058 7059 bitmap_zero(sip->map_storep, map_size); 7060 7061 /* Map first 1KB for partition table */ 7062 if (sdebug_num_parts) 7063 map_region(sip, 0, 2); 7064 } 7065 7066 rwlock_init(&sip->macc_lck); 7067 return (int)n_idx; 7068 err: 7069 sdebug_erase_store((int)n_idx, sip); 7070 pr_warn("%s: failed, errno=%d\n", __func__, -res); 7071 return res; 7072 } 7073 7074 static int sdebug_add_host_helper(int per_host_idx) 7075 { 7076 int k, devs_per_host, idx; 7077 int error = -ENOMEM; 7078 struct sdebug_host_info *sdbg_host; 7079 struct sdebug_dev_info *sdbg_devinfo, *tmp; 7080 7081 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL); 7082 if (!sdbg_host) 7083 return -ENOMEM; 7084 idx = (per_host_idx < 0) ? 

static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}

static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}

static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		/* log the requested value before clamping it */
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n",
			__func__, qdepth, SDEBUG_CANQUEUE);
		qdepth = SDEBUG_CANQUEUE;
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
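
/*
 * Example (illustrative values): loading the module with
 *
 *	modprobe scsi_debug every_nth=100 opts=4
 *
 * (assuming opts bit 0x4 is SDEBUG_OPT_TIMEOUT, as defined earlier in this
 * file) makes fake_timeout() below swallow roughly every 100th command,
 * which exercises the mid-layer timeout and abort paths. As the clamp in
 * fake_timeout() suggests, a negative every_nth injects approximately once.
 */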

static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n",
				    my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer,
						   SCSI_SENSE_BUFFERSIZE, diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp,
			    "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}

static int sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return 0;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}

	return 0;
}
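
/*
 * Worked example for sdebug_map_queues() (illustrative values): with
 * submit_queues=8 and poll_queues=2, HCTX_TYPE_DEFAULT gets 6 queues at
 * queue_offset 0, HCTX_TYPE_POLL gets 2 queues at queue_offset 6, and
 * HCTX_TYPE_READ keeps nr_queues == 0 so reads share the default map.
 */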

static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;
	spin_lock_irqsave(&sqp->qc_lock, iflags);

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
		if (first) {
			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
			first = false;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue,
					       qc_idx + 1);
		}
		if (unlikely(qc_idx >= sdebug_max_queue))
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (sd_dp->defer_t == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;
		} else		/* ignoring non REQ_HIPRI requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		sd_dp->defer_t = SDEB_DEFER_NONE;
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		num_entries++;
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
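
/*
 * Note on the poll path above: a command deferred as SDEB_DEFER_POLL is
 * completed here, once its cmpl_ts deadline has passed, rather than by the
 * timer or work-queue paths used for the other deferral types; everything
 * else found in the queue is skipped ("ignoring non REQ_HIPRI requests").
 */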

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
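		/*
		 * Worked example (illustrative, assuming HZ=250 and
		 * USER_HZ=100): jdelay=4 with a SYNCHRONIZE CACHE command
		 * (denom=20) gives mult_frac(100 * 4, 250, 20 * 100) = 50
		 * jiffies, i.e. 0.2 seconds, one twentieth of the 4 second
		 * delay a START STOP UNIT (denom=1) would get.
		 */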

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}

static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,
	.max_sectors = -1U,
	.max_segment_size = -1U,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O
	 * queue left over for non-polled I/O.
	 * If that condition is not met, trim poll_queues to 1 (just for
	 * simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;
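	/*
	 * nr_maps == 3 makes the tag set's map array span HCTX_TYPE_POLL
	 * (index 2); sdebug_map_queues() above leaves the intermediate
	 * HCTX_TYPE_READ slot empty.
	 */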

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

static void sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
}

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
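
/*
 * Summary of the pseudo bus plumbing above: pseudo_lld_bus_match() accepts
 * every driver/device pairing, so as soon as sdebug_add_host_helper()
 * registers an "adapter%d" device on this bus the driver core invokes
 * sdebug_driver_probe(), which allocates, configures and scans the
 * corresponding Scsi_Host.
 */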