1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright(c) 2007 Intel Corporation. All rights reserved. 4 * Copyright(c) 2008 Red Hat, Inc. All rights reserved. 5 * Copyright(c) 2008 Mike Christie 6 * 7 * Maintained at www.Open-FCoE.org 8 */ 9 10 #include <linux/module.h> 11 #include <linux/delay.h> 12 #include <linux/kernel.h> 13 #include <linux/types.h> 14 #include <linux/spinlock.h> 15 #include <linux/scatterlist.h> 16 #include <linux/err.h> 17 #include <linux/crc32.h> 18 #include <linux/slab.h> 19 20 #include <scsi/scsi_tcq.h> 21 #include <scsi/scsi.h> 22 #include <scsi/scsi_host.h> 23 #include <scsi/scsi_device.h> 24 #include <scsi/scsi_cmnd.h> 25 26 #include <scsi/fc/fc_fc2.h> 27 28 #include <scsi/libfc.h> 29 #include <scsi/fc_encode.h> 30 31 #include "fc_libfc.h" 32 33 static struct kmem_cache *scsi_pkt_cachep; 34 35 /* SRB state definitions */ 36 #define FC_SRB_FREE 0 /* cmd is free */ 37 #define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */ 38 #define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */ 39 #define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */ 40 #define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */ 41 #define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ 42 #define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ 43 #define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ 44 45 #define FC_SRB_READ (1 << 1) 46 #define FC_SRB_WRITE (1 << 0) 47 48 /* 49 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock 50 */ 51 #define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) 52 #define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) 53 #define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual) 54 #define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) 55 #define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual) 56 57 /** 58 * struct fc_fcp_internal - FCP layer internal data 59 * @scsi_pkt_pool: Memory pool to draw FCP packets from 60 * @scsi_queue_lock: Protects the scsi_pkt_queue 61 * @scsi_pkt_queue: Current FCP packets 62 * @last_can_queue_ramp_down_time: ramp down time 63 * @last_can_queue_ramp_up_time: ramp up time 64 * @max_can_queue: max can_queue size 65 */ 66 struct fc_fcp_internal { 67 mempool_t *scsi_pkt_pool; 68 spinlock_t scsi_queue_lock; 69 struct list_head scsi_pkt_queue; 70 unsigned long last_can_queue_ramp_down_time; 71 unsigned long last_can_queue_ramp_up_time; 72 int max_can_queue; 73 }; 74 75 #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 76 77 /* 78 * function prototypes 79 * FC scsi I/O related functions 80 */ 81 static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *); 82 static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *); 83 static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); 84 static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 85 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 86 static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); 87 static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code); 88 static void fc_fcp_timeout(struct timer_list *); 89 static void fc_fcp_rec(struct fc_fcp_pkt *); 90 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 91 static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); 92 static void fc_io_compl(struct fc_fcp_pkt *); 93 94 static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32); 95 static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *); 96 static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); 97 98 /* 99 * command status codes 100 */ 101 #define FC_COMPLETE 0 102 #define FC_CMD_ABORTED 1 103 #define FC_CMD_RESET 2 104 #define FC_CMD_PLOGO 3 105 #define FC_SNS_RCV 4 106 #define FC_TRANS_ERR 5 107 #define FC_DATA_OVRRUN 6 108 #define FC_DATA_UNDRUN 7 109 #define FC_ERROR 8 110 #define FC_HRD_ERROR 9 111 #define FC_CRC_ERROR 10 112 #define FC_TIMED_OUT 11 113 #define FC_TRANS_RESET 12 114 115 /* 116 * Error recovery timeout values. 117 */ 118 #define FC_SCSI_TM_TOV (10 * HZ) 119 #define FC_HOST_RESET_TIMEOUT (30 * HZ) 120 #define FC_CAN_QUEUE_PERIOD (60 * HZ) 121 122 #define FC_MAX_ERROR_CNT 5 123 #define FC_MAX_RECOV_RETRY 3 124 125 #define FC_FCP_DFLT_QUEUE_DEPTH 32 126 127 /** 128 * fc_fcp_pkt_alloc() - Allocate a fcp_pkt 129 * @lport: The local port that the FCP packet is for 130 * @gfp: GFP flags for allocation 131 * 132 * Return value: fcp_pkt structure or null on allocation failure. 133 * Context: Can be called from process context, no lock is required. 134 */ 135 static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) 136 { 137 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 138 struct fc_fcp_pkt *fsp; 139 140 fsp = mempool_alloc(si->scsi_pkt_pool, gfp); 141 if (fsp) { 142 memset(fsp, 0, sizeof(*fsp)); 143 fsp->lp = lport; 144 fsp->xfer_ddp = FC_XID_UNKNOWN; 145 refcount_set(&fsp->ref_cnt, 1); 146 timer_setup(&fsp->timer, NULL, 0); 147 INIT_LIST_HEAD(&fsp->list); 148 spin_lock_init(&fsp->scsi_pkt_lock); 149 } else { 150 per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; 151 put_cpu(); 152 } 153 return fsp; 154 } 155 156 /** 157 * fc_fcp_pkt_release() - Release hold on a fcp_pkt 158 * @fsp: The FCP packet to be released 159 * 160 * Context: Can be called from process or interrupt context, 161 * no lock is required. 162 */ 163 static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) 164 { 165 if (refcount_dec_and_test(&fsp->ref_cnt)) { 166 struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp); 167 168 mempool_free(fsp, si->scsi_pkt_pool); 169 } 170 } 171 172 /** 173 * fc_fcp_pkt_hold() - Hold a fcp_pkt 174 * @fsp: The FCP packet to be held 175 */ 176 static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) 177 { 178 refcount_inc(&fsp->ref_cnt); 179 } 180 181 /** 182 * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt 183 * @seq: The sequence that the FCP packet is on (required by destructor API) 184 * @fsp: The FCP packet to be released 185 * 186 * This routine is called by a destructor callback in the fc_exch_seq_send() 187 * routine of the libfc Transport Template. The 'struct fc_seq' is a required 188 * argument even though it is not used by this routine. 189 * 190 * Context: No locking required. 191 */ 192 static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) 193 { 194 fc_fcp_pkt_release(fsp); 195 } 196 197 /** 198 * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count 199 * @fsp: The FCP packet to be locked and incremented 200 * 201 * We should only return error if we return a command to SCSI-ml before 202 * getting a response. This can happen in cases where we send a abort, but 203 * do not wait for the response and the abort and command can be passing 204 * each other on the wire/network-layer. 205 * 206 * Note: this function locks the packet and gets a reference to allow 207 * callers to call the completion function while the lock is held and 208 * not have to worry about the packets refcount. 209 * 210 * TODO: Maybe we should just have callers grab/release the lock and 211 * have a function that they call to verify the fsp and grab a ref if 212 * needed. 213 */ 214 static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp) 215 { 216 spin_lock_bh(&fsp->scsi_pkt_lock); 217 if (fsp->state & FC_SRB_COMPL) { 218 spin_unlock_bh(&fsp->scsi_pkt_lock); 219 return -EPERM; 220 } 221 222 fc_fcp_pkt_hold(fsp); 223 return 0; 224 } 225 226 /** 227 * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its 228 * reference count 229 * @fsp: The FCP packet to be unlocked and decremented 230 */ 231 static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) 232 { 233 spin_unlock_bh(&fsp->scsi_pkt_lock); 234 fc_fcp_pkt_release(fsp); 235 } 236 237 /** 238 * fc_fcp_timer_set() - Start a timer for a fcp_pkt 239 * @fsp: The FCP packet to start a timer for 240 * @delay: The timeout period in jiffies 241 */ 242 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) 243 { 244 if (!(fsp->state & FC_SRB_COMPL)) { 245 mod_timer(&fsp->timer, jiffies + delay); 246 fsp->timer_delay = delay; 247 } 248 } 249 250 static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp) 251 { 252 fsp->state |= FC_SRB_ABORTED; 253 fsp->state &= ~FC_SRB_ABORT_PENDING; 254 255 if (fsp->wait_for_comp) 256 complete(&fsp->tm_done); 257 else 258 fc_fcp_complete_locked(fsp); 259 } 260 261 /** 262 * fc_fcp_send_abort() - Send an abort for exchanges associated with a 263 * fcp_pkt 264 * @fsp: The FCP packet to abort exchanges on 265 */ 266 static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp) 267 { 268 int rc; 269 270 if (!fsp->seq_ptr) 271 return -EINVAL; 272 273 per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; 274 put_cpu(); 275 276 fsp->state |= FC_SRB_ABORT_PENDING; 277 rc = fc_seq_exch_abort(fsp->seq_ptr, 0); 278 /* 279 * fc_seq_exch_abort() might return -ENXIO if 280 * the sequence is already completed 281 */ 282 if (rc == -ENXIO) { 283 fc_fcp_abort_done(fsp); 284 rc = 0; 285 } 286 return rc; 287 } 288 289 /** 290 * fc_fcp_retry_cmd() - Retry a fcp_pkt 291 * @fsp: The FCP packet to be retried 292 * 293 * Sets the status code to be FC_ERROR and then calls 294 * fc_fcp_complete_locked() which in turn calls fc_io_compl(). 295 * fc_io_compl() will notify the SCSI-ml that the I/O is done. 296 * The SCSI-ml will retry the command. 297 */ 298 static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code) 299 { 300 if (fsp->seq_ptr) { 301 fc_exch_done(fsp->seq_ptr); 302 fsp->seq_ptr = NULL; 303 } 304 305 fsp->state &= ~FC_SRB_ABORT_PENDING; 306 fsp->io_status = 0; 307 fsp->status_code = status_code; 308 fc_fcp_complete_locked(fsp); 309 } 310 311 /** 312 * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context 313 * @fsp: The FCP packet that will manage the DDP frames 314 * @xid: The XID that will be used for the DDP exchange 315 */ 316 void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid) 317 { 318 struct fc_lport *lport; 319 320 lport = fsp->lp; 321 if ((fsp->req_flags & FC_SRB_READ) && 322 (lport->lro_enabled) && (lport->tt.ddp_setup)) { 323 if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd), 324 scsi_sg_count(fsp->cmd))) 325 fsp->xfer_ddp = xid; 326 } 327 } 328 329 /** 330 * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any 331 * DDP related resources for a fcp_pkt 332 * @fsp: The FCP packet that DDP had been used on 333 */ 334 void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) 335 { 336 struct fc_lport *lport; 337 338 if (!fsp) 339 return; 340 341 if (fsp->xfer_ddp == FC_XID_UNKNOWN) 342 return; 343 344 lport = fsp->lp; 345 if (lport->tt.ddp_done) { 346 fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp); 347 fsp->xfer_ddp = FC_XID_UNKNOWN; 348 } 349 } 350 351 /** 352 * fc_fcp_can_queue_ramp_up() - increases can_queue 353 * @lport: lport to ramp up can_queue 354 */ 355 static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) 356 { 357 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 358 unsigned long flags; 359 int can_queue; 360 361 spin_lock_irqsave(lport->host->host_lock, flags); 362 363 if (si->last_can_queue_ramp_up_time && 364 (time_before(jiffies, si->last_can_queue_ramp_up_time + 365 FC_CAN_QUEUE_PERIOD))) 366 goto unlock; 367 368 if (time_before(jiffies, si->last_can_queue_ramp_down_time + 369 FC_CAN_QUEUE_PERIOD)) 370 goto unlock; 371 372 si->last_can_queue_ramp_up_time = jiffies; 373 374 can_queue = lport->host->can_queue << 1; 375 if (can_queue >= si->max_can_queue) { 376 can_queue = si->max_can_queue; 377 si->last_can_queue_ramp_down_time = 0; 378 } 379 lport->host->can_queue = can_queue; 380 shost_printk(KERN_ERR, lport->host, "libfc: increased " 381 "can_queue to %d.\n", can_queue); 382 383 unlock: 384 spin_unlock_irqrestore(lport->host->host_lock, flags); 385 } 386 387 /** 388 * fc_fcp_can_queue_ramp_down() - reduces can_queue 389 * @lport: lport to reduce can_queue 390 * 391 * If we are getting memory allocation failures, then we may 392 * be trying to execute too many commands. We let the running 393 * commands complete or timeout, then try again with a reduced 394 * can_queue. Eventually we will hit the point where we run 395 * on all reserved structs. 396 */ 397 static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 398 { 399 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 400 unsigned long flags; 401 int can_queue; 402 bool changed = false; 403 404 spin_lock_irqsave(lport->host->host_lock, flags); 405 406 if (si->last_can_queue_ramp_down_time && 407 (time_before(jiffies, si->last_can_queue_ramp_down_time + 408 FC_CAN_QUEUE_PERIOD))) 409 goto unlock; 410 411 si->last_can_queue_ramp_down_time = jiffies; 412 413 can_queue = lport->host->can_queue; 414 can_queue >>= 1; 415 if (!can_queue) 416 can_queue = 1; 417 lport->host->can_queue = can_queue; 418 changed = true; 419 420 unlock: 421 spin_unlock_irqrestore(lport->host->host_lock, flags); 422 return changed; 423 } 424 425 /* 426 * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer. 427 * @lport: fc lport struct 428 * @len: payload length 429 * 430 * Allocates fc_frame structure and buffer but if fails to allocate 431 * then reduce can_queue. 432 */ 433 static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, 434 size_t len) 435 { 436 struct fc_frame *fp; 437 438 fp = fc_frame_alloc(lport, len); 439 if (likely(fp)) 440 return fp; 441 442 per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; 443 put_cpu(); 444 /* error case */ 445 fc_fcp_can_queue_ramp_down(lport); 446 shost_printk(KERN_ERR, lport->host, 447 "libfc: Could not allocate frame, " 448 "reducing can_queue to %d.\n", lport->host->can_queue); 449 return NULL; 450 } 451 452 /** 453 * get_fsp_rec_tov() - Helper function to get REC_TOV 454 * @fsp: the FCP packet 455 * 456 * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second 457 */ 458 static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) 459 { 460 struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data; 461 unsigned int e_d_tov = FC_DEF_E_D_TOV; 462 463 if (rpriv && rpriv->e_d_tov > e_d_tov) 464 e_d_tov = rpriv->e_d_tov; 465 return msecs_to_jiffies(e_d_tov) + HZ; 466 } 467 468 /** 469 * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target 470 * @fsp: The FCP packet the data is on 471 * @fp: The data frame 472 */ 473 static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 474 { 475 struct scsi_cmnd *sc = fsp->cmd; 476 struct fc_lport *lport = fsp->lp; 477 struct fc_stats *stats; 478 struct fc_frame_header *fh; 479 size_t start_offset; 480 size_t offset; 481 u32 crc; 482 u32 copy_len = 0; 483 size_t len; 484 void *buf; 485 struct scatterlist *sg; 486 u32 nents; 487 u8 host_bcode = FC_COMPLETE; 488 489 fh = fc_frame_header_get(fp); 490 offset = ntohl(fh->fh_parm_offset); 491 start_offset = offset; 492 len = fr_len(fp) - sizeof(*fh); 493 buf = fc_frame_payload_get(fp, 0); 494 495 /* 496 * if this I/O is ddped then clear it and initiate recovery since data 497 * frames are expected to be placed directly in that case. 498 * 499 * Indicate error to scsi-ml because something went wrong with the 500 * ddp handling to get us here. 501 */ 502 if (fsp->xfer_ddp != FC_XID_UNKNOWN) { 503 fc_fcp_ddp_done(fsp); 504 FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n"); 505 host_bcode = FC_ERROR; 506 goto err; 507 } 508 if (offset + len > fsp->data_len) { 509 /* this should never happen */ 510 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 511 fc_frame_crc_check(fp)) 512 goto crc_err; 513 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " 514 "data_len %x\n", len, offset, fsp->data_len); 515 516 /* Data is corrupted indicate scsi-ml should retry */ 517 host_bcode = FC_DATA_OVRRUN; 518 goto err; 519 } 520 if (offset != fsp->xfer_len) 521 fsp->state |= FC_SRB_DISCONTIG; 522 523 sg = scsi_sglist(sc); 524 nents = scsi_sg_count(sc); 525 526 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { 527 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 528 &offset, NULL); 529 } else { 530 crc = crc32(~0, (u8 *) fh, sizeof(*fh)); 531 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 532 &offset, &crc); 533 buf = fc_frame_payload_get(fp, 0); 534 if (len % 4) 535 crc = crc32(crc, buf + len, 4 - (len % 4)); 536 537 if (~crc != le32_to_cpu(fr_crc(fp))) { 538 crc_err: 539 stats = per_cpu_ptr(lport->stats, get_cpu()); 540 stats->ErrorFrames++; 541 /* per cpu count, not total count, but OK for limit */ 542 if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT) 543 printk(KERN_WARNING "libfc: CRC error on data " 544 "frame for port (%6.6x)\n", 545 lport->port_id); 546 put_cpu(); 547 /* 548 * Assume the frame is total garbage. 549 * We may have copied it over the good part 550 * of the buffer. 551 * If so, we need to retry the entire operation. 552 * Otherwise, ignore it. 553 */ 554 if (fsp->state & FC_SRB_DISCONTIG) { 555 host_bcode = FC_CRC_ERROR; 556 goto err; 557 } 558 return; 559 } 560 } 561 562 if (fsp->xfer_contig_end == start_offset) 563 fsp->xfer_contig_end += copy_len; 564 fsp->xfer_len += copy_len; 565 566 /* 567 * In the very rare event that this data arrived after the response 568 * and completes the transfer, call the completion handler. 569 */ 570 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && 571 fsp->xfer_len == fsp->data_len - fsp->scsi_resid) { 572 FC_FCP_DBG( fsp, "complete out-of-order sequence\n" ); 573 fc_fcp_complete_locked(fsp); 574 } 575 return; 576 err: 577 fc_fcp_recovery(fsp, host_bcode); 578 } 579 580 /** 581 * fc_fcp_send_data() - Send SCSI data to a target 582 * @fsp: The FCP packet the data is on 583 * @sp: The sequence the data is to be sent on 584 * @offset: The starting offset for this data request 585 * @seq_blen: The burst length for this data request 586 * 587 * Called after receiving a Transfer Ready data descriptor. 588 * If the LLD is capable of sequence offload then send down the 589 * seq_blen amount of data in single frame, otherwise send 590 * multiple frames of the maximum frame payload supported by 591 * the target port. 592 */ 593 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, 594 size_t offset, size_t seq_blen) 595 { 596 struct fc_exch *ep; 597 struct scsi_cmnd *sc; 598 struct scatterlist *sg; 599 struct fc_frame *fp = NULL; 600 struct fc_lport *lport = fsp->lp; 601 struct page *page; 602 size_t remaining; 603 size_t t_blen; 604 size_t tlen; 605 size_t sg_bytes; 606 size_t frame_offset, fh_parm_offset; 607 size_t off; 608 int error; 609 void *data = NULL; 610 void *page_addr; 611 int using_sg = lport->sg_supp; 612 u32 f_ctl; 613 614 WARN_ON(seq_blen <= 0); 615 if (unlikely(offset + seq_blen > fsp->data_len)) { 616 /* this should never happen */ 617 FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx " 618 "offset %zx\n", seq_blen, offset); 619 fc_fcp_send_abort(fsp); 620 return 0; 621 } else if (offset != fsp->xfer_len) { 622 /* Out of Order Data Request - no problem, but unexpected. */ 623 FC_FCP_DBG(fsp, "xfer-ready non-contiguous. " 624 "seq_blen %zx offset %zx\n", seq_blen, offset); 625 } 626 627 /* 628 * if LLD is capable of seq_offload then set transport 629 * burst length (t_blen) to seq_blen, otherwise set t_blen 630 * to max FC frame payload previously set in fsp->max_payload. 631 */ 632 t_blen = fsp->max_payload; 633 if (lport->seq_offload) { 634 t_blen = min(seq_blen, (size_t)lport->lso_max); 635 FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n", 636 fsp, seq_blen, lport->lso_max, t_blen); 637 } 638 639 if (t_blen > 512) 640 t_blen &= ~(512 - 1); /* round down to block size */ 641 sc = fsp->cmd; 642 643 remaining = seq_blen; 644 fh_parm_offset = frame_offset = offset; 645 tlen = 0; 646 seq = fc_seq_start_next(seq); 647 f_ctl = FC_FC_REL_OFF; 648 WARN_ON(!seq); 649 650 sg = scsi_sglist(sc); 651 652 while (remaining > 0 && sg) { 653 if (offset >= sg->length) { 654 offset -= sg->length; 655 sg = sg_next(sg); 656 continue; 657 } 658 if (!fp) { 659 tlen = min(t_blen, remaining); 660 661 /* 662 * TODO. Temporary workaround. fc_seq_send() can't 663 * handle odd lengths in non-linear skbs. 664 * This will be the final fragment only. 665 */ 666 if (tlen % 4) 667 using_sg = 0; 668 fp = fc_frame_alloc(lport, using_sg ? 0 : tlen); 669 if (!fp) 670 return -ENOMEM; 671 672 data = fc_frame_header_get(fp) + 1; 673 fh_parm_offset = frame_offset; 674 fr_max_payload(fp) = fsp->max_payload; 675 } 676 677 off = offset + sg->offset; 678 sg_bytes = min(tlen, sg->length - offset); 679 sg_bytes = min(sg_bytes, 680 (size_t) (PAGE_SIZE - (off & ~PAGE_MASK))); 681 page = sg_page(sg) + (off >> PAGE_SHIFT); 682 if (using_sg) { 683 get_page(page); 684 skb_fill_page_desc(fp_skb(fp), 685 skb_shinfo(fp_skb(fp))->nr_frags, 686 page, off & ~PAGE_MASK, sg_bytes); 687 fp_skb(fp)->data_len += sg_bytes; 688 fr_len(fp) += sg_bytes; 689 fp_skb(fp)->truesize += PAGE_SIZE; 690 } else { 691 /* 692 * The scatterlist item may be bigger than PAGE_SIZE, 693 * but we must not cross pages inside the kmap. 694 */ 695 page_addr = kmap_atomic(page); 696 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 697 sg_bytes); 698 kunmap_atomic(page_addr); 699 data += sg_bytes; 700 } 701 offset += sg_bytes; 702 frame_offset += sg_bytes; 703 tlen -= sg_bytes; 704 remaining -= sg_bytes; 705 706 if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && 707 (tlen)) 708 continue; 709 710 /* 711 * Send sequence with transfer sequence initiative in case 712 * this is last FCP frame of the sequence. 713 */ 714 if (remaining == 0) 715 f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ; 716 717 ep = fc_seq_exch(seq); 718 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 719 FC_TYPE_FCP, f_ctl, fh_parm_offset); 720 721 /* 722 * send fragment using for a sequence. 723 */ 724 error = fc_seq_send(lport, seq, fp); 725 if (error) { 726 WARN_ON(1); /* send error should be rare */ 727 return error; 728 } 729 fp = NULL; 730 } 731 fsp->xfer_len += seq_blen; /* premature count? */ 732 return 0; 733 } 734 735 /** 736 * fc_fcp_abts_resp() - Receive an ABTS response 737 * @fsp: The FCP packet that is being aborted 738 * @fp: The response frame 739 */ 740 static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 741 { 742 int ba_done = 1; 743 struct fc_ba_rjt *brp; 744 struct fc_frame_header *fh; 745 746 fh = fc_frame_header_get(fp); 747 switch (fh->fh_r_ctl) { 748 case FC_RCTL_BA_ACC: 749 break; 750 case FC_RCTL_BA_RJT: 751 brp = fc_frame_payload_get(fp, sizeof(*brp)); 752 if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) 753 break; 754 /* fall thru */ 755 default: 756 /* 757 * we will let the command timeout 758 * and scsi-ml recover in this case, 759 * therefore cleared the ba_done flag. 760 */ 761 ba_done = 0; 762 } 763 764 if (ba_done) 765 fc_fcp_abort_done(fsp); 766 } 767 768 /** 769 * fc_fcp_recv() - Receive an FCP frame 770 * @seq: The sequence the frame is on 771 * @fp: The received frame 772 * @arg: The related FCP packet 773 * 774 * Context: Called from Soft IRQ context. Can not be called 775 * holding the FCP packet list lock. 776 */ 777 static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) 778 { 779 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 780 struct fc_lport *lport = fsp->lp; 781 struct fc_frame_header *fh; 782 struct fcp_txrdy *dd; 783 u8 r_ctl; 784 int rc = 0; 785 786 if (IS_ERR(fp)) { 787 fc_fcp_error(fsp, fp); 788 return; 789 } 790 791 fh = fc_frame_header_get(fp); 792 r_ctl = fh->fh_r_ctl; 793 794 if (lport->state != LPORT_ST_READY) { 795 FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n", 796 lport->state, r_ctl); 797 goto out; 798 } 799 if (fc_fcp_lock_pkt(fsp)) 800 goto out; 801 802 if (fh->fh_type == FC_TYPE_BLS) { 803 fc_fcp_abts_resp(fsp, fp); 804 goto unlock; 805 } 806 807 if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) { 808 FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl); 809 goto unlock; 810 } 811 812 if (r_ctl == FC_RCTL_DD_DATA_DESC) { 813 /* 814 * received XFER RDY from the target 815 * need to send data to the target 816 */ 817 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 818 dd = fc_frame_payload_get(fp, sizeof(*dd)); 819 WARN_ON(!dd); 820 821 rc = fc_fcp_send_data(fsp, seq, 822 (size_t) ntohl(dd->ft_data_ro), 823 (size_t) ntohl(dd->ft_burst_len)); 824 if (!rc) 825 seq->rec_data = fsp->xfer_len; 826 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 827 /* 828 * received a DATA frame 829 * next we will copy the data to the system buffer 830 */ 831 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */ 832 fc_fcp_recv_data(fsp, fp); 833 seq->rec_data = fsp->xfer_contig_end; 834 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) { 835 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED); 836 837 fc_fcp_resp(fsp, fp); 838 } else { 839 FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl); 840 } 841 unlock: 842 fc_fcp_unlock_pkt(fsp); 843 out: 844 fc_frame_free(fp); 845 } 846 847 /** 848 * fc_fcp_resp() - Handler for FCP responses 849 * @fsp: The FCP packet the response is for 850 * @fp: The response frame 851 */ 852 static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 853 { 854 struct fc_frame_header *fh; 855 struct fcp_resp *fc_rp; 856 struct fcp_resp_ext *rp_ex; 857 struct fcp_resp_rsp_info *fc_rp_info; 858 u32 plen; 859 u32 expected_len; 860 u32 respl = 0; 861 u32 snsl = 0; 862 u8 flags = 0; 863 864 plen = fr_len(fp); 865 fh = (struct fc_frame_header *)fr_hdr(fp); 866 if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp))) 867 goto len_err; 868 plen -= sizeof(*fh); 869 fc_rp = (struct fcp_resp *)(fh + 1); 870 fsp->cdb_status = fc_rp->fr_status; 871 flags = fc_rp->fr_flags; 872 fsp->scsi_comp_flags = flags; 873 expected_len = fsp->data_len; 874 875 /* if ddp, update xfer len */ 876 fc_fcp_ddp_done(fsp); 877 878 if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) { 879 rp_ex = (void *)(fc_rp + 1); 880 if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) { 881 if (plen < sizeof(*fc_rp) + sizeof(*rp_ex)) 882 goto len_err; 883 fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1); 884 if (flags & FCP_RSP_LEN_VAL) { 885 respl = ntohl(rp_ex->fr_rsp_len); 886 if ((respl != FCP_RESP_RSP_INFO_LEN4) && 887 (respl != FCP_RESP_RSP_INFO_LEN8)) 888 goto len_err; 889 if (fsp->wait_for_comp) { 890 /* Abuse cdb_status for rsp code */ 891 fsp->cdb_status = fc_rp_info->rsp_code; 892 complete(&fsp->tm_done); 893 /* 894 * tmfs will not have any scsi cmd so 895 * exit here 896 */ 897 return; 898 } 899 } 900 if (flags & FCP_SNS_LEN_VAL) { 901 snsl = ntohl(rp_ex->fr_sns_len); 902 if (snsl > SCSI_SENSE_BUFFERSIZE) 903 snsl = SCSI_SENSE_BUFFERSIZE; 904 memcpy(fsp->cmd->sense_buffer, 905 (char *)fc_rp_info + respl, snsl); 906 } 907 } 908 if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) { 909 if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid)) 910 goto len_err; 911 if (flags & FCP_RESID_UNDER) { 912 fsp->scsi_resid = ntohl(rp_ex->fr_resid); 913 /* 914 * The cmnd->underflow is the minimum number of 915 * bytes that must be transferred for this 916 * command. Provided a sense condition is not 917 * present, make sure the actual amount 918 * transferred is at least the underflow value 919 * or fail. 920 */ 921 if (!(flags & FCP_SNS_LEN_VAL) && 922 (fc_rp->fr_status == 0) && 923 (scsi_bufflen(fsp->cmd) - 924 fsp->scsi_resid) < fsp->cmd->underflow) 925 goto err; 926 expected_len -= fsp->scsi_resid; 927 } else { 928 fsp->status_code = FC_ERROR; 929 } 930 } 931 } 932 fsp->state |= FC_SRB_RCV_STATUS; 933 934 /* 935 * Check for missing or extra data frames. 936 */ 937 if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && 938 fsp->xfer_len != expected_len)) { 939 if (fsp->xfer_len < expected_len) { 940 /* 941 * Some data may be queued locally, 942 * Wait a at least one jiffy to see if it is delivered. 943 * If this expires without data, we may do SRR. 944 */ 945 if (fsp->lp->qfull) { 946 FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n", 947 fsp->rport->port_id); 948 return; 949 } 950 FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun " 951 "len %x, data len %x\n", 952 fsp->rport->port_id, 953 fsp->xfer_len, expected_len, fsp->data_len); 954 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 955 return; 956 } 957 fsp->status_code = FC_DATA_OVRRUN; 958 FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, " 959 "len %x, data len %x\n", 960 fsp->rport->port_id, 961 fsp->xfer_len, expected_len, fsp->data_len); 962 } 963 fc_fcp_complete_locked(fsp); 964 return; 965 966 len_err: 967 FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u " 968 "snsl %u\n", flags, fr_len(fp), respl, snsl); 969 err: 970 fsp->status_code = FC_ERROR; 971 fc_fcp_complete_locked(fsp); 972 } 973 974 /** 975 * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the 976 * fcp_pkt lock held 977 * @fsp: The FCP packet to be completed 978 * 979 * This function may sleep if a timer is pending. The packet lock must be 980 * held, and the host lock must not be held. 981 */ 982 static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) 983 { 984 struct fc_lport *lport = fsp->lp; 985 struct fc_seq *seq; 986 struct fc_exch *ep; 987 u32 f_ctl; 988 989 if (fsp->state & FC_SRB_ABORT_PENDING) 990 return; 991 992 if (fsp->state & FC_SRB_ABORTED) { 993 if (!fsp->status_code) 994 fsp->status_code = FC_CMD_ABORTED; 995 } else { 996 /* 997 * Test for transport underrun, independent of response 998 * underrun status. 999 */ 1000 if (fsp->cdb_status == SAM_STAT_GOOD && 1001 fsp->xfer_len < fsp->data_len && !fsp->io_status && 1002 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || 1003 fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { 1004 FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n", 1005 fsp->xfer_len, fsp->data_len); 1006 fsp->status_code = FC_DATA_UNDRUN; 1007 } 1008 } 1009 1010 seq = fsp->seq_ptr; 1011 if (seq) { 1012 fsp->seq_ptr = NULL; 1013 if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) { 1014 struct fc_frame *conf_frame; 1015 struct fc_seq *csp; 1016 1017 csp = fc_seq_start_next(seq); 1018 conf_frame = fc_fcp_frame_alloc(fsp->lp, 0); 1019 if (conf_frame) { 1020 f_ctl = FC_FC_SEQ_INIT; 1021 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ; 1022 ep = fc_seq_exch(seq); 1023 fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL, 1024 ep->did, ep->sid, 1025 FC_TYPE_FCP, f_ctl, 0); 1026 fc_seq_send(lport, csp, conf_frame); 1027 } 1028 } 1029 fc_exch_done(seq); 1030 } 1031 /* 1032 * Some resets driven by SCSI are not I/Os and do not have 1033 * SCSI commands associated with the requests. We should not 1034 * call I/O completion if we do not have a SCSI command. 1035 */ 1036 if (fsp->cmd) 1037 fc_io_compl(fsp); 1038 } 1039 1040 /** 1041 * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt 1042 * @fsp: The FCP packet whose exchanges should be canceled 1043 * @error: The reason for the cancellation 1044 */ 1045 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) 1046 { 1047 if (fsp->seq_ptr) { 1048 fc_exch_done(fsp->seq_ptr); 1049 fsp->seq_ptr = NULL; 1050 } 1051 fsp->status_code = error; 1052 } 1053 1054 /** 1055 * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port 1056 * @lport: The local port whose exchanges should be canceled 1057 * @id: The target's ID 1058 * @lun: The LUN 1059 * @error: The reason for cancellation 1060 * 1061 * If lun or id is -1, they are ignored. 1062 */ 1063 static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id, 1064 unsigned int lun, int error) 1065 { 1066 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 1067 struct fc_fcp_pkt *fsp; 1068 struct scsi_cmnd *sc_cmd; 1069 unsigned long flags; 1070 1071 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1072 restart: 1073 list_for_each_entry(fsp, &si->scsi_pkt_queue, list) { 1074 sc_cmd = fsp->cmd; 1075 if (id != -1 && scmd_id(sc_cmd) != id) 1076 continue; 1077 1078 if (lun != -1 && sc_cmd->device->lun != lun) 1079 continue; 1080 1081 fc_fcp_pkt_hold(fsp); 1082 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1083 1084 spin_lock_bh(&fsp->scsi_pkt_lock); 1085 if (!(fsp->state & FC_SRB_COMPL)) { 1086 fsp->state |= FC_SRB_COMPL; 1087 /* 1088 * TODO: dropping scsi_pkt_lock and then reacquiring 1089 * again around fc_fcp_cleanup_cmd() is required, 1090 * since fc_fcp_cleanup_cmd() calls into 1091 * fc_seq_set_resp() and that func preempts cpu using 1092 * schedule. May be schedule and related code should be 1093 * removed instead of unlocking here to avoid scheduling 1094 * while atomic bug. 1095 */ 1096 spin_unlock_bh(&fsp->scsi_pkt_lock); 1097 1098 fc_fcp_cleanup_cmd(fsp, error); 1099 1100 spin_lock_bh(&fsp->scsi_pkt_lock); 1101 fc_io_compl(fsp); 1102 } 1103 spin_unlock_bh(&fsp->scsi_pkt_lock); 1104 1105 fc_fcp_pkt_release(fsp); 1106 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1107 /* 1108 * while we dropped the lock multiple pkts could 1109 * have been released, so we have to start over. 1110 */ 1111 goto restart; 1112 } 1113 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1114 } 1115 1116 /** 1117 * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port 1118 * @lport: The local port whose exchanges are to be aborted 1119 */ 1120 static void fc_fcp_abort_io(struct fc_lport *lport) 1121 { 1122 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR); 1123 } 1124 1125 /** 1126 * fc_fcp_pkt_send() - Send a fcp_pkt 1127 * @lport: The local port to send the FCP packet on 1128 * @fsp: The FCP packet to send 1129 * 1130 * Return: Zero for success and -1 for failure 1131 * Locks: Called without locks held 1132 */ 1133 static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) 1134 { 1135 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 1136 unsigned long flags; 1137 int rc; 1138 1139 fsp->cmd->SCp.ptr = (char *)fsp; 1140 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1141 fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK; 1142 1143 int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun); 1144 memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len); 1145 1146 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1147 list_add_tail(&fsp->list, &si->scsi_pkt_queue); 1148 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1149 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1150 if (unlikely(rc)) { 1151 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1152 fsp->cmd->SCp.ptr = NULL; 1153 list_del(&fsp->list); 1154 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1155 } 1156 1157 return rc; 1158 } 1159 1160 /** 1161 * fc_fcp_cmd_send() - Send a FCP command 1162 * @lport: The local port to send the command on 1163 * @fsp: The FCP packet the command is on 1164 * @resp: The handler for the response 1165 */ 1166 static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, 1167 void (*resp)(struct fc_seq *, 1168 struct fc_frame *fp, 1169 void *arg)) 1170 { 1171 struct fc_frame *fp; 1172 struct fc_seq *seq; 1173 struct fc_rport *rport; 1174 struct fc_rport_libfc_priv *rpriv; 1175 const size_t len = sizeof(fsp->cdb_cmd); 1176 int rc = 0; 1177 1178 if (fc_fcp_lock_pkt(fsp)) 1179 return 0; 1180 1181 fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd)); 1182 if (!fp) { 1183 rc = -1; 1184 goto unlock; 1185 } 1186 1187 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len); 1188 fr_fsp(fp) = fsp; 1189 rport = fsp->rport; 1190 fsp->max_payload = rport->maxframe_size; 1191 rpriv = rport->dd_data; 1192 1193 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1194 rpriv->local_port->port_id, FC_TYPE_FCP, 1195 FC_FCTL_REQ, 0); 1196 1197 seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0); 1198 if (!seq) { 1199 rc = -1; 1200 goto unlock; 1201 } 1202 fsp->seq_ptr = seq; 1203 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1204 1205 fsp->timer.function = fc_fcp_timeout; 1206 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) 1207 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1208 1209 unlock: 1210 fc_fcp_unlock_pkt(fsp); 1211 return rc; 1212 } 1213 1214 /** 1215 * fc_fcp_error() - Handler for FCP layer errors 1216 * @fsp: The FCP packet the error is on 1217 * @fp: The frame that has errored 1218 */ 1219 static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1220 { 1221 int error = PTR_ERR(fp); 1222 1223 if (fc_fcp_lock_pkt(fsp)) 1224 return; 1225 1226 if (error == -FC_EX_CLOSED) { 1227 fc_fcp_retry_cmd(fsp, FC_ERROR); 1228 goto unlock; 1229 } 1230 1231 /* 1232 * clear abort pending, because the lower layer 1233 * decided to force completion. 1234 */ 1235 fsp->state &= ~FC_SRB_ABORT_PENDING; 1236 fsp->status_code = FC_CMD_PLOGO; 1237 fc_fcp_complete_locked(fsp); 1238 unlock: 1239 fc_fcp_unlock_pkt(fsp); 1240 } 1241 1242 /** 1243 * fc_fcp_pkt_abort() - Abort a fcp_pkt 1244 * @fsp: The FCP packet to abort on 1245 * 1246 * Called to send an abort and then wait for abort completion 1247 */ 1248 static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) 1249 { 1250 int rc = FAILED; 1251 unsigned long ticks_left; 1252 1253 FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state); 1254 if (fc_fcp_send_abort(fsp)) { 1255 FC_FCP_DBG(fsp, "failed to send abort\n"); 1256 return FAILED; 1257 } 1258 1259 if (fsp->state & FC_SRB_ABORTED) { 1260 FC_FCP_DBG(fsp, "target abort cmd completed\n"); 1261 return SUCCESS; 1262 } 1263 1264 init_completion(&fsp->tm_done); 1265 fsp->wait_for_comp = 1; 1266 1267 spin_unlock_bh(&fsp->scsi_pkt_lock); 1268 ticks_left = wait_for_completion_timeout(&fsp->tm_done, 1269 FC_SCSI_TM_TOV); 1270 spin_lock_bh(&fsp->scsi_pkt_lock); 1271 fsp->wait_for_comp = 0; 1272 1273 if (!ticks_left) { 1274 FC_FCP_DBG(fsp, "target abort cmd failed\n"); 1275 } else if (fsp->state & FC_SRB_ABORTED) { 1276 FC_FCP_DBG(fsp, "target abort cmd passed\n"); 1277 rc = SUCCESS; 1278 fc_fcp_complete_locked(fsp); 1279 } 1280 1281 return rc; 1282 } 1283 1284 /** 1285 * fc_lun_reset_send() - Send LUN reset command 1286 * @data: The FCP packet that identifies the LUN to be reset 1287 */ 1288 static void fc_lun_reset_send(struct timer_list *t) 1289 { 1290 struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); 1291 struct fc_lport *lport = fsp->lp; 1292 1293 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { 1294 if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) 1295 return; 1296 if (fc_fcp_lock_pkt(fsp)) 1297 return; 1298 fsp->timer.function = fc_lun_reset_send; 1299 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1300 fc_fcp_unlock_pkt(fsp); 1301 } 1302 } 1303 1304 /** 1305 * fc_lun_reset() - Send a LUN RESET command to a device 1306 * and wait for the reply 1307 * @lport: The local port to sent the command on 1308 * @fsp: The FCP packet that identifies the LUN to be reset 1309 * @id: The SCSI command ID 1310 * @lun: The LUN ID to be reset 1311 */ 1312 static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, 1313 unsigned int id, unsigned int lun) 1314 { 1315 int rc; 1316 1317 fsp->cdb_cmd.fc_dl = htonl(fsp->data_len); 1318 fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET; 1319 int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun); 1320 1321 fsp->wait_for_comp = 1; 1322 init_completion(&fsp->tm_done); 1323 1324 fc_lun_reset_send(&fsp->timer); 1325 1326 /* 1327 * wait for completion of reset 1328 * after that make sure all commands are terminated 1329 */ 1330 rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); 1331 1332 spin_lock_bh(&fsp->scsi_pkt_lock); 1333 fsp->state |= FC_SRB_COMPL; 1334 spin_unlock_bh(&fsp->scsi_pkt_lock); 1335 1336 del_timer_sync(&fsp->timer); 1337 1338 spin_lock_bh(&fsp->scsi_pkt_lock); 1339 if (fsp->seq_ptr) { 1340 fc_exch_done(fsp->seq_ptr); 1341 fsp->seq_ptr = NULL; 1342 } 1343 fsp->wait_for_comp = 0; 1344 spin_unlock_bh(&fsp->scsi_pkt_lock); 1345 1346 if (!rc) { 1347 FC_SCSI_DBG(lport, "lun reset failed\n"); 1348 return FAILED; 1349 } 1350 1351 /* cdb_status holds the tmf's rsp code */ 1352 if (fsp->cdb_status != FCP_TMF_CMPL) 1353 return FAILED; 1354 1355 FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun); 1356 fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED); 1357 return SUCCESS; 1358 } 1359 1360 /** 1361 * fc_tm_done() - Task Management response handler 1362 * @seq: The sequence that the response is on 1363 * @fp: The response frame 1364 * @arg: The FCP packet the response is for 1365 */ 1366 static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1367 { 1368 struct fc_fcp_pkt *fsp = arg; 1369 struct fc_frame_header *fh; 1370 1371 if (IS_ERR(fp)) { 1372 /* 1373 * If there is an error just let it timeout or wait 1374 * for TMF to be aborted if it timedout. 1375 * 1376 * scsi-eh will escalate for when either happens. 1377 */ 1378 return; 1379 } 1380 1381 if (fc_fcp_lock_pkt(fsp)) 1382 goto out; 1383 1384 /* 1385 * raced with eh timeout handler. 1386 */ 1387 if (!fsp->seq_ptr || !fsp->wait_for_comp) 1388 goto out_unlock; 1389 1390 fh = fc_frame_header_get(fp); 1391 if (fh->fh_type != FC_TYPE_BLS) 1392 fc_fcp_resp(fsp, fp); 1393 fsp->seq_ptr = NULL; 1394 fc_exch_done(seq); 1395 out_unlock: 1396 fc_fcp_unlock_pkt(fsp); 1397 out: 1398 fc_frame_free(fp); 1399 } 1400 1401 /** 1402 * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port 1403 * @lport: The local port to be cleaned up 1404 */ 1405 static void fc_fcp_cleanup(struct fc_lport *lport) 1406 { 1407 fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR); 1408 } 1409 1410 /** 1411 * fc_fcp_timeout() - Handler for fcp_pkt timeouts 1412 * @data: The FCP packet that has timed out 1413 * 1414 * If REC is supported then just issue it and return. The REC exchange will 1415 * complete or time out and recovery can continue at that point. Otherwise, 1416 * if the response has been received without all the data it has been 1417 * ER_TIMEOUT since the response was received. If the response has not been 1418 * received we see if data was received recently. If it has been then we 1419 * continue waiting, otherwise, we abort the command. 1420 */ 1421 static void fc_fcp_timeout(struct timer_list *t) 1422 { 1423 struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); 1424 struct fc_rport *rport = fsp->rport; 1425 struct fc_rport_libfc_priv *rpriv = rport->dd_data; 1426 1427 if (fc_fcp_lock_pkt(fsp)) 1428 return; 1429 1430 if (fsp->cdb_cmd.fc_tm_flags) 1431 goto unlock; 1432 1433 if (fsp->lp->qfull) { 1434 FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n", 1435 fsp->timer_delay); 1436 fsp->timer.function = fc_fcp_timeout; 1437 fc_fcp_timer_set(fsp, fsp->timer_delay); 1438 goto unlock; 1439 } 1440 FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n", 1441 fsp->timer_delay, rpriv->flags, fsp->state); 1442 fsp->state |= FC_SRB_FCP_PROCESSING_TMO; 1443 1444 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) 1445 fc_fcp_rec(fsp); 1446 else if (fsp->state & FC_SRB_RCV_STATUS) 1447 fc_fcp_complete_locked(fsp); 1448 else 1449 fc_fcp_recovery(fsp, FC_TIMED_OUT); 1450 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1451 unlock: 1452 fc_fcp_unlock_pkt(fsp); 1453 } 1454 1455 /** 1456 * fc_fcp_rec() - Send a REC ELS request 1457 * @fsp: The FCP packet to send the REC request on 1458 */ 1459 static void fc_fcp_rec(struct fc_fcp_pkt *fsp) 1460 { 1461 struct fc_lport *lport; 1462 struct fc_frame *fp; 1463 struct fc_rport *rport; 1464 struct fc_rport_libfc_priv *rpriv; 1465 1466 lport = fsp->lp; 1467 rport = fsp->rport; 1468 rpriv = rport->dd_data; 1469 if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) { 1470 fsp->status_code = FC_HRD_ERROR; 1471 fsp->io_status = 0; 1472 fc_fcp_complete_locked(fsp); 1473 return; 1474 } 1475 1476 fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); 1477 if (!fp) 1478 goto retry; 1479 1480 fr_seq(fp) = fsp->seq_ptr; 1481 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1482 rpriv->local_port->port_id, FC_TYPE_ELS, 1483 FC_FCTL_REQ, 0); 1484 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, 1485 fc_fcp_rec_resp, fsp, 1486 2 * lport->r_a_tov)) { 1487 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ 1488 return; 1489 } 1490 retry: 1491 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1492 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1493 else 1494 fc_fcp_recovery(fsp, FC_TIMED_OUT); 1495 } 1496 1497 /** 1498 * fc_fcp_rec_resp() - Handler for REC ELS responses 1499 * @seq: The sequence the response is on 1500 * @fp: The response frame 1501 * @arg: The FCP packet the response is on 1502 * 1503 * If the response is a reject then the scsi layer will handle 1504 * the timeout. If the response is a LS_ACC then if the I/O was not completed 1505 * set the timeout and return. If the I/O was completed then complete the 1506 * exchange and tell the SCSI layer. 1507 */ 1508 static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1509 { 1510 struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg; 1511 struct fc_els_rec_acc *recp; 1512 struct fc_els_ls_rjt *rjt; 1513 u32 e_stat; 1514 u8 opcode; 1515 u32 offset; 1516 enum dma_data_direction data_dir; 1517 enum fc_rctl r_ctl; 1518 struct fc_rport_libfc_priv *rpriv; 1519 1520 if (IS_ERR(fp)) { 1521 fc_fcp_rec_error(fsp, fp); 1522 return; 1523 } 1524 1525 if (fc_fcp_lock_pkt(fsp)) 1526 goto out; 1527 1528 fsp->recov_retry = 0; 1529 opcode = fc_frame_payload_op(fp); 1530 if (opcode == ELS_LS_RJT) { 1531 rjt = fc_frame_payload_get(fp, sizeof(*rjt)); 1532 switch (rjt->er_reason) { 1533 default: 1534 FC_FCP_DBG(fsp, 1535 "device %x invalid REC reject %d/%d\n", 1536 fsp->rport->port_id, rjt->er_reason, 1537 rjt->er_explan); 1538 /* fall through */ 1539 case ELS_RJT_UNSUP: 1540 FC_FCP_DBG(fsp, "device does not support REC\n"); 1541 rpriv = fsp->rport->dd_data; 1542 /* 1543 * if we do not spport RECs or got some bogus 1544 * reason then resetup timer so we check for 1545 * making progress. 1546 */ 1547 rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; 1548 break; 1549 case ELS_RJT_LOGIC: 1550 case ELS_RJT_UNAB: 1551 FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n", 1552 fsp->rport->port_id, rjt->er_reason, 1553 rjt->er_explan); 1554 /* 1555 * If response got lost or is stuck in the 1556 * queue somewhere we have no idea if and when 1557 * the response will be received. So quarantine 1558 * the xid and retry the command. 1559 */ 1560 if (rjt->er_explan == ELS_EXPL_OXID_RXID) { 1561 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1562 ep->state |= FC_EX_QUARANTINE; 1563 fsp->state |= FC_SRB_ABORTED; 1564 fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); 1565 break; 1566 } 1567 fc_fcp_recovery(fsp, FC_TRANS_RESET); 1568 break; 1569 } 1570 } else if (opcode == ELS_LS_ACC) { 1571 if (fsp->state & FC_SRB_ABORTED) 1572 goto unlock_out; 1573 1574 data_dir = fsp->cmd->sc_data_direction; 1575 recp = fc_frame_payload_get(fp, sizeof(*recp)); 1576 offset = ntohl(recp->reca_fc4value); 1577 e_stat = ntohl(recp->reca_e_stat); 1578 1579 if (e_stat & ESB_ST_COMPLETE) { 1580 1581 /* 1582 * The exchange is complete. 1583 * 1584 * For output, we must've lost the response. 1585 * For input, all data must've been sent. 1586 * We lost may have lost the response 1587 * (and a confirmation was requested) and maybe 1588 * some data. 1589 * 1590 * If all data received, send SRR 1591 * asking for response. If partial data received, 1592 * or gaps, SRR requests data at start of gap. 1593 * Recovery via SRR relies on in-order-delivery. 1594 */ 1595 if (data_dir == DMA_TO_DEVICE) { 1596 r_ctl = FC_RCTL_DD_CMD_STATUS; 1597 } else if (fsp->xfer_contig_end == offset) { 1598 r_ctl = FC_RCTL_DD_CMD_STATUS; 1599 } else { 1600 offset = fsp->xfer_contig_end; 1601 r_ctl = FC_RCTL_DD_SOL_DATA; 1602 } 1603 fc_fcp_srr(fsp, r_ctl, offset); 1604 } else if (e_stat & ESB_ST_SEQ_INIT) { 1605 /* 1606 * The remote port has the initiative, so just 1607 * keep waiting for it to complete. 1608 */ 1609 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1610 } else { 1611 1612 /* 1613 * The exchange is incomplete, we have seq. initiative. 1614 * Lost response with requested confirmation, 1615 * lost confirmation, lost transfer ready or 1616 * lost write data. 1617 * 1618 * For output, if not all data was received, ask 1619 * for transfer ready to be repeated. 1620 * 1621 * If we received or sent all the data, send SRR to 1622 * request response. 1623 * 1624 * If we lost a response, we may have lost some read 1625 * data as well. 1626 */ 1627 r_ctl = FC_RCTL_DD_SOL_DATA; 1628 if (data_dir == DMA_TO_DEVICE) { 1629 r_ctl = FC_RCTL_DD_CMD_STATUS; 1630 if (offset < fsp->data_len) 1631 r_ctl = FC_RCTL_DD_DATA_DESC; 1632 } else if (offset == fsp->xfer_contig_end) { 1633 r_ctl = FC_RCTL_DD_CMD_STATUS; 1634 } else if (fsp->xfer_contig_end < offset) { 1635 offset = fsp->xfer_contig_end; 1636 } 1637 fc_fcp_srr(fsp, r_ctl, offset); 1638 } 1639 } 1640 unlock_out: 1641 fc_fcp_unlock_pkt(fsp); 1642 out: 1643 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1644 fc_frame_free(fp); 1645 } 1646 1647 /** 1648 * fc_fcp_rec_error() - Handler for REC errors 1649 * @fsp: The FCP packet the error is on 1650 * @fp: The REC frame 1651 */ 1652 static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1653 { 1654 int error = PTR_ERR(fp); 1655 1656 if (fc_fcp_lock_pkt(fsp)) 1657 goto out; 1658 1659 switch (error) { 1660 case -FC_EX_CLOSED: 1661 FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n", 1662 fsp, fsp->rport->port_id); 1663 fc_fcp_retry_cmd(fsp, FC_ERROR); 1664 break; 1665 1666 default: 1667 FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n", 1668 fsp, fsp->rport->port_id, error); 1669 fsp->status_code = FC_CMD_PLOGO; 1670 /* fall through */ 1671 1672 case -FC_EX_TIMEOUT: 1673 /* 1674 * Assume REC or LS_ACC was lost. 1675 * The exchange manager will have aborted REC, so retry. 1676 */ 1677 FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n", 1678 fsp, fsp->rport->port_id, fsp->recov_retry, 1679 FC_MAX_RECOV_RETRY); 1680 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1681 fc_fcp_rec(fsp); 1682 else 1683 fc_fcp_recovery(fsp, FC_ERROR); 1684 break; 1685 } 1686 fc_fcp_unlock_pkt(fsp); 1687 out: 1688 fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */ 1689 } 1690 1691 /** 1692 * fc_fcp_recovery() - Handler for fcp_pkt recovery 1693 * @fsp: The FCP pkt that needs to be aborted 1694 */ 1695 static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code) 1696 { 1697 FC_FCP_DBG(fsp, "start recovery code %x\n", code); 1698 fsp->status_code = code; 1699 fsp->cdb_status = 0; 1700 fsp->io_status = 0; 1701 /* 1702 * if this fails then we let the scsi command timer fire and 1703 * scsi-ml escalate. 1704 */ 1705 fc_fcp_send_abort(fsp); 1706 } 1707 1708 /** 1709 * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request) 1710 * @fsp: The FCP packet the SRR is to be sent on 1711 * @r_ctl: The R_CTL field for the SRR request 1712 * This is called after receiving status but insufficient data, or 1713 * when expecting status but the request has timed out. 1714 */ 1715 static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) 1716 { 1717 struct fc_lport *lport = fsp->lp; 1718 struct fc_rport *rport; 1719 struct fc_rport_libfc_priv *rpriv; 1720 struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr); 1721 struct fc_seq *seq; 1722 struct fcp_srr *srr; 1723 struct fc_frame *fp; 1724 1725 rport = fsp->rport; 1726 rpriv = rport->dd_data; 1727 1728 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || 1729 rpriv->rp_state != RPORT_ST_READY) 1730 goto retry; /* shouldn't happen */ 1731 fp = fc_fcp_frame_alloc(lport, sizeof(*srr)); 1732 if (!fp) 1733 goto retry; 1734 1735 srr = fc_frame_payload_get(fp, sizeof(*srr)); 1736 memset(srr, 0, sizeof(*srr)); 1737 srr->srr_op = ELS_SRR; 1738 srr->srr_ox_id = htons(ep->oxid); 1739 srr->srr_rx_id = htons(ep->rxid); 1740 srr->srr_r_ctl = r_ctl; 1741 srr->srr_rel_off = htonl(offset); 1742 1743 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1744 rpriv->local_port->port_id, FC_TYPE_FCP, 1745 FC_FCTL_REQ, 0); 1746 1747 seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp, 1748 fc_fcp_pkt_destroy, 1749 fsp, get_fsp_rec_tov(fsp)); 1750 if (!seq) 1751 goto retry; 1752 1753 fsp->recov_seq = seq; 1754 fsp->xfer_len = offset; 1755 fsp->xfer_contig_end = offset; 1756 fsp->state &= ~FC_SRB_RCV_STATUS; 1757 fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */ 1758 return; 1759 retry: 1760 fc_fcp_retry_cmd(fsp, FC_TRANS_RESET); 1761 } 1762 1763 /** 1764 * fc_fcp_srr_resp() - Handler for SRR response 1765 * @seq: The sequence the SRR is on 1766 * @fp: The SRR frame 1767 * @arg: The FCP packet the SRR is on 1768 */ 1769 static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) 1770 { 1771 struct fc_fcp_pkt *fsp = arg; 1772 struct fc_frame_header *fh; 1773 1774 if (IS_ERR(fp)) { 1775 fc_fcp_srr_error(fsp, fp); 1776 return; 1777 } 1778 1779 if (fc_fcp_lock_pkt(fsp)) 1780 goto out; 1781 1782 fh = fc_frame_header_get(fp); 1783 /* 1784 * BUG? fc_fcp_srr_error calls fc_exch_done which would release 1785 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT, 1786 * then fc_exch_timeout would be sending an abort. The fc_exch_done 1787 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing 1788 * an abort response though. 1789 */ 1790 if (fh->fh_type == FC_TYPE_BLS) { 1791 fc_fcp_unlock_pkt(fsp); 1792 return; 1793 } 1794 1795 switch (fc_frame_payload_op(fp)) { 1796 case ELS_LS_ACC: 1797 fsp->recov_retry = 0; 1798 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1799 break; 1800 case ELS_LS_RJT: 1801 default: 1802 fc_fcp_recovery(fsp, FC_ERROR); 1803 break; 1804 } 1805 fc_fcp_unlock_pkt(fsp); 1806 out: 1807 fc_exch_done(seq); 1808 fc_frame_free(fp); 1809 } 1810 1811 /** 1812 * fc_fcp_srr_error() - Handler for SRR errors 1813 * @fsp: The FCP packet that the SRR error is on 1814 * @fp: The SRR frame 1815 */ 1816 static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) 1817 { 1818 if (fc_fcp_lock_pkt(fsp)) 1819 goto out; 1820 switch (PTR_ERR(fp)) { 1821 case -FC_EX_TIMEOUT: 1822 FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry); 1823 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1824 fc_fcp_rec(fsp); 1825 else 1826 fc_fcp_recovery(fsp, FC_TIMED_OUT); 1827 break; 1828 case -FC_EX_CLOSED: /* e.g., link failure */ 1829 FC_FCP_DBG(fsp, "SRR error, exchange closed\n"); 1830 /* fall through */ 1831 default: 1832 fc_fcp_retry_cmd(fsp, FC_ERROR); 1833 break; 1834 } 1835 fc_fcp_unlock_pkt(fsp); 1836 out: 1837 fc_exch_done(fsp->recov_seq); 1838 } 1839 1840 /** 1841 * fc_fcp_lport_queue_ready() - Determine if the lport and it's queue is ready 1842 * @lport: The local port to be checked 1843 */ 1844 static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) 1845 { 1846 /* lock ? */ 1847 return (lport->state == LPORT_ST_READY) && 1848 lport->link_up && !lport->qfull; 1849 } 1850 1851 /** 1852 * fc_queuecommand() - The queuecommand function of the SCSI template 1853 * @shost: The Scsi_Host that the command was issued to 1854 * @cmd: The scsi_cmnd to be executed 1855 * 1856 * This is the i/o strategy routine, called by the SCSI layer. 1857 */ 1858 int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) 1859 { 1860 struct fc_lport *lport = shost_priv(shost); 1861 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 1862 struct fc_fcp_pkt *fsp; 1863 int rval; 1864 int rc = 0; 1865 struct fc_stats *stats; 1866 1867 rval = fc_remote_port_chkready(rport); 1868 if (rval) { 1869 sc_cmd->result = rval; 1870 sc_cmd->scsi_done(sc_cmd); 1871 return 0; 1872 } 1873 1874 if (!*(struct fc_remote_port **)rport->dd_data) { 1875 /* 1876 * rport is transitioning from blocked/deleted to 1877 * online 1878 */ 1879 sc_cmd->result = DID_IMM_RETRY << 16; 1880 sc_cmd->scsi_done(sc_cmd); 1881 goto out; 1882 } 1883 1884 if (!fc_fcp_lport_queue_ready(lport)) { 1885 if (lport->qfull) { 1886 if (fc_fcp_can_queue_ramp_down(lport)) 1887 shost_printk(KERN_ERR, lport->host, 1888 "libfc: queue full, " 1889 "reducing can_queue to %d.\n", 1890 lport->host->can_queue); 1891 } 1892 rc = SCSI_MLQUEUE_HOST_BUSY; 1893 goto out; 1894 } 1895 1896 fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC); 1897 if (fsp == NULL) { 1898 rc = SCSI_MLQUEUE_HOST_BUSY; 1899 goto out; 1900 } 1901 1902 /* 1903 * build the libfc request pkt 1904 */ 1905 fsp->cmd = sc_cmd; /* save the cmd */ 1906 fsp->rport = rport; /* set the remote port ptr */ 1907 1908 /* 1909 * set up the transfer length 1910 */ 1911 fsp->data_len = scsi_bufflen(sc_cmd); 1912 fsp->xfer_len = 0; 1913 1914 /* 1915 * setup the data direction 1916 */ 1917 stats = per_cpu_ptr(lport->stats, get_cpu()); 1918 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1919 fsp->req_flags = FC_SRB_READ; 1920 stats->InputRequests++; 1921 stats->InputBytes += fsp->data_len; 1922 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 1923 fsp->req_flags = FC_SRB_WRITE; 1924 stats->OutputRequests++; 1925 stats->OutputBytes += fsp->data_len; 1926 } else { 1927 fsp->req_flags = 0; 1928 stats->ControlRequests++; 1929 } 1930 put_cpu(); 1931 1932 /* 1933 * send it to the lower layer 1934 * if we get -1 return then put the request in the pending 1935 * queue. 1936 */ 1937 rval = fc_fcp_pkt_send(lport, fsp); 1938 if (rval != 0) { 1939 fsp->state = FC_SRB_FREE; 1940 fc_fcp_pkt_release(fsp); 1941 rc = SCSI_MLQUEUE_HOST_BUSY; 1942 } 1943 out: 1944 return rc; 1945 } 1946 EXPORT_SYMBOL(fc_queuecommand); 1947 1948 /** 1949 * fc_io_compl() - Handle responses for completed commands 1950 * @fsp: The FCP packet that is complete 1951 * 1952 * Translates fcp_pkt errors to a Linux SCSI errors. 1953 * The fcp packet lock must be held when calling. 1954 */ 1955 static void fc_io_compl(struct fc_fcp_pkt *fsp) 1956 { 1957 struct fc_fcp_internal *si; 1958 struct scsi_cmnd *sc_cmd; 1959 struct fc_lport *lport; 1960 unsigned long flags; 1961 1962 /* release outstanding ddp context */ 1963 fc_fcp_ddp_done(fsp); 1964 1965 fsp->state |= FC_SRB_COMPL; 1966 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1967 spin_unlock_bh(&fsp->scsi_pkt_lock); 1968 del_timer_sync(&fsp->timer); 1969 spin_lock_bh(&fsp->scsi_pkt_lock); 1970 } 1971 1972 lport = fsp->lp; 1973 si = fc_get_scsi_internal(lport); 1974 1975 /* 1976 * if can_queue ramp down is done then try can_queue ramp up 1977 * since commands are completing now. 1978 */ 1979 if (si->last_can_queue_ramp_down_time) 1980 fc_fcp_can_queue_ramp_up(lport); 1981 1982 sc_cmd = fsp->cmd; 1983 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1984 switch (fsp->status_code) { 1985 case FC_COMPLETE: 1986 if (fsp->cdb_status == 0) { 1987 /* 1988 * good I/O status 1989 */ 1990 sc_cmd->result = DID_OK << 16; 1991 if (fsp->scsi_resid) 1992 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1993 } else { 1994 /* 1995 * transport level I/O was ok but scsi 1996 * has non zero status 1997 */ 1998 sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; 1999 } 2000 break; 2001 case FC_ERROR: 2002 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2003 "due to FC_ERROR\n"); 2004 sc_cmd->result = DID_ERROR << 16; 2005 break; 2006 case FC_DATA_UNDRUN: 2007 if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { 2008 /* 2009 * scsi status is good but transport level 2010 * underrun. 2011 */ 2012 if (fsp->state & FC_SRB_RCV_STATUS) { 2013 sc_cmd->result = DID_OK << 16; 2014 } else { 2015 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml" 2016 " due to FC_DATA_UNDRUN (trans)\n"); 2017 sc_cmd->result = DID_ERROR << 16; 2018 } 2019 } else { 2020 /* 2021 * scsi got underrun, this is an error 2022 */ 2023 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2024 "due to FC_DATA_UNDRUN (scsi)\n"); 2025 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 2026 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 2027 } 2028 break; 2029 case FC_DATA_OVRRUN: 2030 /* 2031 * overrun is an error 2032 */ 2033 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2034 "due to FC_DATA_OVRRUN\n"); 2035 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 2036 break; 2037 case FC_CMD_ABORTED: 2038 if (host_byte(sc_cmd->result) == DID_TIME_OUT) 2039 FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml " 2040 "due to FC_CMD_ABORTED\n"); 2041 else { 2042 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2043 "due to FC_CMD_ABORTED\n"); 2044 set_host_byte(sc_cmd, DID_ERROR); 2045 } 2046 sc_cmd->result |= fsp->io_status; 2047 break; 2048 case FC_CMD_RESET: 2049 FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml " 2050 "due to FC_CMD_RESET\n"); 2051 sc_cmd->result = (DID_RESET << 16); 2052 break; 2053 case FC_TRANS_RESET: 2054 FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml " 2055 "due to FC_TRANS_RESET\n"); 2056 sc_cmd->result = (DID_SOFT_ERROR << 16); 2057 break; 2058 case FC_HRD_ERROR: 2059 FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml " 2060 "due to FC_HRD_ERROR\n"); 2061 sc_cmd->result = (DID_NO_CONNECT << 16); 2062 break; 2063 case FC_CRC_ERROR: 2064 FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml " 2065 "due to FC_CRC_ERROR\n"); 2066 sc_cmd->result = (DID_PARITY << 16); 2067 break; 2068 case FC_TIMED_OUT: 2069 FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml " 2070 "due to FC_TIMED_OUT\n"); 2071 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 2072 break; 2073 default: 2074 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " 2075 "due to unknown error\n"); 2076 sc_cmd->result = (DID_ERROR << 16); 2077 break; 2078 } 2079 2080 if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) 2081 sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16); 2082 2083 spin_lock_irqsave(&si->scsi_queue_lock, flags); 2084 list_del(&fsp->list); 2085 sc_cmd->SCp.ptr = NULL; 2086 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 2087 sc_cmd->scsi_done(sc_cmd); 2088 2089 /* release ref from initial allocation in queue command */ 2090 fc_fcp_pkt_release(fsp); 2091 } 2092 2093 /** 2094 * fc_eh_abort() - Abort a command 2095 * @sc_cmd: The SCSI command to abort 2096 * 2097 * From SCSI host template. 2098 * Send an ABTS to the target device and wait for the response. 2099 */ 2100 int fc_eh_abort(struct scsi_cmnd *sc_cmd) 2101 { 2102 struct fc_fcp_pkt *fsp; 2103 struct fc_lport *lport; 2104 struct fc_fcp_internal *si; 2105 int rc = FAILED; 2106 unsigned long flags; 2107 int rval; 2108 2109 rval = fc_block_scsi_eh(sc_cmd); 2110 if (rval) 2111 return rval; 2112 2113 lport = shost_priv(sc_cmd->device->host); 2114 if (lport->state != LPORT_ST_READY) 2115 return rc; 2116 else if (!lport->link_up) 2117 return rc; 2118 2119 si = fc_get_scsi_internal(lport); 2120 spin_lock_irqsave(&si->scsi_queue_lock, flags); 2121 fsp = CMD_SP(sc_cmd); 2122 if (!fsp) { 2123 /* command completed while scsi eh was setting up */ 2124 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 2125 return SUCCESS; 2126 } 2127 /* grab a ref so the fsp and sc_cmd cannot be released from under us */ 2128 fc_fcp_pkt_hold(fsp); 2129 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 2130 2131 if (fc_fcp_lock_pkt(fsp)) { 2132 /* completed while we were waiting for timer to be deleted */ 2133 rc = SUCCESS; 2134 goto release_pkt; 2135 } 2136 2137 rc = fc_fcp_pkt_abort(fsp); 2138 fc_fcp_unlock_pkt(fsp); 2139 2140 release_pkt: 2141 fc_fcp_pkt_release(fsp); 2142 return rc; 2143 } 2144 EXPORT_SYMBOL(fc_eh_abort); 2145 2146 /** 2147 * fc_eh_device_reset() - Reset a single LUN 2148 * @sc_cmd: The SCSI command which identifies the device whose 2149 * LUN is to be reset 2150 * 2151 * Set from SCSI host template. 2152 */ 2153 int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) 2154 { 2155 struct fc_lport *lport; 2156 struct fc_fcp_pkt *fsp; 2157 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 2158 int rc = FAILED; 2159 int rval; 2160 2161 rval = fc_block_scsi_eh(sc_cmd); 2162 if (rval) 2163 return rval; 2164 2165 lport = shost_priv(sc_cmd->device->host); 2166 2167 if (lport->state != LPORT_ST_READY) 2168 return rc; 2169 2170 FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id); 2171 2172 fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); 2173 if (fsp == NULL) { 2174 printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n"); 2175 goto out; 2176 } 2177 2178 /* 2179 * Build the libfc request pkt. Do not set the scsi cmnd, because 2180 * the sc passed in is not setup for execution like when sent 2181 * through the queuecommand callout. 2182 */ 2183 fsp->rport = rport; /* set the remote port ptr */ 2184 2185 /* 2186 * flush outstanding commands 2187 */ 2188 rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun); 2189 fsp->state = FC_SRB_FREE; 2190 fc_fcp_pkt_release(fsp); 2191 2192 out: 2193 return rc; 2194 } 2195 EXPORT_SYMBOL(fc_eh_device_reset); 2196 2197 /** 2198 * fc_eh_host_reset() - Reset a Scsi_Host. 2199 * @sc_cmd: The SCSI command that identifies the SCSI host to be reset 2200 */ 2201 int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) 2202 { 2203 struct Scsi_Host *shost = sc_cmd->device->host; 2204 struct fc_lport *lport = shost_priv(shost); 2205 unsigned long wait_tmo; 2206 2207 FC_SCSI_DBG(lport, "Resetting host\n"); 2208 2209 fc_lport_reset(lport); 2210 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2211 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, 2212 wait_tmo)) 2213 msleep(1000); 2214 2215 if (fc_fcp_lport_queue_ready(lport)) { 2216 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2217 "on port (%6.6x)\n", lport->port_id); 2218 return SUCCESS; 2219 } else { 2220 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2221 "port (%6.6x) is not ready.\n", 2222 lport->port_id); 2223 return FAILED; 2224 } 2225 } 2226 EXPORT_SYMBOL(fc_eh_host_reset); 2227 2228 /** 2229 * fc_slave_alloc() - Configure the queue depth of a Scsi_Host 2230 * @sdev: The SCSI device that identifies the SCSI host 2231 * 2232 * Configures queue depth based on host's cmd_per_len. If not set 2233 * then we use the libfc default. 2234 */ 2235 int fc_slave_alloc(struct scsi_device *sdev) 2236 { 2237 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 2238 2239 if (!rport || fc_remote_port_chkready(rport)) 2240 return -ENXIO; 2241 2242 scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH); 2243 return 0; 2244 } 2245 EXPORT_SYMBOL(fc_slave_alloc); 2246 2247 /** 2248 * fc_fcp_destory() - Tear down the FCP layer for a given local port 2249 * @lport: The local port that no longer needs the FCP layer 2250 */ 2251 void fc_fcp_destroy(struct fc_lport *lport) 2252 { 2253 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 2254 2255 if (!list_empty(&si->scsi_pkt_queue)) 2256 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2257 "port (%6.6x)\n", lport->port_id); 2258 2259 mempool_destroy(si->scsi_pkt_pool); 2260 kfree(si); 2261 lport->scsi_priv = NULL; 2262 } 2263 EXPORT_SYMBOL(fc_fcp_destroy); 2264 2265 int fc_setup_fcp(void) 2266 { 2267 int rc = 0; 2268 2269 scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt", 2270 sizeof(struct fc_fcp_pkt), 2271 0, SLAB_HWCACHE_ALIGN, NULL); 2272 if (!scsi_pkt_cachep) { 2273 printk(KERN_ERR "libfc: Unable to allocate SRB cache, " 2274 "module load failed!"); 2275 rc = -ENOMEM; 2276 } 2277 2278 return rc; 2279 } 2280 2281 void fc_destroy_fcp(void) 2282 { 2283 kmem_cache_destroy(scsi_pkt_cachep); 2284 } 2285 2286 /** 2287 * fc_fcp_init() - Initialize the FCP layer for a local port 2288 * @lport: The local port to initialize the exchange layer for 2289 */ 2290 int fc_fcp_init(struct fc_lport *lport) 2291 { 2292 int rc; 2293 struct fc_fcp_internal *si; 2294 2295 if (!lport->tt.fcp_cmd_send) 2296 lport->tt.fcp_cmd_send = fc_fcp_cmd_send; 2297 2298 if (!lport->tt.fcp_cleanup) 2299 lport->tt.fcp_cleanup = fc_fcp_cleanup; 2300 2301 if (!lport->tt.fcp_abort_io) 2302 lport->tt.fcp_abort_io = fc_fcp_abort_io; 2303 2304 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL); 2305 if (!si) 2306 return -ENOMEM; 2307 lport->scsi_priv = si; 2308 si->max_can_queue = lport->host->can_queue; 2309 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2310 spin_lock_init(&si->scsi_queue_lock); 2311 2312 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2313 if (!si->scsi_pkt_pool) { 2314 rc = -ENOMEM; 2315 goto free_internal; 2316 } 2317 return 0; 2318 2319 free_internal: 2320 kfree(si); 2321 return rc; 2322 } 2323 EXPORT_SYMBOL(fc_fcp_init); 2324