// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * FiberChannel transport specific attributes exported to sysfs.
 *
 * Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (C) 2004-2007   James Smart, Emulex Corporation
 *    Rewrite for host, target, device, and remote port attributes,
 *    statistics, and service functions...
 *    Add vports, etc
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/bsg-lib.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_cmnd.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <uapi/scsi/fc/fc_els.h>
#include "scsi_priv.h"

static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
static int fc_vport_setup(struct Scsi_Host *shost, int channel,
	struct device *pdev, struct fc_vport_identifiers *ids,
	struct fc_vport **vport);
static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
			       struct fc_fpin_stats *stats);
static void fc_delivery_stats_update(u32 reason_code,
				     struct fc_fpin_stats *stats);
static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);

/*
 * Module Parameters
 */

/*
 * dev_loss_tmo: the default number of seconds that the FC transport
 *   should insulate the loss of a remote port.
 *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
 */
static unsigned int fc_dev_loss_tmo = 60;		/* seconds */

module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the FC transport should"
		 " insulate the loss of a remote port. Once this value is"
		 " exceeded, the scsi target is removed. Value should be"
		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
		 " fast_io_fail_tmo is not set.");
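
/*
 * Usage note (illustrative, not part of the original source): when the
 * transport is built as a module, the default can be overridden at load
 * time, e.g.
 *	modprobe scsi_transport_fc dev_loss_tmo=30
 * and, because the parameter is S_IWUSR, adjusted later through the module
 * parameter file, e.g.
 *	/sys/module/scsi_transport_fc/parameters/dev_loss_tmo
 * Individual remote ports may still override this value through their own
 * dev_loss_tmo sysfs attribute (see the rport attributes below).
 */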

/*
 * Redefine so that we can have same named attributes in the
 * sdev/starget/host objects.
 */
#define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store)		\
struct device_attribute device_attr_##_prefix##_##_name = 	\
	__ATTR(_name,_mode,_show,_store)

#define fc_enum_name_search(title, table_type, table)			\
static const char *get_fc_##title##_name(enum table_type table_key)	\
{									\
	int i;								\
	char *name = NULL;						\
									\
	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
		if (table[i].value == table_key) {			\
			name = table[i].name;				\
			break;						\
		}							\
	}								\
	return name;							\
}

#define fc_enum_name_match(title, table_type, table)			\
static int get_fc_##title##_match(const char *table_key,		\
		enum table_type *value)					\
{									\
	int i;								\
									\
	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
		if (strncmp(table_key, table[i].name,			\
				table[i].matchlen) == 0) {		\
			*value = table[i].value;			\
			return 0; /* success */				\
		}							\
	}								\
	return 1; /* failure */						\
}
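
/*
 * For example, instantiating the macros above as
 *	fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
 *	fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
 *			   fc_tgtid_binding_type_names)
 * generates static lookup helpers along the lines of
 *	const char *get_fc_port_type_name(enum fc_port_type key);
 *	int get_fc_tgtid_bind_type_match(const char *str,
 *					 enum fc_tgtid_binding_type *value);
 * which the show/store attribute handlers below use to translate between
 * enum values and the strings exposed through sysfs.
 */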

/* Convert fc_port_type values to ascii string name */
static struct {
	enum fc_port_type	value;
	char			*name;
} fc_port_type_names[] = {
	{ FC_PORTTYPE_UNKNOWN,		"Unknown" },
	{ FC_PORTTYPE_OTHER,		"Other" },
	{ FC_PORTTYPE_NOTPRESENT,	"Not Present" },
	{ FC_PORTTYPE_NPORT,	"NPort (fabric via point-to-point)" },
	{ FC_PORTTYPE_NLPORT,	"NLPort (fabric via loop)" },
	{ FC_PORTTYPE_LPORT,	"LPort (private loop)" },
	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
	{ FC_PORTTYPE_NPIV,		"NPIV VPORT" },
};
fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
#define FC_PORTTYPE_MAX_NAMELEN		50

/* Reuse fc_port_type enum function for vport_type */
#define get_fc_vport_type_name get_fc_port_type_name


/* Convert fc_host_event_code values to ascii string name */
static const struct {
	enum fc_host_event_code		value;
	char				*name;
} fc_host_event_code_names[] = {
	{ FCH_EVT_LIP,			"lip" },
	{ FCH_EVT_LINKUP,		"link_up" },
	{ FCH_EVT_LINKDOWN,		"link_down" },
	{ FCH_EVT_LIPRESET,		"lip_reset" },
	{ FCH_EVT_RSCN,			"rscn" },
	{ FCH_EVT_ADAPTER_CHANGE,	"adapter_chg" },
	{ FCH_EVT_PORT_UNKNOWN,		"port_unknown" },
	{ FCH_EVT_PORT_ONLINE,		"port_online" },
	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
	{ FCH_EVT_LINK_FPIN,		"link_FPIN" },
	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
};
fc_enum_name_search(host_event_code, fc_host_event_code,
		fc_host_event_code_names)
#define FC_HOST_EVENT_CODE_MAX_NAMELEN	30


/* Convert fc_port_state values to ascii string name */
static struct {
	enum fc_port_state	value;
	char			*name;
} fc_port_state_names[] = {
	{ FC_PORTSTATE_UNKNOWN,		"Unknown" },
	{ FC_PORTSTATE_NOTPRESENT,	"Not Present" },
	{ FC_PORTSTATE_ONLINE,		"Online" },
	{ FC_PORTSTATE_OFFLINE,		"Offline" },
	{ FC_PORTSTATE_BLOCKED,		"Blocked" },
	{ FC_PORTSTATE_BYPASSED,	"Bypassed" },
	{ FC_PORTSTATE_DIAGNOSTICS,	"Diagnostics" },
	{ FC_PORTSTATE_LINKDOWN,	"Linkdown" },
	{ FC_PORTSTATE_ERROR,		"Error" },
	{ FC_PORTSTATE_LOOPBACK,	"Loopback" },
	{ FC_PORTSTATE_DELETED,		"Deleted" },
};
fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
#define FC_PORTSTATE_MAX_NAMELEN	20


/* Convert fc_vport_state values to ascii string name */
static struct {
	enum fc_vport_state	value;
	char			*name;
} fc_vport_state_names[] = {
	{ FC_VPORT_UNKNOWN,		"Unknown" },
	{ FC_VPORT_ACTIVE,		"Active" },
	{ FC_VPORT_DISABLED,		"Disabled" },
	{ FC_VPORT_LINKDOWN,		"Linkdown" },
	{ FC_VPORT_INITIALIZING,	"Initializing" },
	{ FC_VPORT_NO_FABRIC_SUPP,	"No Fabric Support" },
	{ FC_VPORT_NO_FABRIC_RSCS,	"No Fabric Resources" },
	{ FC_VPORT_FABRIC_LOGOUT,	"Fabric Logout" },
	{ FC_VPORT_FABRIC_REJ_WWN,	"Fabric Rejected WWN" },
	{ FC_VPORT_FAILED,		"VPort Failed" },
};
fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
#define FC_VPORTSTATE_MAX_NAMELEN	24

/* Reuse fc_vport_state enum function for vport_last_state */
#define get_fc_vport_last_state_name get_fc_vport_state_name


/* Convert fc_tgtid_binding_type values to ascii string name */
static const struct {
	enum fc_tgtid_binding_type	value;
	char				*name;
	int				matchlen;
} fc_tgtid_binding_type_names[] = {
	{ FC_TGTID_BIND_NONE, "none", 4 },
	{ FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
	{ FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
	{ FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
};
fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
		fc_tgtid_binding_type_names)
fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
		fc_tgtid_binding_type_names)
#define FC_BINDTYPE_MAX_NAMELEN	30


#define fc_bitfield_name_search(title, table)			\
static ssize_t							\
get_fc_##title##_names(u32 table_key, char *buf)		\
{								\
	char *prefix = "";					\
	ssize_t len = 0;					\
	int i;							\
								\
	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
		if (table[i].value & table_key) {		\
			len += sprintf(buf + len, "%s%s",	\
				prefix, table[i].name);		\
			prefix = ", ";				\
		}						\
	}							\
	len += sprintf(buf + len, "\n");			\
	return len;						\
}


/* Convert FC_COS bit values to ascii string name */
static const struct {
	u32			value;
	char			*name;
} fc_cos_names[] = {
	{ FC_COS_CLASS1,	"Class 1" },
	{ FC_COS_CLASS2,	"Class 2" },
	{ FC_COS_CLASS3,	"Class 3" },
	{ FC_COS_CLASS4,	"Class 4" },
	{ FC_COS_CLASS6,	"Class 6" },
};
fc_bitfield_name_search(cos, fc_cos_names)


/* Convert FC_PORTSPEED bit values to ascii string name */
static const struct {
	u32			value;
	char			*name;
} fc_port_speed_names[] = {
	{ FC_PORTSPEED_1GBIT,		"1 Gbit" },
	{ FC_PORTSPEED_2GBIT,		"2 Gbit" },
	{ FC_PORTSPEED_4GBIT,		"4 Gbit" },
	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },
	{ FC_PORTSPEED_20GBIT,		"20 Gbit" },
	{ FC_PORTSPEED_40GBIT,		"40 Gbit" },
	{ FC_PORTSPEED_50GBIT,		"50 Gbit" },
	{ FC_PORTSPEED_100GBIT,		"100 Gbit" },
	{ FC_PORTSPEED_25GBIT,		"25 Gbit" },
	{ FC_PORTSPEED_64GBIT,		"64 Gbit" },
	{ FC_PORTSPEED_128GBIT,		"128 Gbit" },
	{ FC_PORTSPEED_256GBIT,		"256 Gbit" },
	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)


static int
show_fc_fc4s (char *buf, u8 *fc4_list)
{
	int i, len=0;

	for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
		len += sprintf(buf + len , "0x%02x ", *fc4_list);
	len += sprintf(buf + len, "\n");
	return len;
}


/* Convert FC_PORT_ROLE bit values to ascii string name */
static const struct {
	u32			value;
	char			*name;
} fc_port_role_names[] = {
	{ FC_PORT_ROLE_FCP_TARGET,		"FCP Target" },
	{ FC_PORT_ROLE_FCP_INITIATOR,		"FCP Initiator" },
	{ FC_PORT_ROLE_IP_PORT,			"IP Port" },
	{ FC_PORT_ROLE_FCP_DUMMY_INITIATOR,	"FCP Dummy Initiator" },
	{ FC_PORT_ROLE_NVME_INITIATOR,		"NVMe Initiator" },
	{ FC_PORT_ROLE_NVME_TARGET,		"NVMe Target" },
	{ FC_PORT_ROLE_NVME_DISCOVERY,		"NVMe Discovery" },
};
fc_bitfield_name_search(port_roles, fc_port_role_names)

/*
 * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
 */
#define FC_WELLKNOWN_PORTID_MASK	0xfffff0
#define FC_WELLKNOWN_ROLE_MASK		0x00000f
#define FC_FPORT_PORTID			0x00000e
#define FC_FABCTLR_PORTID		0x00000d
#define FC_DIRSRVR_PORTID		0x00000c
#define FC_TIMESRVR_PORTID		0x00000b
#define FC_MGMTSRVR_PORTID		0x00000a


static void fc_timeout_deleted_rport(struct work_struct *work);
static void fc_timeout_fail_rport_io(struct work_struct *work);
static void fc_scsi_scan_rport(struct work_struct *work);

/*
 * Attribute counts per object type...
 * Increase these values if you add attributes
 */
#define FC_STARGET_NUM_ATTRS	3
#define FC_RPORT_NUM_ATTRS	10
#define FC_VPORT_NUM_ATTRS	9
#define FC_HOST_NUM_ATTRS	29

struct fc_internal {
	struct scsi_transport_template t;
	struct fc_function_template *f;

	/*
	 * For attributes : each object has :
	 *   An array of the actual attributes structures
	 *   An array of null-terminated pointers to the attribute
	 *     structures - used for mid-layer interaction.
	 *
	 * The attribute containers for the starget and host are
	 * part of the midlayer. As the remote port is specific to the
	 * fc transport, we must provide the attribute container.
	 */
	struct device_attribute private_starget_attrs[
							FC_STARGET_NUM_ATTRS];
	struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];

	struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
	struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];

	struct transport_container rport_attr_cont;
	struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
	struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];

	struct transport_container vport_attr_cont;
	struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
	struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
};

#define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
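
/*
 * Note on the attribute plumbing: the private_*_attrs arrays hold writable
 * copies of the device_attribute templates created with FC_DEVICE_ATTR(),
 * and the NULL-terminated *_attrs pointer arrays are what gets handed to
 * the midlayer.  The SETUP_*_ATTRIBUTE_* macros further down copy a
 * template into the private array, downgrade it to read-only if the LLDD
 * supplies no set_* handler, and bump the count only when the driver's
 * fc_function_template indicates the attribute should be exposed.
 */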

static int fc_target_setup(struct transport_container *tc, struct device *dev,
			   struct device *cdev)
{
	struct scsi_target *starget = to_scsi_target(dev);
	struct fc_rport *rport = starget_to_rport(starget);

	/*
	 * if parent is remote port, use values from remote port.
	 * Otherwise, this host uses the fc_transport, but not the
	 * remote port interface. As such, initialize to known non-values.
	 */
	if (rport) {
		fc_starget_node_name(starget) = rport->node_name;
		fc_starget_port_name(starget) = rport->port_name;
		fc_starget_port_id(starget) = rport->port_id;
	} else {
		fc_starget_node_name(starget) = -1;
		fc_starget_port_name(starget) = -1;
		fc_starget_port_id(starget) = -1;
	}

	return 0;
}

static DECLARE_TRANSPORT_CLASS(fc_transport_class,
			       "fc_transport",
			       fc_target_setup,
			       NULL,
			       NULL);

static int fc_host_setup(struct transport_container *tc, struct device *dev,
			 struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);

	/*
	 * Set default values easily detected by the midlayer as
	 * failure cases.  The scsi lldd is responsible for initializing
	 * all transport attributes to valid values per host.
	 */
	fc_host->node_name = -1;
	fc_host->port_name = -1;
	fc_host->permanent_port_name = -1;
	fc_host->supported_classes = FC_COS_UNSPECIFIED;
	memset(fc_host->supported_fc4s, 0,
		sizeof(fc_host->supported_fc4s));
	fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
	fc_host->maxframe_size = -1;
	fc_host->max_npiv_vports = 0;
	memset(fc_host->serial_number, 0,
		sizeof(fc_host->serial_number));
	memset(fc_host->manufacturer, 0,
		sizeof(fc_host->manufacturer));
	memset(fc_host->model, 0,
		sizeof(fc_host->model));
	memset(fc_host->model_description, 0,
		sizeof(fc_host->model_description));
	memset(fc_host->hardware_version, 0,
		sizeof(fc_host->hardware_version));
	memset(fc_host->driver_version, 0,
		sizeof(fc_host->driver_version));
	memset(fc_host->firmware_version, 0,
		sizeof(fc_host->firmware_version));
	memset(fc_host->optionrom_version, 0,
		sizeof(fc_host->optionrom_version));

	fc_host->port_id = -1;
	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
	fc_host->port_state = FC_PORTSTATE_UNKNOWN;
	memset(fc_host->active_fc4s, 0,
		sizeof(fc_host->active_fc4s));
	fc_host->speed = FC_PORTSPEED_UNKNOWN;
	fc_host->fabric_name = -1;
	memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
	memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
	memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));

	fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;

	INIT_LIST_HEAD(&fc_host->rports);
	INIT_LIST_HEAD(&fc_host->rport_bindings);
	INIT_LIST_HEAD(&fc_host->vports);
	fc_host->next_rport_number = 0;
	fc_host->next_target_id = 0;
	fc_host->next_vport_number = 0;
	fc_host->npiv_vports_inuse = 0;

	snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
		 "fc_wq_%d", shost->host_no);
	fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
	if (!fc_host->work_q)
		return -ENOMEM;

	fc_host->dev_loss_tmo = fc_dev_loss_tmo;
	snprintf(fc_host->devloss_work_q_name,
		 sizeof(fc_host->devloss_work_q_name),
		 "fc_dl_%d", shost->host_no);
	fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
					fc_host->devloss_work_q_name);
	if (!fc_host->devloss_work_q) {
		destroy_workqueue(fc_host->work_q);
		fc_host->work_q = NULL;
		return -ENOMEM;
	}

	fc_bsg_hostadd(shost, fc_host);
	/* ignore any bsg add error - we just can't do sgio */

	return 0;
}

static int fc_host_remove(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);

	fc_bsg_remove(fc_host->rqst_q);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(fc_host_class,
			       "fc_host",
			       fc_host_setup,
			       fc_host_remove,
			       NULL);

/*
 * Setup and Remove actions for remote ports are handled
 * in the service functions below.
 */
static DECLARE_TRANSPORT_CLASS(fc_rport_class,
			       "fc_remote_ports",
			       NULL,
			       NULL,
			       NULL);

/*
 * Setup and Remove actions for virtual ports are handled
 * in the service functions below.
 */
static DECLARE_TRANSPORT_CLASS(fc_vport_class,
			       "fc_vports",
			       NULL,
			       NULL,
			       NULL);

/*
 * Netlink Infrastructure
 */

static atomic_t fc_event_seq;

/**
 * fc_get_event_number - Obtain the next sequential FC event number
 *
 * Notes:
 *   We could have inlined this, but it would have required fc_event_seq to
 *   be exposed. For now, live with the subroutine call.
 *   Atomic used to avoid lock/unlock...
 */
u32
fc_get_event_number(void)
{
	return atomic_add_return(1, &fc_event_seq);
}
EXPORT_SYMBOL(fc_get_event_number);

/**
 * fc_host_post_fc_event - routine to do the work of posting an event
 *                      on an fc_host.
 * @shost:		host the event occurred on
 * @event_number:	fc event number obtained from get_fc_event_number()
 * @event_code:		fc_host event being posted
 * @data_len:		amount, in bytes, of event data
 * @data_buf:		pointer to event data
 * @vendor_id:		value for Vendor id
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
		enum fc_host_event_code event_code,
		u32 data_len, char *data_buf, u64 vendor_id)
{
	struct sk_buff *skb;
	struct nlmsghdr	*nlh;
	struct fc_nl_event *event;
	const char *name;
	u32 len;
	int err;

	if (!data_buf || data_len < 4)
		data_len = 0;

	if (!scsi_nl_sock) {
		err = -ENOENT;
		goto send_fail;
	}

	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto send_fail;
	}

	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
	if (!nlh) {
		err = -ENOBUFS;
		goto send_fail_skb;
	}
	event = nlmsg_data(nlh);

	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
				FC_NL_ASYNC_EVENT, len);
	event->seconds = ktime_get_real_seconds();
	event->vendor_id = vendor_id;
	event->host_no = shost->host_no;
	event->event_datalen = data_len;	/* bytes */
	event->event_num = event_number;
	event->event_code = event_code;
	if (data_len)
		memcpy(&event->event_data, data_buf, data_len);

	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
			GFP_KERNEL);
	return;

send_fail_skb:
	kfree_skb(skb);
send_fail:
	name = get_fc_host_event_code_name(event_code);
	printk(KERN_WARNING
		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
		__func__, shost->host_no,
		(name) ? name : "<unknown>",
		(data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
	return;
}
EXPORT_SYMBOL(fc_host_post_fc_event);
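
/*
 * Usage sketch (illustrative only, not taken from any particular LLDD):
 * a driver that detects a link event would typically do
 *
 *	fc_host_post_event(shost, fc_get_event_number(),
 *			   FCH_EVT_LINKDOWN, 0);
 *
 * and userspace listeners subscribed to the SCSI_NL_GRP_FC_EVENTS netlink
 * multicast group receive the resulting fc_nl_event message.
 */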

/**
 * fc_host_post_event - called to post an event on an fc_host.
 * @shost:		host the event occurred on
 * @event_number:	fc event number obtained from get_fc_event_number()
 * @event_code:		fc_host event being posted
 * @event_data:		32bits of data for the event being posted
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
		enum fc_host_event_code event_code, u32 event_data)
{
	fc_host_post_fc_event(shost, event_number, event_code,
		(u32)sizeof(u32), (char *)&event_data, 0);
}
EXPORT_SYMBOL(fc_host_post_event);


/**
 * fc_host_post_vendor_event - called to post a vendor unique event
 *                      on an fc_host
 * @shost:		host the event occurred on
 * @event_number:	fc event number obtained from get_fc_event_number()
 * @data_len:		amount, in bytes, of vendor unique data
 * @data_buf:		pointer to vendor unique data
 * @vendor_id:		Vendor id
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
		u32 data_len, char * data_buf, u64 vendor_id)
{
	fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
		data_len, data_buf, vendor_id);
}
EXPORT_SYMBOL(fc_host_post_vendor_event);

/**
 * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
 * @shost:		host the fc_rport is associated with
 * @wwpn:		wwpn of the fc_rport device
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
struct fc_rport *
fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
{
	struct fc_rport *rport;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
		if (rport->port_state != FC_PORTSTATE_ONLINE)
			continue;

		if (rport->port_name == wwpn) {
			spin_unlock_irqrestore(shost->host_lock, flags);
			return rport;
		}
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	return NULL;
}
EXPORT_SYMBOL(fc_find_rport_by_wwpn);

static void
fc_li_stats_update(struct fc_fn_li_desc *li_desc,
		   struct fc_fpin_stats *stats)
{
	stats->li += be32_to_cpu(li_desc->event_count);
	switch (be16_to_cpu(li_desc->event_type)) {
	case FPIN_LI_UNKNOWN:
		stats->li_failure_unknown +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_LINK_FAILURE:
		stats->li_link_failure_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_LOSS_OF_SYNC:
		stats->li_loss_of_sync_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_LOSS_OF_SIG:
		stats->li_loss_of_signals_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_PRIM_SEQ_ERR:
		stats->li_prim_seq_err_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_INVALID_TX_WD:
		stats->li_invalid_tx_word_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_INVALID_CRC:
		stats->li_invalid_crc_count +=
		    be32_to_cpu(li_desc->event_count);
		break;
	case FPIN_LI_DEVICE_SPEC:
		stats->li_device_specific +=
		    be32_to_cpu(li_desc->event_count);
		break;
	}
}

static void
fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
{
	stats->dn++;
	switch (reason_code) {
	case FPIN_DELI_UNKNOWN:
		stats->dn_unknown++;
		break;
	case FPIN_DELI_TIMEOUT:
		stats->dn_timeout++;
		break;
	case FPIN_DELI_UNABLE_TO_ROUTE:
		stats->dn_unable_to_route++;
		break;
	case FPIN_DELI_DEVICE_SPEC:
		stats->dn_device_specific++;
		break;
	}
}

static void
fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
{
	stats->cn++;
	switch (event_type) {
	case FPIN_CONGN_CLEAR:
		stats->cn_clear++;
		break;
	case FPIN_CONGN_LOST_CREDIT:
		stats->cn_lost_credit++;
		break;
	case FPIN_CONGN_CREDIT_STALL:
		stats->cn_credit_stall++;
		break;
	case FPIN_CONGN_OVERSUBSCRIPTION:
		stats->cn_oversubscription++;
		break;
	case FPIN_CONGN_DEVICE_SPEC:
		stats->cn_device_specific++;
	}
}

/*
 * fc_fpin_li_stats_update - routine to update Link Integrity
 * event statistics.
 * @shost:		host the FPIN was received on
 * @tlv:		pointer to link integrity descriptor
 *
 */
static void
fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
{
	u8 i;
	struct fc_rport *rport = NULL;
	struct fc_rport *attach_rport = NULL;
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
	u64 wwpn;

	rport = fc_find_rport_by_wwpn(shost,
				      be64_to_cpu(li_desc->attached_wwpn));
	if (rport &&
	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
		attach_rport = rport;
		fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
	}

	if (be32_to_cpu(li_desc->pname_count) > 0) {
		for (i = 0;
		    i < be32_to_cpu(li_desc->pname_count);
		    i++) {
			wwpn = be64_to_cpu(li_desc->pname_list[i]);
			rport = fc_find_rport_by_wwpn(shost, wwpn);
			if (rport &&
			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
			    rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
				if (rport == attach_rport)
					continue;
				fc_li_stats_update(li_desc,
						   &rport->fpin_stats);
			}
		}
	}

	if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
		fc_li_stats_update(li_desc, &fc_host->fpin_stats);
}

/*
 * fc_fpin_delivery_stats_update - routine to update Delivery Notification
 * event statistics.
 * @shost:		host the FPIN was received on
 * @tlv:		pointer to delivery descriptor
 *
 */
static void
fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
			      struct fc_tlv_desc *tlv)
{
	struct fc_rport *rport = NULL;
	struct fc_rport *attach_rport = NULL;
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
	u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);

	rport = fc_find_rport_by_wwpn(shost,
				      be64_to_cpu(dn_desc->attached_wwpn));
	if (rport &&
	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
		attach_rport = rport;
		fc_delivery_stats_update(reason_code,
					 &attach_rport->fpin_stats);
	}

	if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
		fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
}

/*
 * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
 * event statistics.
 * @shost:		host the FPIN was received on
 * @tlv:		pointer to peer congestion descriptor
 *
 */
static void
fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
				struct fc_tlv_desc *tlv)
{
	u8 i;
	struct fc_rport *rport = NULL;
	struct fc_rport *attach_rport = NULL;
	struct fc_fn_peer_congn_desc *pc_desc =
	    (struct fc_fn_peer_congn_desc *)tlv;
	u16 event_type = be16_to_cpu(pc_desc->event_type);
	u64 wwpn;

	rport = fc_find_rport_by_wwpn(shost,
				      be64_to_cpu(pc_desc->attached_wwpn));
	if (rport &&
	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
		attach_rport = rport;
		fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
	}

	if (be32_to_cpu(pc_desc->pname_count) > 0) {
		for (i = 0;
		    i < be32_to_cpu(pc_desc->pname_count);
		    i++) {
			wwpn = be64_to_cpu(pc_desc->pname_list[i]);
			rport = fc_find_rport_by_wwpn(shost, wwpn);
			if (rport &&
			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
			     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
				if (rport == attach_rport)
					continue;
				fc_cn_stats_update(event_type,
						   &rport->fpin_stats);
			}
		}
	}
}

/*
 * fc_fpin_congn_stats_update - routine to update Congestion
 * event statistics.
 * @shost:		host the FPIN was received on
 * @tlv:		pointer to congestion descriptor
 *
 */
static void
fc_fpin_congn_stats_update(struct Scsi_Host *shost,
			   struct fc_tlv_desc *tlv)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;

	fc_cn_stats_update(be16_to_cpu(congn->event_type),
			   &fc_host->fpin_stats);
}
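
/*
 * Layout reminder (added for clarity): an FPIN ELS payload, as consumed by
 * fc_host_fpin_rcv() below, is roughly a struct fc_els_fpin header (command
 * byte plus descriptor list length) followed by a list of TLV descriptors.
 * Each descriptor begins with a 32-bit tag (ELS_DTAG_*) and a length, which
 * is what FC_TLV_DESC_SZ_FROM_LENGTH() and fc_tlv_next_desc() use to walk
 * the list and dispatch to the per-type stats helpers above.
 */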

/**
 * fc_host_fpin_rcv - routine to process a received FPIN.
 * @shost:		host the FPIN was received on
 * @fpin_len:		length of FPIN payload, in bytes
 * @fpin_buf:		pointer to FPIN payload
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
{
	struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
	struct fc_tlv_desc *tlv;
	u32 desc_cnt = 0, bytes_remain;
	u32 dtag;

	/* Update Statistics */
	tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
	bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
	bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));

	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
		dtag = be32_to_cpu(tlv->desc_tag);
		switch (dtag) {
		case ELS_DTAG_LNK_INTEGRITY:
			fc_fpin_li_stats_update(shost, tlv);
			break;
		case ELS_DTAG_DELIVERY:
			fc_fpin_delivery_stats_update(shost, tlv);
			break;
		case ELS_DTAG_PEER_CONGEST:
			fc_fpin_peer_congn_stats_update(shost, tlv);
			break;
		case ELS_DTAG_CONGESTION:
			fc_fpin_congn_stats_update(shost, tlv);
		}

		desc_cnt++;
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
	}

	fc_host_post_fc_event(shost, fc_get_event_number(),
			      FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
}
EXPORT_SYMBOL(fc_host_fpin_rcv);


static __init int fc_transport_init(void)
{
	int error;

	atomic_set(&fc_event_seq, 0);

	error = transport_class_register(&fc_host_class);
	if (error)
		return error;
	error = transport_class_register(&fc_vport_class);
	if (error)
		goto unreg_host_class;
	error = transport_class_register(&fc_rport_class);
	if (error)
		goto unreg_vport_class;
	error = transport_class_register(&fc_transport_class);
	if (error)
		goto unreg_rport_class;
	return 0;

unreg_rport_class:
	transport_class_unregister(&fc_rport_class);
unreg_vport_class:
	transport_class_unregister(&fc_vport_class);
unreg_host_class:
	transport_class_unregister(&fc_host_class);
	return error;
}

static void __exit fc_transport_exit(void)
{
	transport_class_unregister(&fc_transport_class);
	transport_class_unregister(&fc_rport_class);
	transport_class_unregister(&fc_host_class);
	transport_class_unregister(&fc_vport_class);
}

/*
 * FC Remote Port Attribute Management
 */

#define fc_rport_show_function(field, format_string, sz, cast)		\
static ssize_t								\
show_fc_rport_##field (struct device *dev,				\
		       struct device_attribute *attr, char *buf)	\
{									\
	struct fc_rport *rport = transport_class_to_rport(dev);	\
	struct Scsi_Host *shost = rport_to_shost(rport);		\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	if ((i->f->get_rport_##field) &&				\
	    !((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
	      (rport->port_state == FC_PORTSTATE_DELETED) ||		\
	      (rport->port_state == FC_PORTSTATE_NOTPRESENT)))		\
		i->f->get_rport_##field(rport);				\
	return snprintf(buf, sz, format_string, cast rport->field);	\
}
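
/*
 * For illustration (hypothetical field name, not in the original source):
 * for an rport field "foo",
 *	fc_rport_rd_attr(foo, "%d\n", 20);
 * would generate show_fc_rport_foo() from the macro above (calling the
 * LLDD's get_rport_foo() hook unless the port is blocked, deleted or not
 * present) together with a read-only device_attr_rport_foo attribute that
 * SETUP_RPORT_ATTRIBUTE_RD() can later wire into the rport's sysfs
 * directory.
 */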

#define fc_rport_store_function(field)					\
static ssize_t								\
store_fc_rport_##field(struct device *dev,				\
		       struct device_attribute *attr,			\
		       const char *buf,	size_t count)			\
{									\
	int val;							\
	struct fc_rport *rport = transport_class_to_rport(dev);	\
	struct Scsi_Host *shost = rport_to_shost(rport);		\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	char *cp;							\
	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
	    (rport->port_state == FC_PORTSTATE_DELETED) ||		\
	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))		\
		return -EBUSY;						\
	val = simple_strtoul(buf, &cp, 0);				\
	if (*cp && (*cp != '\n'))					\
		return -EINVAL;						\
	i->f->set_rport_##field(rport, val);				\
	return count;							\
}

#define fc_rport_rd_attr(field, format_string, sz)			\
	fc_rport_show_function(field, format_string, sz, )		\
static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
			 show_fc_rport_##field, NULL)

#define fc_rport_rd_attr_cast(field, format_string, sz, cast)		\
	fc_rport_show_function(field, format_string, sz, (cast))	\
static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
			  show_fc_rport_##field, NULL)

#define fc_rport_rw_attr(field, format_string, sz)			\
	fc_rport_show_function(field, format_string, sz, )		\
	fc_rport_store_function(field)					\
static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR,		\
			show_fc_rport_##field,				\
			store_fc_rport_##field)


#define fc_private_rport_show_function(field, format_string, sz, cast)	\
static ssize_t								\
show_fc_rport_##field (struct device *dev,				\
		       struct device_attribute *attr, char *buf)	\
{									\
	struct fc_rport *rport = transport_class_to_rport(dev);	\
	return snprintf(buf, sz, format_string, cast rport->field);	\
}

#define fc_private_rport_rd_attr(field, format_string, sz)		\
	fc_private_rport_show_function(field, format_string, sz, )	\
static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
			 show_fc_rport_##field, NULL)

#define fc_private_rport_rd_attr_cast(field, format_string, sz, cast)	\
	fc_private_rport_show_function(field, format_string, sz, (cast)) \
static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
			  show_fc_rport_##field, NULL)


#define fc_private_rport_rd_enum_attr(title, maxlen)			\
static ssize_t								\
show_fc_rport_##title (struct device *dev,				\
		       struct device_attribute *attr, char *buf)	\
{									\
	struct fc_rport *rport = transport_class_to_rport(dev);	\
	const char *name;						\
	name = get_fc_##title##_name(rport->title);			\
	if (!name)							\
		return -EINVAL;						\
	return snprintf(buf, maxlen, "%s\n", name);			\
}									\
static FC_DEVICE_ATTR(rport, title, S_IRUGO,			\
			show_fc_rport_##title, NULL)


#define SETUP_RPORT_ATTRIBUTE_RD(field)					\
	i->private_rport_attrs[count] = device_attr_rport_##field; \
	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
	i->private_rport_attrs[count].store = NULL;			\
	i->rport_attrs[count] = &i->private_rport_attrs[count];	\
	if (i->f->show_rport_##field)					\
		count++

#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field)				\
	i->private_rport_attrs[count] = device_attr_rport_##field; \
	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
	i->private_rport_attrs[count].store = NULL;			\
	i->rport_attrs[count] = &i->private_rport_attrs[count];	\
	count++

#define SETUP_RPORT_ATTRIBUTE_RW(field)					\
	i->private_rport_attrs[count] = device_attr_rport_##field; \
	if (!i->f->set_rport_##field) {					\
		i->private_rport_attrs[count].attr.mode = S_IRUGO;	\
		i->private_rport_attrs[count].store = NULL;		\
	}								\
	i->rport_attrs[count] = &i->private_rport_attrs[count];	\
	if (i->f->show_rport_##field)					\
		count++

#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field)				\
{									\
	i->private_rport_attrs[count] = device_attr_rport_##field; \
	i->rport_attrs[count] = &i->private_rport_attrs[count];	\
	count++;							\
}


/* The FC Transport Remote Port Attributes: */

/* Fixed Remote Port Attributes */

fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);

static ssize_t
show_fc_rport_supported_classes (struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct fc_rport *rport = transport_class_to_rport(dev);
	if (rport->supported_classes == FC_COS_UNSPECIFIED)
		return snprintf(buf, 20, "unspecified\n");
	return get_fc_cos_names(rport->supported_classes, buf);
}
static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
		show_fc_rport_supported_classes, NULL);

/* Dynamic Remote Port Attributes */

/*
 * dev_loss_tmo attribute
 */
static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
{
	char *cp;

	*val = simple_strtoul(buf, &cp, 0);
	if (*cp && (*cp != '\n'))
		return -EINVAL;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (*val > UINT_MAX)
		return -EINVAL;

	return 0;
}

static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
				     unsigned long val)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);

	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
	    (rport->port_state == FC_PORTSTATE_DELETED) ||
	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
		return -EBUSY;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (val > UINT_MAX)
		return -EINVAL;

	/*
	 * If fast_io_fail is off we have to cap
	 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
	 */
	if (rport->fast_io_fail_tmo == -1 &&
	    val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;

	i->f->set_rport_dev_loss_tmo(rport, val);
	return 0;
}

fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct fc_rport *rport = transport_class_to_rport(dev);
	unsigned long val;
	int rc;

	rc = fc_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	rc = fc_rport_set_dev_loss_tmo(rport, val);
	if (rc)
		return rc;
	return count;
}
static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
		show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
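
/*
 * Administration sketch (illustrative path names, not from the original
 * source): these attributes surface under the remote port's sysfs
 * directory, so per-port timeouts can be tuned with, e.g.
 *	echo 30 > /sys/class/fc_remote_ports/rport-2:0-3/dev_loss_tmo
 *	echo 5  > /sys/class/fc_remote_ports/rport-2:0-3/fast_io_fail_tmo
 * subject to the validation in fc_rport_set_dev_loss_tmo() and
 * store_fc_rport_fast_io_fail_tmo() below.
 */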


/* Private Remote Port Attributes */

fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);

static ssize_t
show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct fc_rport *rport = transport_class_to_rport(dev);

	/* identify any roles that are port_id specific */
	if ((rport->port_id != -1) &&
	    (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
					FC_WELLKNOWN_PORTID_MASK) {
		switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
		case FC_FPORT_PORTID:
			return snprintf(buf, 30, "Fabric Port\n");
		case FC_FABCTLR_PORTID:
			return snprintf(buf, 30, "Fabric Controller\n");
		case FC_DIRSRVR_PORTID:
			return snprintf(buf, 30, "Directory Server\n");
		case FC_TIMESRVR_PORTID:
			return snprintf(buf, 30, "Time Server\n");
		case FC_MGMTSRVR_PORTID:
			return snprintf(buf, 30, "Management Server\n");
		default:
			return snprintf(buf, 30, "Unknown Fabric Entity\n");
		}
	} else {
		if (rport->roles == FC_PORT_ROLE_UNKNOWN)
			return snprintf(buf, 20, "unknown\n");
		return get_fc_port_roles_names(rport->roles, buf);
	}
}
static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
		show_fc_rport_roles, NULL);

fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);

/*
 * fast_io_fail_tmo attribute
 */
static ssize_t
show_fc_rport_fast_io_fail_tmo (struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct fc_rport *rport = transport_class_to_rport(dev);

	if (rport->fast_io_fail_tmo == -1)
		return snprintf(buf, 5, "off\n");
	return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
}

static ssize_t
store_fc_rport_fast_io_fail_tmo(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int val;
	char *cp;
	struct fc_rport *rport = transport_class_to_rport(dev);

	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
	    (rport->port_state == FC_PORTSTATE_DELETED) ||
	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
		return -EBUSY;
	if (strncmp(buf, "off", 3) == 0)
		rport->fast_io_fail_tmo = -1;
	else {
		val = simple_strtoul(buf, &cp, 0);
		if ((*cp && (*cp != '\n')) || (val < 0))
			return -EINVAL;
		/*
		 * Cap fast_io_fail by dev_loss_tmo or
		 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
		 */
		if ((val >= rport->dev_loss_tmo) ||
		    (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
			return -EINVAL;

		rport->fast_io_fail_tmo = val;
	}
	return count;
}
static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);

#define fc_rport_fpin_statistic(name)					\
static ssize_t fc_rport_fpinstat_##name(struct device *cd,		\
				  struct device_attribute *attr,	\
				  char *buf)				\
{									\
	struct fc_rport *rport = transport_class_to_rport(cd);		\
									\
	return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name);	\
}									\
static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)

fc_rport_fpin_statistic(dn);
fc_rport_fpin_statistic(dn_unknown);
fc_rport_fpin_statistic(dn_timeout);
fc_rport_fpin_statistic(dn_unable_to_route);
fc_rport_fpin_statistic(dn_device_specific);
fc_rport_fpin_statistic(cn);
fc_rport_fpin_statistic(cn_clear);
fc_rport_fpin_statistic(cn_lost_credit);
fc_rport_fpin_statistic(cn_credit_stall);
fc_rport_fpin_statistic(cn_oversubscription);
fc_rport_fpin_statistic(cn_device_specific);
fc_rport_fpin_statistic(li);
fc_rport_fpin_statistic(li_failure_unknown);
fc_rport_fpin_statistic(li_link_failure_count);
fc_rport_fpin_statistic(li_loss_of_sync_count);
fc_rport_fpin_statistic(li_loss_of_signals_count);
fc_rport_fpin_statistic(li_prim_seq_err_count);
fc_rport_fpin_statistic(li_invalid_tx_word_count);
fc_rport_fpin_statistic(li_invalid_crc_count);
fc_rport_fpin_statistic(li_device_specific);

static struct attribute *fc_rport_statistics_attrs[] = {
	&device_attr_rport_fpin_dn.attr,
	&device_attr_rport_fpin_dn_unknown.attr,
	&device_attr_rport_fpin_dn_timeout.attr,
	&device_attr_rport_fpin_dn_unable_to_route.attr,
	&device_attr_rport_fpin_dn_device_specific.attr,
	&device_attr_rport_fpin_li.attr,
	&device_attr_rport_fpin_li_failure_unknown.attr,
	&device_attr_rport_fpin_li_link_failure_count.attr,
	&device_attr_rport_fpin_li_loss_of_sync_count.attr,
	&device_attr_rport_fpin_li_loss_of_signals_count.attr,
	&device_attr_rport_fpin_li_prim_seq_err_count.attr,
	&device_attr_rport_fpin_li_invalid_tx_word_count.attr,
	&device_attr_rport_fpin_li_invalid_crc_count.attr,
	&device_attr_rport_fpin_li_device_specific.attr,
	&device_attr_rport_fpin_cn.attr,
	&device_attr_rport_fpin_cn_clear.attr,
	&device_attr_rport_fpin_cn_lost_credit.attr,
	&device_attr_rport_fpin_cn_credit_stall.attr,
	&device_attr_rport_fpin_cn_oversubscription.attr,
	&device_attr_rport_fpin_cn_device_specific.attr,
	NULL
};

static struct attribute_group fc_rport_statistics_group = {
	.name = "statistics",
	.attrs = fc_rport_statistics_attrs,
};


/*
 * FC SCSI Target Attribute Management
 */

/*
 * Note: in the target show function we recognize when the remote
 *       port is in the hierarchy and do not allow the driver to get
 *       involved in sysfs functions. The driver only gets involved if
 *       it's the "old" style that doesn't use rports.
 */
#define fc_starget_show_function(field, format_string, sz, cast)	\
static ssize_t								\
show_fc_starget_##field (struct device *dev,				\
			 struct device_attribute *attr, char *buf)	\
{									\
	struct scsi_target *starget = transport_class_to_starget(dev);	\
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	struct fc_rport *rport = starget_to_rport(starget);		\
	if (rport)							\
		fc_starget_##field(starget) = rport->field;		\
	else if (i->f->get_starget_##field)				\
		i->f->get_starget_##field(starget);			\
	return snprintf(buf, sz, format_string,				\
		cast fc_starget_##field(starget));			\
}

#define fc_starget_rd_attr(field, format_string, sz)			\
	fc_starget_show_function(field, format_string, sz, )		\
static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
			 show_fc_starget_##field, NULL)

#define fc_starget_rd_attr_cast(field, format_string, sz, cast)		\
	fc_starget_show_function(field, format_string, sz, (cast))	\
static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
			  show_fc_starget_##field, NULL)

#define SETUP_STARGET_ATTRIBUTE_RD(field)				\
	i->private_starget_attrs[count] = device_attr_starget_##field; \
	i->private_starget_attrs[count].attr.mode = S_IRUGO;		\
	i->private_starget_attrs[count].store = NULL;			\
	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
	if (i->f->show_starget_##field)					\
		count++

#define SETUP_STARGET_ATTRIBUTE_RW(field)				\
	i->private_starget_attrs[count] = device_attr_starget_##field; \
	if (!i->f->set_starget_##field) {				\
		i->private_starget_attrs[count].attr.mode = S_IRUGO;	\
		i->private_starget_attrs[count].store = NULL;		\
	}								\
	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
	if (i->f->show_starget_##field)					\
		count++

/* The FC Transport SCSI Target Attributes: */
fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
fc_starget_rd_attr(port_id, "0x%06x\n", 20);


/*
 * FC Virtual Port Attribute Management
 */

#define fc_vport_show_function(field, format_string, sz, cast)		\
static ssize_t								\
show_fc_vport_##field (struct device *dev,				\
		       struct device_attribute *attr, char *buf)	\
{									\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	struct Scsi_Host *shost = vport_to_shost(vport);		\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	if ((i->f->get_vport_##field) &&				\
	    !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)))	\
		i->f->get_vport_##field(vport);				\
	return snprintf(buf, sz, format_string, cast vport->field);	\
}

#define fc_vport_store_function(field)					\
static ssize_t								\
store_fc_vport_##field(struct device *dev,				\
		       struct device_attribute *attr,			\
		       const char *buf,	size_t count)			\
{									\
	int val;							\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	struct Scsi_Host *shost = vport_to_shost(vport);		\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	char *cp;							\
	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
		return -EBUSY;						\
	val = simple_strtoul(buf, &cp, 0);				\
	if (*cp && (*cp != '\n'))					\
		return -EINVAL;						\
	i->f->set_vport_##field(vport, val);				\
	return count;							\
}

#define fc_vport_store_str_function(field, slen)			\
static ssize_t								\
store_fc_vport_##field(struct device *dev,				\
		       struct device_attribute *attr,			\
		       const char *buf,	size_t count)			\
{									\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	struct Scsi_Host *shost = vport_to_shost(vport);		\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	unsigned int cnt=count;						\
									\
	/* count may include a LF at end of string */			\
	if (buf[cnt-1] == '\n')						\
		cnt--;							\
	if (cnt > ((slen) - 1))						\
		return -EINVAL;						\
	memcpy(vport->field, buf, cnt);					\
	i->f->set_vport_##field(vport);					\
	return count;							\
}

#define fc_vport_rd_attr(field, format_string, sz)			\
	fc_vport_show_function(field, format_string, sz, )		\
static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
			 show_fc_vport_##field, NULL)

#define fc_vport_rd_attr_cast(field, format_string, sz, cast)		\
	fc_vport_show_function(field, format_string, sz, (cast))	\
static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
			  show_fc_vport_##field, NULL)

#define fc_vport_rw_attr(field, format_string, sz)			\
	fc_vport_show_function(field, format_string, sz, )		\
	fc_vport_store_function(field)					\
static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
			show_fc_vport_##field,				\
			store_fc_vport_##field)

#define fc_private_vport_show_function(field, format_string, sz, cast)	\
static ssize_t								\
show_fc_vport_##field (struct device *dev,				\
		       struct device_attribute *attr, char *buf)	\
{									\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	return snprintf(buf, sz, format_string, cast vport->field);	\
}

#define fc_private_vport_store_u32_function(field)			\
static ssize_t								\
store_fc_vport_##field(struct device *dev,				\
		       struct device_attribute *attr,			\
		       const char *buf,	size_t count)			\
{									\
	u32 val;							\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	char *cp;							\
	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
		return -EBUSY;						\
	val = simple_strtoul(buf, &cp, 0);				\
	if (*cp && (*cp != '\n'))					\
		return -EINVAL;						\
	vport->field = val;						\
	return count;							\
}


#define fc_private_vport_rd_attr(field, format_string, sz)		\
	fc_private_vport_show_function(field, format_string, sz, )	\
static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
			 show_fc_vport_##field, NULL)

#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast)	\
	fc_private_vport_show_function(field, format_string, sz, (cast)) \
static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
			  show_fc_vport_##field, NULL)

#define fc_private_vport_rw_u32_attr(field, format_string, sz)		\
	fc_private_vport_show_function(field, format_string, sz, )	\
	fc_private_vport_store_u32_function(field)			\
static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
			show_fc_vport_##field,				\
			store_fc_vport_##field)


#define fc_private_vport_rd_enum_attr(title, maxlen)			\
static ssize_t								\
show_fc_vport_##title (struct device *dev,				\
		       struct device_attribute *attr,			\
		       char *buf)					\
{									\
	struct fc_vport *vport = transport_class_to_vport(dev);	\
	const char *name;						\
	name = get_fc_##title##_name(vport->title);			\
	if (!name)							\
		return -EINVAL;						\
	return snprintf(buf, maxlen, "%s\n", name);			\
}									\
static FC_DEVICE_ATTR(vport, title, S_IRUGO,			\
			show_fc_vport_##title, NULL)


#define SETUP_VPORT_ATTRIBUTE_RD(field)					\
	i->private_vport_attrs[count] = device_attr_vport_##field; \
	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
	i->private_vport_attrs[count].store = NULL;			\
	i->vport_attrs[count] = &i->private_vport_attrs[count];	\
	if (i->f->get_##field)						\
		count++
	/* NOTE: Above MACRO differs: checks function not show bit */

#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field)				\
	i->private_vport_attrs[count] = device_attr_vport_##field; \
	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
	i->private_vport_attrs[count].store = NULL;			\
	i->vport_attrs[count] = &i->private_vport_attrs[count];	\
	count++

#define SETUP_VPORT_ATTRIBUTE_WR(field)					\
	i->private_vport_attrs[count] = device_attr_vport_##field; \
	i->vport_attrs[count] = &i->private_vport_attrs[count];	\
	if (i->f->field)						\
		count++
	/* NOTE: Above MACRO differs: checks function */

#define SETUP_VPORT_ATTRIBUTE_RW(field)					\
	i->private_vport_attrs[count] = device_attr_vport_##field; \
	if (!i->f->set_vport_##field) {					\
		i->private_vport_attrs[count].attr.mode = S_IRUGO;	\
		i->private_vport_attrs[count].store = NULL;		\
	}								\
	i->vport_attrs[count] = &i->private_vport_attrs[count];	\
	count++
	/* NOTE: Above MACRO differs: does not check show bit */

#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field)				\
{									\
	i->private_vport_attrs[count] = device_attr_vport_##field; \
	i->vport_attrs[count] = &i->private_vport_attrs[count];	\
	count++;							\
}


/* The FC Transport Virtual Port Attributes: */

/* Fixed Virtual Port Attributes */

/* Dynamic Virtual Port Attributes */

/* Private Virtual Port Attributes */

fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);

static ssize_t
show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct fc_vport *vport = transport_class_to_vport(dev);

	if (vport->roles == FC_PORT_ROLE_UNKNOWN)
		return snprintf(buf, 20, "unknown\n");
	return get_fc_port_roles_names(vport->roles, buf);
}
static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);

fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);

fc_private_vport_show_function(symbolic_name, "%s\n",
		FC_VPORT_SYMBOLIC_NAMELEN + 1, )
fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
		show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);

static ssize_t
store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct fc_vport *vport = transport_class_to_vport(dev);
	struct Scsi_Host *shost = vport_to_shost(vport);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return -EBUSY;
	}
	vport->flags |= FC_VPORT_DELETING;
	spin_unlock_irqrestore(shost->host_lock, flags);

	fc_queue_work(shost, &vport->vport_delete_work);
	return count;
}
static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
			NULL, store_fc_vport_delete);


/*
 * Enable/Disable vport
 *  Write "1" to disable, write "0" to enable
 */
static ssize_t
store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
		       const char *buf,
			   size_t count)
{
	struct fc_vport *vport = transport_class_to_vport(dev);
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	int stat;

	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
		return -EBUSY;

	if (*buf == '0') {
		if (vport->vport_state != FC_VPORT_DISABLED)
			return -EALREADY;
	} else if (*buf == '1') {
		if (vport->vport_state == FC_VPORT_DISABLED)
			return -EALREADY;
	} else
		return -EINVAL;

	stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
	return stat ? stat : count;
}
static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
			NULL, store_fc_vport_disable);


/*
 * Host Attribute Management
 */

#define fc_host_show_function(field, format_string, sz, cast)		\
static ssize_t								\
show_fc_host_##field (struct device *dev,				\
		      struct device_attribute *attr, char *buf)		\
{									\
	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	if (i->f->get_host_##field)					\
		i->f->get_host_##field(shost);				\
	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
}

#define fc_host_store_function(field)					\
static ssize_t								\
store_fc_host_##field(struct device *dev,				\
		      struct device_attribute *attr,			\
		      const char *buf,	size_t count)			\
{									\
	int val;							\
	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	char *cp;							\
									\
	val = simple_strtoul(buf, &cp, 0);				\
	if (*cp && (*cp != '\n'))					\
		return -EINVAL;						\
	i->f->set_host_##field(shost, val);				\
	return count;							\
}

#define fc_host_store_str_function(field, slen)				\
static ssize_t								\
store_fc_host_##field(struct device *dev,				\
		      struct device_attribute *attr,			\
		      const char *buf, size_t count)			\
{									\
	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	unsigned int cnt=count;						\
									\
	/* count may include a LF at end of string */			\
	if (buf[cnt-1] == '\n')						\
		cnt--;							\
	if (cnt > ((slen) - 1))						\
		return -EINVAL;						\
	memcpy(fc_host_##field(shost), buf, cnt);			\
	i->f->set_host_##field(shost);					\
	return count;							\
}

#define fc_host_rd_attr(field, format_string, sz)			\
	fc_host_show_function(field, format_string, sz, )		\
static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
			 show_fc_host_##field, NULL)

#define fc_host_rd_attr_cast(field, format_string, sz, cast)		\
	fc_host_show_function(field, format_string, sz, (cast))	\
static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
			  show_fc_host_##field, NULL)

#define fc_host_rw_attr(field, format_string, sz)			\
	fc_host_show_function(field, format_string, sz, )		\
	fc_host_store_function(field)					\
static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR,		\
			show_fc_host_##field,				\
			store_fc_host_##field)

#define fc_host_rd_enum_attr(title, maxlen)				\
static ssize_t								\
show_fc_host_##title (struct device *dev,				\
		      struct device_attribute *attr, char *buf)		\
{									\
	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
	struct fc_internal *i = to_fc_internal(shost->transportt);	\
	const char *name;						\
	if (i->f->get_host_##title)					\
		i->f->get_host_##title(shost);				\
	name = get_fc_##title##_name(fc_host_##title(shost));		\
	if (!name)							\
		return -EINVAL;						\
	return snprintf(buf, maxlen, "%s\n", name);			\
}									\
static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)

#define SETUP_HOST_ATTRIBUTE_RD(field)					\
	i->private_host_attrs[count] = device_attr_host_##field;	\
	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
	i->private_host_attrs[count].store = NULL;			\
	i->host_attrs[count] = &i->private_host_attrs[count];		\
	if (i->f->show_host_##field)					\
		count++
#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \ 1778 i->private_host_attrs[count] = device_attr_host_##field; \ 1779 i->private_host_attrs[count].attr.mode = S_IRUGO; \ 1780 i->private_host_attrs[count].store = NULL; \ 1781 i->host_attrs[count] = &i->private_host_attrs[count]; \ 1782 count++ 1783 1784 #define SETUP_HOST_ATTRIBUTE_RW(field) \ 1785 i->private_host_attrs[count] = device_attr_host_##field; \ 1786 if (!i->f->set_host_##field) { \ 1787 i->private_host_attrs[count].attr.mode = S_IRUGO; \ 1788 i->private_host_attrs[count].store = NULL; \ 1789 } \ 1790 i->host_attrs[count] = &i->private_host_attrs[count]; \ 1791 if (i->f->show_host_##field) \ 1792 count++ 1793 1794 1795 #define fc_private_host_show_function(field, format_string, sz, cast) \ 1796 static ssize_t \ 1797 show_fc_host_##field (struct device *dev, \ 1798 struct device_attribute *attr, char *buf) \ 1799 { \ 1800 struct Scsi_Host *shost = transport_class_to_shost(dev); \ 1801 return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \ 1802 } 1803 1804 #define fc_private_host_rd_attr(field, format_string, sz) \ 1805 fc_private_host_show_function(field, format_string, sz, ) \ 1806 static FC_DEVICE_ATTR(host, field, S_IRUGO, \ 1807 show_fc_host_##field, NULL) 1808 1809 #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \ 1810 fc_private_host_show_function(field, format_string, sz, (cast)) \ 1811 static FC_DEVICE_ATTR(host, field, S_IRUGO, \ 1812 show_fc_host_##field, NULL) 1813 1814 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \ 1815 i->private_host_attrs[count] = device_attr_host_##field; \ 1816 i->private_host_attrs[count].attr.mode = S_IRUGO; \ 1817 i->private_host_attrs[count].store = NULL; \ 1818 i->host_attrs[count] = &i->private_host_attrs[count]; \ 1819 count++ 1820 1821 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \ 1822 { \ 1823 i->private_host_attrs[count] = device_attr_host_##field; \ 1824 i->host_attrs[count] = &i->private_host_attrs[count]; \ 1825 count++; \ 1826 } 1827 1828 1829 /* Fixed Host Attributes */ 1830 1831 static ssize_t 1832 show_fc_host_supported_classes (struct device *dev, 1833 struct device_attribute *attr, char *buf) 1834 { 1835 struct Scsi_Host *shost = transport_class_to_shost(dev); 1836 1837 if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED) 1838 return snprintf(buf, 20, "unspecified\n"); 1839 1840 return get_fc_cos_names(fc_host_supported_classes(shost), buf); 1841 } 1842 static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO, 1843 show_fc_host_supported_classes, NULL); 1844 1845 static ssize_t 1846 show_fc_host_supported_fc4s (struct device *dev, 1847 struct device_attribute *attr, char *buf) 1848 { 1849 struct Scsi_Host *shost = transport_class_to_shost(dev); 1850 return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost)); 1851 } 1852 static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO, 1853 show_fc_host_supported_fc4s, NULL); 1854 1855 static ssize_t 1856 show_fc_host_supported_speeds (struct device *dev, 1857 struct device_attribute *attr, char *buf) 1858 { 1859 struct Scsi_Host *shost = transport_class_to_shost(dev); 1860 1861 if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN) 1862 return snprintf(buf, 20, "unknown\n"); 1863 1864 return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf); 1865 } 1866 static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO, 1867 show_fc_host_supported_speeds, NULL); 1868 1869 1870 fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long); 1871 
fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); 1872 fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1873 unsigned long long); 1874 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1875 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20); 1876 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1877 fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1); 1878 fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); 1879 fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); 1880 fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); 1881 fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1); 1882 fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1); 1883 fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1); 1884 1885 1886 /* Dynamic Host Attributes */ 1887 1888 static ssize_t 1889 show_fc_host_active_fc4s (struct device *dev, 1890 struct device_attribute *attr, char *buf) 1891 { 1892 struct Scsi_Host *shost = transport_class_to_shost(dev); 1893 struct fc_internal *i = to_fc_internal(shost->transportt); 1894 1895 if (i->f->get_host_active_fc4s) 1896 i->f->get_host_active_fc4s(shost); 1897 1898 return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost)); 1899 } 1900 static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO, 1901 show_fc_host_active_fc4s, NULL); 1902 1903 static ssize_t 1904 show_fc_host_speed (struct device *dev, 1905 struct device_attribute *attr, char *buf) 1906 { 1907 struct Scsi_Host *shost = transport_class_to_shost(dev); 1908 struct fc_internal *i = to_fc_internal(shost->transportt); 1909 1910 if (i->f->get_host_speed) 1911 i->f->get_host_speed(shost); 1912 1913 if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN) 1914 return snprintf(buf, 20, "unknown\n"); 1915 1916 return get_fc_port_speed_names(fc_host_speed(shost), buf); 1917 } 1918 static FC_DEVICE_ATTR(host, speed, S_IRUGO, 1919 show_fc_host_speed, NULL); 1920 1921 1922 fc_host_rd_attr(port_id, "0x%06x\n", 20); 1923 fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); 1924 fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 1925 fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); 1926 fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1); 1927 1928 fc_private_host_show_function(system_hostname, "%s\n", 1929 FC_SYMBOLIC_NAME_SIZE + 1, ) 1930 fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE) 1931 static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR, 1932 show_fc_host_system_hostname, store_fc_host_system_hostname); 1933 1934 1935 /* Private Host Attributes */ 1936 1937 static ssize_t 1938 show_fc_private_host_tgtid_bind_type(struct device *dev, 1939 struct device_attribute *attr, char *buf) 1940 { 1941 struct Scsi_Host *shost = transport_class_to_shost(dev); 1942 const char *name; 1943 1944 name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost)); 1945 if (!name) 1946 return -EINVAL; 1947 return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); 1948 } 1949 1950 #define get_list_head_entry(pos, head, member) \ 1951 pos = list_entry((head)->next, typeof(*pos), member) 1952 1953 static ssize_t 1954 store_fc_private_host_tgtid_bind_type(struct device *dev, 1955 struct device_attribute *attr, const char *buf, size_t count) 1956 { 1957 struct Scsi_Host *shost = transport_class_to_shost(dev); 1958 struct fc_rport *rport; 
1959 enum fc_tgtid_binding_type val; 1960 unsigned long flags; 1961 1962 if (get_fc_tgtid_bind_type_match(buf, &val)) 1963 return -EINVAL; 1964 1965 /* if changing bind type, purge all unused consistent bindings */ 1966 if (val != fc_host_tgtid_bind_type(shost)) { 1967 spin_lock_irqsave(shost->host_lock, flags); 1968 while (!list_empty(&fc_host_rport_bindings(shost))) { 1969 get_list_head_entry(rport, 1970 &fc_host_rport_bindings(shost), peers); 1971 list_del(&rport->peers); 1972 rport->port_state = FC_PORTSTATE_DELETED; 1973 fc_queue_work(shost, &rport->rport_delete_work); 1974 } 1975 spin_unlock_irqrestore(shost->host_lock, flags); 1976 } 1977 1978 fc_host_tgtid_bind_type(shost) = val; 1979 return count; 1980 } 1981 1982 static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR, 1983 show_fc_private_host_tgtid_bind_type, 1984 store_fc_private_host_tgtid_bind_type); 1985 1986 static ssize_t 1987 store_fc_private_host_issue_lip(struct device *dev, 1988 struct device_attribute *attr, const char *buf, size_t count) 1989 { 1990 struct Scsi_Host *shost = transport_class_to_shost(dev); 1991 struct fc_internal *i = to_fc_internal(shost->transportt); 1992 int ret; 1993 1994 /* ignore any data value written to the attribute */ 1995 if (i->f->issue_fc_host_lip) { 1996 ret = i->f->issue_fc_host_lip(shost); 1997 return ret ? ret: count; 1998 } 1999 2000 return -ENOENT; 2001 } 2002 2003 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, 2004 store_fc_private_host_issue_lip); 2005 2006 static ssize_t 2007 store_fc_private_host_dev_loss_tmo(struct device *dev, 2008 struct device_attribute *attr, 2009 const char *buf, size_t count) 2010 { 2011 struct Scsi_Host *shost = transport_class_to_shost(dev); 2012 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2013 struct fc_rport *rport; 2014 unsigned long val, flags; 2015 int rc; 2016 2017 rc = fc_str_to_dev_loss(buf, &val); 2018 if (rc) 2019 return rc; 2020 2021 fc_host_dev_loss_tmo(shost) = val; 2022 spin_lock_irqsave(shost->host_lock, flags); 2023 list_for_each_entry(rport, &fc_host->rports, peers) 2024 fc_rport_set_dev_loss_tmo(rport, val); 2025 spin_unlock_irqrestore(shost->host_lock, flags); 2026 return count; 2027 } 2028 2029 fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, ); 2030 static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR, 2031 show_fc_host_dev_loss_tmo, 2032 store_fc_private_host_dev_loss_tmo); 2033 2034 fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); 2035 2036 /* 2037 * Host Statistics Management 2038 */ 2039 2040 /* Show a given attribute in the statistics group */ 2041 static ssize_t 2042 fc_stat_show(const struct device *dev, char *buf, unsigned long offset) 2043 { 2044 struct Scsi_Host *shost = transport_class_to_shost(dev); 2045 struct fc_internal *i = to_fc_internal(shost->transportt); 2046 struct fc_host_statistics *stats; 2047 ssize_t ret = -ENOENT; 2048 2049 if (offset > sizeof(struct fc_host_statistics) || 2050 offset % sizeof(u64) != 0) 2051 WARN_ON(1); 2052 2053 if (i->f->get_fc_host_stats) { 2054 stats = (i->f->get_fc_host_stats)(shost); 2055 if (stats) 2056 ret = snprintf(buf, 20, "0x%llx\n", 2057 (unsigned long long)*(u64 *)(((u8 *) stats) + offset)); 2058 } 2059 return ret; 2060 } 2061 2062 2063 /* generate a read-only statistics attribute */ 2064 #define fc_host_statistic(name) \ 2065 static ssize_t show_fcstat_##name(struct device *cd, \ 2066 struct device_attribute *attr, \ 2067 char *buf) \ 2068 { \ 2069 return fc_stat_show(cd, buf, \ 2070 offsetof(struct fc_host_statistics, name)); 
\ 2071 } \ 2072 static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL) 2073 2074 fc_host_statistic(seconds_since_last_reset); 2075 fc_host_statistic(tx_frames); 2076 fc_host_statistic(tx_words); 2077 fc_host_statistic(rx_frames); 2078 fc_host_statistic(rx_words); 2079 fc_host_statistic(lip_count); 2080 fc_host_statistic(nos_count); 2081 fc_host_statistic(error_frames); 2082 fc_host_statistic(dumped_frames); 2083 fc_host_statistic(link_failure_count); 2084 fc_host_statistic(loss_of_sync_count); 2085 fc_host_statistic(loss_of_signal_count); 2086 fc_host_statistic(prim_seq_protocol_err_count); 2087 fc_host_statistic(invalid_tx_word_count); 2088 fc_host_statistic(invalid_crc_count); 2089 fc_host_statistic(fcp_input_requests); 2090 fc_host_statistic(fcp_output_requests); 2091 fc_host_statistic(fcp_control_requests); 2092 fc_host_statistic(fcp_input_megabytes); 2093 fc_host_statistic(fcp_output_megabytes); 2094 fc_host_statistic(fcp_packet_alloc_failures); 2095 fc_host_statistic(fcp_packet_aborts); 2096 fc_host_statistic(fcp_frame_alloc_failures); 2097 fc_host_statistic(fc_no_free_exch); 2098 fc_host_statistic(fc_no_free_exch_xid); 2099 fc_host_statistic(fc_xid_not_found); 2100 fc_host_statistic(fc_xid_busy); 2101 fc_host_statistic(fc_seq_not_found); 2102 fc_host_statistic(fc_non_bls_resp); 2103 fc_host_statistic(cn_sig_warn); 2104 fc_host_statistic(cn_sig_alarm); 2105 2106 2107 #define fc_host_fpin_statistic(name) \ 2108 static ssize_t fc_host_fpinstat_##name(struct device *cd, \ 2109 struct device_attribute *attr, \ 2110 char *buf) \ 2111 { \ 2112 struct Scsi_Host *shost = transport_class_to_shost(cd); \ 2113 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \ 2114 \ 2115 return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \ 2116 } \ 2117 static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL) 2118 2119 fc_host_fpin_statistic(dn); 2120 fc_host_fpin_statistic(dn_unknown); 2121 fc_host_fpin_statistic(dn_timeout); 2122 fc_host_fpin_statistic(dn_unable_to_route); 2123 fc_host_fpin_statistic(dn_device_specific); 2124 fc_host_fpin_statistic(cn); 2125 fc_host_fpin_statistic(cn_clear); 2126 fc_host_fpin_statistic(cn_lost_credit); 2127 fc_host_fpin_statistic(cn_credit_stall); 2128 fc_host_fpin_statistic(cn_oversubscription); 2129 fc_host_fpin_statistic(cn_device_specific); 2130 fc_host_fpin_statistic(li); 2131 fc_host_fpin_statistic(li_failure_unknown); 2132 fc_host_fpin_statistic(li_link_failure_count); 2133 fc_host_fpin_statistic(li_loss_of_sync_count); 2134 fc_host_fpin_statistic(li_loss_of_signals_count); 2135 fc_host_fpin_statistic(li_prim_seq_err_count); 2136 fc_host_fpin_statistic(li_invalid_tx_word_count); 2137 fc_host_fpin_statistic(li_invalid_crc_count); 2138 fc_host_fpin_statistic(li_device_specific); 2139 2140 static ssize_t 2141 fc_reset_statistics(struct device *dev, struct device_attribute *attr, 2142 const char *buf, size_t count) 2143 { 2144 struct Scsi_Host *shost = transport_class_to_shost(dev); 2145 struct fc_internal *i = to_fc_internal(shost->transportt); 2146 2147 /* ignore any data value written to the attribute */ 2148 if (i->f->reset_fc_host_stats) { 2149 i->f->reset_fc_host_stats(shost); 2150 return count; 2151 } 2152 2153 return -ENOENT; 2154 } 2155 static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, 2156 fc_reset_statistics); 2157 2158 static struct attribute *fc_statistics_attrs[] = { 2159 &device_attr_host_seconds_since_last_reset.attr, 2160 &device_attr_host_tx_frames.attr, 2161 
&device_attr_host_tx_words.attr, 2162 &device_attr_host_rx_frames.attr, 2163 &device_attr_host_rx_words.attr, 2164 &device_attr_host_lip_count.attr, 2165 &device_attr_host_nos_count.attr, 2166 &device_attr_host_error_frames.attr, 2167 &device_attr_host_dumped_frames.attr, 2168 &device_attr_host_link_failure_count.attr, 2169 &device_attr_host_loss_of_sync_count.attr, 2170 &device_attr_host_loss_of_signal_count.attr, 2171 &device_attr_host_prim_seq_protocol_err_count.attr, 2172 &device_attr_host_invalid_tx_word_count.attr, 2173 &device_attr_host_invalid_crc_count.attr, 2174 &device_attr_host_fcp_input_requests.attr, 2175 &device_attr_host_fcp_output_requests.attr, 2176 &device_attr_host_fcp_control_requests.attr, 2177 &device_attr_host_fcp_input_megabytes.attr, 2178 &device_attr_host_fcp_output_megabytes.attr, 2179 &device_attr_host_fcp_packet_alloc_failures.attr, 2180 &device_attr_host_fcp_packet_aborts.attr, 2181 &device_attr_host_fcp_frame_alloc_failures.attr, 2182 &device_attr_host_fc_no_free_exch.attr, 2183 &device_attr_host_fc_no_free_exch_xid.attr, 2184 &device_attr_host_fc_xid_not_found.attr, 2185 &device_attr_host_fc_xid_busy.attr, 2186 &device_attr_host_fc_seq_not_found.attr, 2187 &device_attr_host_fc_non_bls_resp.attr, 2188 &device_attr_host_cn_sig_warn.attr, 2189 &device_attr_host_cn_sig_alarm.attr, 2190 &device_attr_host_reset_statistics.attr, 2191 &device_attr_host_fpin_dn.attr, 2192 &device_attr_host_fpin_dn_unknown.attr, 2193 &device_attr_host_fpin_dn_timeout.attr, 2194 &device_attr_host_fpin_dn_unable_to_route.attr, 2195 &device_attr_host_fpin_dn_device_specific.attr, 2196 &device_attr_host_fpin_li.attr, 2197 &device_attr_host_fpin_li_failure_unknown.attr, 2198 &device_attr_host_fpin_li_link_failure_count.attr, 2199 &device_attr_host_fpin_li_loss_of_sync_count.attr, 2200 &device_attr_host_fpin_li_loss_of_signals_count.attr, 2201 &device_attr_host_fpin_li_prim_seq_err_count.attr, 2202 &device_attr_host_fpin_li_invalid_tx_word_count.attr, 2203 &device_attr_host_fpin_li_invalid_crc_count.attr, 2204 &device_attr_host_fpin_li_device_specific.attr, 2205 &device_attr_host_fpin_cn.attr, 2206 &device_attr_host_fpin_cn_clear.attr, 2207 &device_attr_host_fpin_cn_lost_credit.attr, 2208 &device_attr_host_fpin_cn_credit_stall.attr, 2209 &device_attr_host_fpin_cn_oversubscription.attr, 2210 &device_attr_host_fpin_cn_device_specific.attr, 2211 NULL 2212 }; 2213 2214 static struct attribute_group fc_statistics_group = { 2215 .name = "statistics", 2216 .attrs = fc_statistics_attrs, 2217 }; 2218 2219 2220 /* Host Vport Attributes */ 2221 2222 static int 2223 fc_parse_wwn(const char *ns, u64 *nm) 2224 { 2225 unsigned int i, j; 2226 u8 wwn[8]; 2227 2228 memset(wwn, 0, sizeof(wwn)); 2229 2230 /* Validate and store the new name */ 2231 for (i=0, j=0; i < 16; i++) { 2232 int value; 2233 2234 value = hex_to_bin(*ns++); 2235 if (value >= 0) 2236 j = (j << 4) | value; 2237 else 2238 return -EINVAL; 2239 if (i % 2) { 2240 wwn[i/2] = j & 0xff; 2241 j = 0; 2242 } 2243 } 2244 2245 *nm = wwn_to_u64(wwn); 2246 2247 return 0; 2248 } 2249 2250 2251 /* 2252 * "Short-cut" sysfs variable to create a new vport on a FC Host. 2253 * Input is a string of the form "<WWPN>:<WWNN>". Other attributes 2254 * will default to a NPIV-based FCP_Initiator; The WWNs are specified 2255 * as hex characters, and may *not* contain any prefixes (e.g. 
0x, x, etc) 2256 */ 2257 static ssize_t 2258 store_fc_host_vport_create(struct device *dev, struct device_attribute *attr, 2259 const char *buf, size_t count) 2260 { 2261 struct Scsi_Host *shost = transport_class_to_shost(dev); 2262 struct fc_vport_identifiers vid; 2263 struct fc_vport *vport; 2264 unsigned int cnt=count; 2265 int stat; 2266 2267 memset(&vid, 0, sizeof(vid)); 2268 2269 /* count may include a LF at end of string */ 2270 if (buf[cnt-1] == '\n') 2271 cnt--; 2272 2273 /* validate we have enough characters for WWPN */ 2274 if ((cnt != (16+1+16)) || (buf[16] != ':')) 2275 return -EINVAL; 2276 2277 stat = fc_parse_wwn(&buf[0], &vid.port_name); 2278 if (stat) 2279 return stat; 2280 2281 stat = fc_parse_wwn(&buf[17], &vid.node_name); 2282 if (stat) 2283 return stat; 2284 2285 vid.roles = FC_PORT_ROLE_FCP_INITIATOR; 2286 vid.vport_type = FC_PORTTYPE_NPIV; 2287 /* vid.symbolic_name is already zero/NULL's */ 2288 vid.disable = false; /* always enabled */ 2289 2290 /* we only allow support on Channel 0 !!! */ 2291 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport); 2292 return stat ? stat : count; 2293 } 2294 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, 2295 store_fc_host_vport_create); 2296 2297 2298 /* 2299 * "Short-cut" sysfs variable to delete a vport on a FC Host. 2300 * Vport is identified by a string containing "<WWPN>:<WWNN>". 2301 * The WWNs are specified as hex characters, and may *not* contain 2302 * any prefixes (e.g. 0x, x, etc) 2303 */ 2304 static ssize_t 2305 store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr, 2306 const char *buf, size_t count) 2307 { 2308 struct Scsi_Host *shost = transport_class_to_shost(dev); 2309 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2310 struct fc_vport *vport; 2311 u64 wwpn, wwnn; 2312 unsigned long flags; 2313 unsigned int cnt=count; 2314 int stat, match; 2315 2316 /* count may include a LF at end of string */ 2317 if (buf[cnt-1] == '\n') 2318 cnt--; 2319 2320 /* validate we have enough characters for WWPN */ 2321 if ((cnt != (16+1+16)) || (buf[16] != ':')) 2322 return -EINVAL; 2323 2324 stat = fc_parse_wwn(&buf[0], &wwpn); 2325 if (stat) 2326 return stat; 2327 2328 stat = fc_parse_wwn(&buf[17], &wwnn); 2329 if (stat) 2330 return stat; 2331 2332 spin_lock_irqsave(shost->host_lock, flags); 2333 match = 0; 2334 /* we only allow support on Channel 0 !!! */ 2335 list_for_each_entry(vport, &fc_host->vports, peers) { 2336 if ((vport->channel == 0) && 2337 (vport->port_name == wwpn) && (vport->node_name == wwnn)) { 2338 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) 2339 break; 2340 vport->flags |= FC_VPORT_DELETING; 2341 match = 1; 2342 break; 2343 } 2344 } 2345 spin_unlock_irqrestore(shost->host_lock, flags); 2346 2347 if (!match) 2348 return -ENODEV; 2349 2350 stat = fc_vport_terminate(vport); 2351 return stat ? 
stat : count; 2352 } 2353 static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL, 2354 store_fc_host_vport_delete); 2355 2356 2357 static int fc_host_match(struct attribute_container *cont, 2358 struct device *dev) 2359 { 2360 struct Scsi_Host *shost; 2361 struct fc_internal *i; 2362 2363 if (!scsi_is_host_device(dev)) 2364 return 0; 2365 2366 shost = dev_to_shost(dev); 2367 if (!shost->transportt || shost->transportt->host_attrs.ac.class 2368 != &fc_host_class.class) 2369 return 0; 2370 2371 i = to_fc_internal(shost->transportt); 2372 2373 return &i->t.host_attrs.ac == cont; 2374 } 2375 2376 static int fc_target_match(struct attribute_container *cont, 2377 struct device *dev) 2378 { 2379 struct Scsi_Host *shost; 2380 struct fc_internal *i; 2381 2382 if (!scsi_is_target_device(dev)) 2383 return 0; 2384 2385 shost = dev_to_shost(dev->parent); 2386 if (!shost->transportt || shost->transportt->host_attrs.ac.class 2387 != &fc_host_class.class) 2388 return 0; 2389 2390 i = to_fc_internal(shost->transportt); 2391 2392 return &i->t.target_attrs.ac == cont; 2393 } 2394 2395 static void fc_rport_dev_release(struct device *dev) 2396 { 2397 struct fc_rport *rport = dev_to_rport(dev); 2398 put_device(dev->parent); 2399 kfree(rport); 2400 } 2401 2402 int scsi_is_fc_rport(const struct device *dev) 2403 { 2404 return dev->release == fc_rport_dev_release; 2405 } 2406 EXPORT_SYMBOL(scsi_is_fc_rport); 2407 2408 static int fc_rport_match(struct attribute_container *cont, 2409 struct device *dev) 2410 { 2411 struct Scsi_Host *shost; 2412 struct fc_internal *i; 2413 2414 if (!scsi_is_fc_rport(dev)) 2415 return 0; 2416 2417 shost = dev_to_shost(dev->parent); 2418 if (!shost->transportt || shost->transportt->host_attrs.ac.class 2419 != &fc_host_class.class) 2420 return 0; 2421 2422 i = to_fc_internal(shost->transportt); 2423 2424 return &i->rport_attr_cont.ac == cont; 2425 } 2426 2427 2428 static void fc_vport_dev_release(struct device *dev) 2429 { 2430 struct fc_vport *vport = dev_to_vport(dev); 2431 put_device(dev->parent); /* release kobj parent */ 2432 kfree(vport); 2433 } 2434 2435 static int scsi_is_fc_vport(const struct device *dev) 2436 { 2437 return dev->release == fc_vport_dev_release; 2438 } 2439 2440 static int fc_vport_match(struct attribute_container *cont, 2441 struct device *dev) 2442 { 2443 struct fc_vport *vport; 2444 struct Scsi_Host *shost; 2445 struct fc_internal *i; 2446 2447 if (!scsi_is_fc_vport(dev)) 2448 return 0; 2449 vport = dev_to_vport(dev); 2450 2451 shost = vport_to_shost(vport); 2452 if (!shost->transportt || shost->transportt->host_attrs.ac.class 2453 != &fc_host_class.class) 2454 return 0; 2455 2456 i = to_fc_internal(shost->transportt); 2457 return &i->vport_attr_cont.ac == cont; 2458 } 2459 2460 2461 /** 2462 * fc_eh_timed_out - FC Transport I/O timeout intercept handler 2463 * @scmd: The SCSI command which timed out 2464 * 2465 * This routine protects against error handlers getting invoked while a 2466 * rport is in a blocked state, typically due to a temporarily loss of 2467 * connectivity. If the error handlers are allowed to proceed, requests 2468 * to abort i/o, reset the target, etc will likely fail as there is no way 2469 * to communicate with the device to perform the requested function. These 2470 * failures may result in the midlayer taking the device offline, requiring 2471 * manual intervention to restore operation. 2472 * 2473 * This routine, called whenever an i/o times out, validates the state of 2474 * the underlying rport. 
If the rport is blocked, it returns 2475 * EH_RESET_TIMER, which will continue to reschedule the timeout. 2476 * Eventually, either the device will return, or devloss_tmo will fire, 2477 * and when the timeout then fires, it will be handled normally. 2478 * If the rport is not blocked, normal error handling continues. 2479 * 2480 * Notes: 2481 * This routine assumes no locks are held on entry. 2482 */ 2483 enum blk_eh_timer_return 2484 fc_eh_timed_out(struct scsi_cmnd *scmd) 2485 { 2486 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 2487 2488 if (rport->port_state == FC_PORTSTATE_BLOCKED) 2489 return BLK_EH_RESET_TIMER; 2490 2491 return BLK_EH_DONE; 2492 } 2493 EXPORT_SYMBOL(fc_eh_timed_out); 2494 2495 /* 2496 * Called by fc_user_scan to locate an rport on the shost that 2497 * matches the channel and target id, and invoke scsi_scan_target() 2498 * on the rport. 2499 */ 2500 static void 2501 fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) 2502 { 2503 struct fc_rport *rport; 2504 unsigned long flags; 2505 2506 spin_lock_irqsave(shost->host_lock, flags); 2507 2508 list_for_each_entry(rport, &fc_host_rports(shost), peers) { 2509 if (rport->scsi_target_id == -1) 2510 continue; 2511 2512 if (rport->port_state != FC_PORTSTATE_ONLINE) 2513 continue; 2514 2515 if ((channel == rport->channel) && 2516 (id == rport->scsi_target_id)) { 2517 spin_unlock_irqrestore(shost->host_lock, flags); 2518 scsi_scan_target(&rport->dev, channel, id, lun, 2519 SCSI_SCAN_MANUAL); 2520 return; 2521 } 2522 } 2523 2524 spin_unlock_irqrestore(shost->host_lock, flags); 2525 } 2526 2527 /* 2528 * Called via sysfs scan routines. Necessary, as the FC transport 2529 * wants to place all target objects below the rport object. So this 2530 * routine must invoke the scsi_scan_target() routine with the rport 2531 * object as the parent. 
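 *
 * For example, a manual scan request written to the SCSI host's standard
 * "scan" attribute, such as
 *
 *	echo "0 3 0" > /sys/class/scsi_host/hostN/scan
 *
 * (with "-" accepted as a wildcard for any of the three fields), ends up
 * here with the parsed channel/id/lun values, wildcards passed as
 * SCAN_WILD_CARD. The "0 3 0" values and hostN above are purely
 * illustrative.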
2532 */ 2533 static int 2534 fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) 2535 { 2536 uint chlo, chhi; 2537 uint tgtlo, tgthi; 2538 2539 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 2540 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 2541 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) 2542 return -EINVAL; 2543 2544 if (channel == SCAN_WILD_CARD) { 2545 chlo = 0; 2546 chhi = shost->max_channel + 1; 2547 } else { 2548 chlo = channel; 2549 chhi = channel + 1; 2550 } 2551 2552 if (id == SCAN_WILD_CARD) { 2553 tgtlo = 0; 2554 tgthi = shost->max_id; 2555 } else { 2556 tgtlo = id; 2557 tgthi = id + 1; 2558 } 2559 2560 for ( ; chlo < chhi; chlo++) 2561 for ( ; tgtlo < tgthi; tgtlo++) 2562 fc_user_scan_tgt(shost, chlo, tgtlo, lun); 2563 2564 return 0; 2565 } 2566 2567 struct scsi_transport_template * 2568 fc_attach_transport(struct fc_function_template *ft) 2569 { 2570 int count; 2571 struct fc_internal *i = kzalloc(sizeof(struct fc_internal), 2572 GFP_KERNEL); 2573 2574 if (unlikely(!i)) 2575 return NULL; 2576 2577 i->t.target_attrs.ac.attrs = &i->starget_attrs[0]; 2578 i->t.target_attrs.ac.class = &fc_transport_class.class; 2579 i->t.target_attrs.ac.match = fc_target_match; 2580 i->t.target_size = sizeof(struct fc_starget_attrs); 2581 transport_container_register(&i->t.target_attrs); 2582 2583 i->t.host_attrs.ac.attrs = &i->host_attrs[0]; 2584 i->t.host_attrs.ac.class = &fc_host_class.class; 2585 i->t.host_attrs.ac.match = fc_host_match; 2586 i->t.host_size = sizeof(struct fc_host_attrs); 2587 if (ft->get_fc_host_stats) 2588 i->t.host_attrs.statistics = &fc_statistics_group; 2589 transport_container_register(&i->t.host_attrs); 2590 2591 i->rport_attr_cont.ac.attrs = &i->rport_attrs[0]; 2592 i->rport_attr_cont.ac.class = &fc_rport_class.class; 2593 i->rport_attr_cont.ac.match = fc_rport_match; 2594 i->rport_attr_cont.statistics = &fc_rport_statistics_group; 2595 transport_container_register(&i->rport_attr_cont); 2596 2597 i->vport_attr_cont.ac.attrs = &i->vport_attrs[0]; 2598 i->vport_attr_cont.ac.class = &fc_vport_class.class; 2599 i->vport_attr_cont.ac.match = fc_vport_match; 2600 transport_container_register(&i->vport_attr_cont); 2601 2602 i->f = ft; 2603 2604 /* Transport uses the shost workq for scsi scanning */ 2605 i->t.create_work_queue = 1; 2606 2607 i->t.user_scan = fc_user_scan; 2608 2609 /* 2610 * Setup SCSI Target Attributes. 2611 */ 2612 count = 0; 2613 SETUP_STARGET_ATTRIBUTE_RD(node_name); 2614 SETUP_STARGET_ATTRIBUTE_RD(port_name); 2615 SETUP_STARGET_ATTRIBUTE_RD(port_id); 2616 2617 BUG_ON(count > FC_STARGET_NUM_ATTRS); 2618 2619 i->starget_attrs[count] = NULL; 2620 2621 2622 /* 2623 * Setup SCSI Host Attributes. 
2624 */ 2625 count=0; 2626 SETUP_HOST_ATTRIBUTE_RD(node_name); 2627 SETUP_HOST_ATTRIBUTE_RD(port_name); 2628 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); 2629 SETUP_HOST_ATTRIBUTE_RD(supported_classes); 2630 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 2631 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 2632 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 2633 if (ft->vport_create) { 2634 SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports); 2635 SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse); 2636 } 2637 SETUP_HOST_ATTRIBUTE_RD(serial_number); 2638 SETUP_HOST_ATTRIBUTE_RD(manufacturer); 2639 SETUP_HOST_ATTRIBUTE_RD(model); 2640 SETUP_HOST_ATTRIBUTE_RD(model_description); 2641 SETUP_HOST_ATTRIBUTE_RD(hardware_version); 2642 SETUP_HOST_ATTRIBUTE_RD(driver_version); 2643 SETUP_HOST_ATTRIBUTE_RD(firmware_version); 2644 SETUP_HOST_ATTRIBUTE_RD(optionrom_version); 2645 2646 SETUP_HOST_ATTRIBUTE_RD(port_id); 2647 SETUP_HOST_ATTRIBUTE_RD(port_type); 2648 SETUP_HOST_ATTRIBUTE_RD(port_state); 2649 SETUP_HOST_ATTRIBUTE_RD(active_fc4s); 2650 SETUP_HOST_ATTRIBUTE_RD(speed); 2651 SETUP_HOST_ATTRIBUTE_RD(fabric_name); 2652 SETUP_HOST_ATTRIBUTE_RD(symbolic_name); 2653 SETUP_HOST_ATTRIBUTE_RW(system_hostname); 2654 2655 /* Transport-managed attributes */ 2656 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo); 2657 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 2658 if (ft->issue_fc_host_lip) 2659 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); 2660 if (ft->vport_create) 2661 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create); 2662 if (ft->vport_delete) 2663 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete); 2664 2665 BUG_ON(count > FC_HOST_NUM_ATTRS); 2666 2667 i->host_attrs[count] = NULL; 2668 2669 /* 2670 * Setup Remote Port Attributes. 2671 */ 2672 count=0; 2673 SETUP_RPORT_ATTRIBUTE_RD(maxframe_size); 2674 SETUP_RPORT_ATTRIBUTE_RD(supported_classes); 2675 SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo); 2676 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name); 2677 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name); 2678 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id); 2679 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); 2680 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); 2681 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); 2682 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo); 2683 2684 BUG_ON(count > FC_RPORT_NUM_ATTRS); 2685 2686 i->rport_attrs[count] = NULL; 2687 2688 /* 2689 * Setup Virtual Port Attributes. 2690 */ 2691 count=0; 2692 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state); 2693 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state); 2694 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name); 2695 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name); 2696 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles); 2697 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type); 2698 SETUP_VPORT_ATTRIBUTE_RW(symbolic_name); 2699 SETUP_VPORT_ATTRIBUTE_WR(vport_delete); 2700 SETUP_VPORT_ATTRIBUTE_WR(vport_disable); 2701 2702 BUG_ON(count > FC_VPORT_NUM_ATTRS); 2703 2704 i->vport_attrs[count] = NULL; 2705 2706 return &i->t; 2707 } 2708 EXPORT_SYMBOL(fc_attach_transport); 2709 2710 void fc_release_transport(struct scsi_transport_template *t) 2711 { 2712 struct fc_internal *i = to_fc_internal(t); 2713 2714 transport_container_unregister(&i->t.target_attrs); 2715 transport_container_unregister(&i->t.host_attrs); 2716 transport_container_unregister(&i->rport_attr_cont); 2717 transport_container_unregister(&i->vport_attr_cont); 2718 2719 kfree(i); 2720 } 2721 EXPORT_SYMBOL(fc_release_transport); 2722 2723 /** 2724 * fc_queue_work - Queue work to the fc_host workqueue. 2725 * @shost: Pointer to Scsi_Host bound to fc_host. 
2726 * @work: Work to queue for execution. 2727 * 2728 * Return value: 2729 * 1 - work queued for execution 2730 * 0 - work is already queued 2731 * -EINVAL - work queue doesn't exist 2732 */ 2733 static int 2734 fc_queue_work(struct Scsi_Host *shost, struct work_struct *work) 2735 { 2736 if (unlikely(!fc_host_work_q(shost))) { 2737 printk(KERN_ERR 2738 "ERROR: FC host '%s' attempted to queue work, " 2739 "when no workqueue created.\n", shost->hostt->name); 2740 dump_stack(); 2741 2742 return -EINVAL; 2743 } 2744 2745 return queue_work(fc_host_work_q(shost), work); 2746 } 2747 2748 /** 2749 * fc_flush_work - Flush a fc_host's workqueue. 2750 * @shost: Pointer to Scsi_Host bound to fc_host. 2751 */ 2752 static void 2753 fc_flush_work(struct Scsi_Host *shost) 2754 { 2755 if (!fc_host_work_q(shost)) { 2756 printk(KERN_ERR 2757 "ERROR: FC host '%s' attempted to flush work, " 2758 "when no workqueue created.\n", shost->hostt->name); 2759 dump_stack(); 2760 return; 2761 } 2762 2763 flush_workqueue(fc_host_work_q(shost)); 2764 } 2765 2766 /** 2767 * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue. 2768 * @shost: Pointer to Scsi_Host bound to fc_host. 2769 * @work: Work to queue for execution. 2770 * @delay: jiffies to delay the work queuing 2771 * 2772 * Return value: 2773 * 1 on success / 0 already queued / < 0 for error 2774 */ 2775 static int 2776 fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, 2777 unsigned long delay) 2778 { 2779 if (unlikely(!fc_host_devloss_work_q(shost))) { 2780 printk(KERN_ERR 2781 "ERROR: FC host '%s' attempted to queue work, " 2782 "when no workqueue created.\n", shost->hostt->name); 2783 dump_stack(); 2784 2785 return -EINVAL; 2786 } 2787 2788 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 2789 } 2790 2791 /** 2792 * fc_flush_devloss - Flush a fc_host's devloss workqueue. 2793 * @shost: Pointer to Scsi_Host bound to fc_host. 2794 */ 2795 static void 2796 fc_flush_devloss(struct Scsi_Host *shost) 2797 { 2798 if (!fc_host_devloss_work_q(shost)) { 2799 printk(KERN_ERR 2800 "ERROR: FC host '%s' attempted to flush work, " 2801 "when no workqueue created.\n", shost->hostt->name); 2802 dump_stack(); 2803 return; 2804 } 2805 2806 flush_workqueue(fc_host_devloss_work_q(shost)); 2807 } 2808 2809 2810 /** 2811 * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host. 2812 * @shost: Which &Scsi_Host 2813 * 2814 * This routine is expected to be called immediately preceding the 2815 * a driver's call to scsi_remove_host(). 2816 * 2817 * WARNING: A driver utilizing the fc_transport, which fails to call 2818 * this routine prior to scsi_remove_host(), will leave dangling 2819 * objects in /sys/class/fc_remote_ports. Access to any of these 2820 * objects can result in a system crash !!! 2821 * 2822 * Notes: 2823 * This routine assumes no locks are held on entry. 
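 *
 * Example (an illustrative teardown ordering in an LLDD's remove path;
 * error handling omitted, and "shost" is simply the driver's Scsi_Host):
 *
 *	fc_remove_host(shost);
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);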
2824 */ 2825 void 2826 fc_remove_host(struct Scsi_Host *shost) 2827 { 2828 struct fc_vport *vport = NULL, *next_vport = NULL; 2829 struct fc_rport *rport = NULL, *next_rport = NULL; 2830 struct workqueue_struct *work_q; 2831 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2832 unsigned long flags; 2833 2834 spin_lock_irqsave(shost->host_lock, flags); 2835 2836 /* Remove any vports */ 2837 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { 2838 vport->flags |= FC_VPORT_DELETING; 2839 fc_queue_work(shost, &vport->vport_delete_work); 2840 } 2841 2842 /* Remove any remote ports */ 2843 list_for_each_entry_safe(rport, next_rport, 2844 &fc_host->rports, peers) { 2845 list_del(&rport->peers); 2846 rport->port_state = FC_PORTSTATE_DELETED; 2847 fc_queue_work(shost, &rport->rport_delete_work); 2848 } 2849 2850 list_for_each_entry_safe(rport, next_rport, 2851 &fc_host->rport_bindings, peers) { 2852 list_del(&rport->peers); 2853 rport->port_state = FC_PORTSTATE_DELETED; 2854 fc_queue_work(shost, &rport->rport_delete_work); 2855 } 2856 2857 spin_unlock_irqrestore(shost->host_lock, flags); 2858 2859 /* flush all scan work items */ 2860 scsi_flush_work(shost); 2861 2862 /* flush all stgt delete, and rport delete work items, then kill it */ 2863 if (fc_host->work_q) { 2864 work_q = fc_host->work_q; 2865 fc_host->work_q = NULL; 2866 destroy_workqueue(work_q); 2867 } 2868 2869 /* flush all devloss work items, then kill it */ 2870 if (fc_host->devloss_work_q) { 2871 work_q = fc_host->devloss_work_q; 2872 fc_host->devloss_work_q = NULL; 2873 destroy_workqueue(work_q); 2874 } 2875 } 2876 EXPORT_SYMBOL(fc_remove_host); 2877 2878 static void fc_terminate_rport_io(struct fc_rport *rport) 2879 { 2880 struct Scsi_Host *shost = rport_to_shost(rport); 2881 struct fc_internal *i = to_fc_internal(shost->transportt); 2882 2883 /* Involve the LLDD if possible to terminate all io on the rport. */ 2884 if (i->f->terminate_rport_io) 2885 i->f->terminate_rport_io(rport); 2886 2887 /* 2888 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs. 2889 */ 2890 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE); 2891 } 2892 2893 /** 2894 * fc_starget_delete - called to delete the scsi descendants of an rport 2895 * @work: remote port to be operated on. 2896 * 2897 * Deletes target and all sdevs. 2898 */ 2899 static void 2900 fc_starget_delete(struct work_struct *work) 2901 { 2902 struct fc_rport *rport = 2903 container_of(work, struct fc_rport, stgt_delete_work); 2904 2905 fc_terminate_rport_io(rport); 2906 scsi_remove_target(&rport->dev); 2907 } 2908 2909 2910 /** 2911 * fc_rport_final_delete - finish rport termination and delete it. 2912 * @work: remote port to be deleted. 2913 */ 2914 static void 2915 fc_rport_final_delete(struct work_struct *work) 2916 { 2917 struct fc_rport *rport = 2918 container_of(work, struct fc_rport, rport_delete_work); 2919 struct device *dev = &rport->dev; 2920 struct Scsi_Host *shost = rport_to_shost(rport); 2921 struct fc_internal *i = to_fc_internal(shost->transportt); 2922 unsigned long flags; 2923 int do_callback = 0; 2924 2925 fc_terminate_rport_io(rport); 2926 2927 /* 2928 * if a scan is pending, flush the SCSI Host work_q so that 2929 * that we can reclaim the rport scan work element. 2930 */ 2931 if (rport->flags & FC_RPORT_SCAN_PENDING) 2932 scsi_flush_work(shost); 2933 2934 /* 2935 * Cancel any outstanding timers. 
These should really exist 2936 * only when rmmod'ing the LLDD and we're asking for 2937 * immediate termination of the rports 2938 */ 2939 spin_lock_irqsave(shost->host_lock, flags); 2940 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { 2941 spin_unlock_irqrestore(shost->host_lock, flags); 2942 if (!cancel_delayed_work(&rport->fail_io_work)) 2943 fc_flush_devloss(shost); 2944 if (!cancel_delayed_work(&rport->dev_loss_work)) 2945 fc_flush_devloss(shost); 2946 cancel_work_sync(&rport->scan_work); 2947 spin_lock_irqsave(shost->host_lock, flags); 2948 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2949 } 2950 spin_unlock_irqrestore(shost->host_lock, flags); 2951 2952 /* Delete SCSI target and sdevs */ 2953 if (rport->scsi_target_id != -1) 2954 fc_starget_delete(&rport->stgt_delete_work); 2955 2956 /* 2957 * Notify the driver that the rport is now dead. The LLDD will 2958 * also guarantee that any communication to the rport is terminated 2959 * 2960 * Avoid this call if we already called it when we preserved the 2961 * rport for the binding. 2962 */ 2963 spin_lock_irqsave(shost->host_lock, flags); 2964 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && 2965 (i->f->dev_loss_tmo_callbk)) { 2966 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; 2967 do_callback = 1; 2968 } 2969 spin_unlock_irqrestore(shost->host_lock, flags); 2970 2971 if (do_callback) 2972 i->f->dev_loss_tmo_callbk(rport); 2973 2974 fc_bsg_remove(rport->rqst_q); 2975 2976 transport_remove_device(dev); 2977 device_del(dev); 2978 transport_destroy_device(dev); 2979 scsi_host_put(shost); /* for fc_host->rport list */ 2980 put_device(dev); /* for self-reference */ 2981 } 2982 2983 2984 /** 2985 * fc_remote_port_create - allocates and creates a remote FC port. 2986 * @shost: scsi host the remote port is connected to. 2987 * @channel: Channel on shost port connected to. 2988 * @ids: The world wide names, fc address, and FC4 port 2989 * roles for the remote port. 2990 * 2991 * Allocates and creates the remote port structure, including the 2992 * class and sysfs creation. 2993 * 2994 * Notes: 2995 * This routine assumes no locks are held on entry.
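 *
 * The new remote port becomes visible in sysfs under
 * /sys/class/fc_remote_ports/ with a name of the form
 * rport-<host_no>:<channel>-<number>, e.g. rport-2:0-3 (the values in
 * this example are purely illustrative), matching the dev_set_name()
 * call below.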
2996 */ 2997 static struct fc_rport * 2998 fc_remote_port_create(struct Scsi_Host *shost, int channel, 2999 struct fc_rport_identifiers *ids) 3000 { 3001 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3002 struct fc_internal *fci = to_fc_internal(shost->transportt); 3003 struct fc_rport *rport; 3004 struct device *dev; 3005 unsigned long flags; 3006 int error; 3007 size_t size; 3008 3009 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); 3010 rport = kzalloc(size, GFP_KERNEL); 3011 if (unlikely(!rport)) { 3012 printk(KERN_ERR "%s: allocation failure\n", __func__); 3013 return NULL; 3014 } 3015 3016 rport->maxframe_size = -1; 3017 rport->supported_classes = FC_COS_UNSPECIFIED; 3018 rport->dev_loss_tmo = fc_host->dev_loss_tmo; 3019 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); 3020 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); 3021 rport->port_id = ids->port_id; 3022 rport->roles = ids->roles; 3023 rport->port_state = FC_PORTSTATE_ONLINE; 3024 if (fci->f->dd_fcrport_size) 3025 rport->dd_data = &rport[1]; 3026 rport->channel = channel; 3027 rport->fast_io_fail_tmo = -1; 3028 3029 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); 3030 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); 3031 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); 3032 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); 3033 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); 3034 3035 spin_lock_irqsave(shost->host_lock, flags); 3036 3037 rport->number = fc_host->next_rport_number++; 3038 if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) || 3039 (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR)) 3040 rport->scsi_target_id = fc_host->next_target_id++; 3041 else 3042 rport->scsi_target_id = -1; 3043 list_add_tail(&rport->peers, &fc_host->rports); 3044 scsi_host_get(shost); /* for fc_host->rport list */ 3045 3046 spin_unlock_irqrestore(shost->host_lock, flags); 3047 3048 dev = &rport->dev; 3049 device_initialize(dev); /* takes self reference */ 3050 dev->parent = get_device(&shost->shost_gendev); /* parent reference */ 3051 dev->release = fc_rport_dev_release; 3052 dev_set_name(dev, "rport-%d:%d-%d", 3053 shost->host_no, channel, rport->number); 3054 transport_setup_device(dev); 3055 3056 error = device_add(dev); 3057 if (error) { 3058 printk(KERN_ERR "FC Remote Port device_add failed\n"); 3059 goto delete_rport; 3060 } 3061 transport_add_device(dev); 3062 transport_configure_device(dev); 3063 3064 fc_bsg_rportadd(shost, rport); 3065 /* ignore any bsg add error - we just can't do sgio */ 3066 3067 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { 3068 /* initiate a scan of the target */ 3069 rport->flags |= FC_RPORT_SCAN_PENDING; 3070 scsi_queue_work(shost, &rport->scan_work); 3071 } 3072 3073 return rport; 3074 3075 delete_rport: 3076 transport_destroy_device(dev); 3077 spin_lock_irqsave(shost->host_lock, flags); 3078 list_del(&rport->peers); 3079 scsi_host_put(shost); /* for fc_host->rport list */ 3080 spin_unlock_irqrestore(shost->host_lock, flags); 3081 put_device(dev->parent); 3082 kfree(rport); 3083 return NULL; 3084 } 3085 3086 /** 3087 * fc_remote_port_add - notify fc transport of the existence of a remote FC port. 3088 * @shost: scsi host the remote port is connected to. 3089 * @channel: Channel on shost port connected to. 3090 * @ids: The world wide names, fc address, and FC4 port 3091 * roles for the remote port. 
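 *
 * Example (a minimal, illustrative call from an LLDD's discovery code;
 * the wwnn, wwpn and port_id values are assumed to come from the fabric
 * login, and channel 0 is typical):
 *
 *	struct fc_rport_identifiers ids = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id = port_id,
 *		.roles = FC_PORT_ROLE_FCP_TARGET,
 *	};
 *	struct fc_rport *rport = fc_remote_port_add(shost, 0, &ids);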
3092 * 3093 * The LLDD calls this routine to notify the transport of the existence 3094 * of a remote port. The LLDD provides the unique identifiers (wwpn,wwn) 3095 * of the port, it's FC address (port_id), and the FC4 roles that are 3096 * active for the port. 3097 * 3098 * For ports that are FCP targets (aka scsi targets), the FC transport 3099 * maintains consistent target id bindings on behalf of the LLDD. 3100 * A consistent target id binding is an assignment of a target id to 3101 * a remote port identifier, which persists while the scsi host is 3102 * attached. The remote port can disappear, then later reappear, and 3103 * it's target id assignment remains the same. This allows for shifts 3104 * in FC addressing (if binding by wwpn or wwnn) with no apparent 3105 * changes to the scsi subsystem which is based on scsi host number and 3106 * target id values. Bindings are only valid during the attachment of 3107 * the scsi host. If the host detaches, then later re-attaches, target 3108 * id bindings may change. 3109 * 3110 * This routine is responsible for returning a remote port structure. 3111 * The routine will search the list of remote ports it maintains 3112 * internally on behalf of consistent target id mappings. If found, the 3113 * remote port structure will be reused. Otherwise, a new remote port 3114 * structure will be allocated. 3115 * 3116 * Whenever a remote port is allocated, a new fc_remote_port class 3117 * device is created. 3118 * 3119 * Should not be called from interrupt context. 3120 * 3121 * Notes: 3122 * This routine assumes no locks are held on entry. 3123 */ 3124 struct fc_rport * 3125 fc_remote_port_add(struct Scsi_Host *shost, int channel, 3126 struct fc_rport_identifiers *ids) 3127 { 3128 struct fc_internal *fci = to_fc_internal(shost->transportt); 3129 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3130 struct fc_rport *rport; 3131 unsigned long flags; 3132 int match = 0; 3133 3134 /* ensure any stgt delete functions are done */ 3135 fc_flush_work(shost); 3136 3137 /* 3138 * Search the list of "active" rports, for an rport that has been 3139 * deleted, but we've held off the real delete while the target 3140 * is in a "blocked" state. 3141 */ 3142 spin_lock_irqsave(shost->host_lock, flags); 3143 3144 list_for_each_entry(rport, &fc_host->rports, peers) { 3145 3146 if ((rport->port_state == FC_PORTSTATE_BLOCKED || 3147 rport->port_state == FC_PORTSTATE_NOTPRESENT) && 3148 (rport->channel == channel)) { 3149 3150 switch (fc_host->tgtid_bind_type) { 3151 case FC_TGTID_BIND_BY_WWPN: 3152 case FC_TGTID_BIND_NONE: 3153 if (rport->port_name == ids->port_name) 3154 match = 1; 3155 break; 3156 case FC_TGTID_BIND_BY_WWNN: 3157 if (rport->node_name == ids->node_name) 3158 match = 1; 3159 break; 3160 case FC_TGTID_BIND_BY_ID: 3161 if (rport->port_id == ids->port_id) 3162 match = 1; 3163 break; 3164 } 3165 3166 if (match) { 3167 3168 memcpy(&rport->node_name, &ids->node_name, 3169 sizeof(rport->node_name)); 3170 memcpy(&rport->port_name, &ids->port_name, 3171 sizeof(rport->port_name)); 3172 rport->port_id = ids->port_id; 3173 3174 rport->port_state = FC_PORTSTATE_ONLINE; 3175 rport->roles = ids->roles; 3176 3177 spin_unlock_irqrestore(shost->host_lock, flags); 3178 3179 if (fci->f->dd_fcrport_size) 3180 memset(rport->dd_data, 0, 3181 fci->f->dd_fcrport_size); 3182 3183 /* 3184 * If we were not a target, cancel the 3185 * io terminate and rport timers, and 3186 * we're done. 
3187 * 3188 * If we were a target, but our new role 3189 * doesn't indicate a target, leave the 3190 * timers running expecting the role to 3191 * change as the target fully logs in. If 3192 * it doesn't, the target will be torn down. 3193 * 3194 * If we were a target, and our role shows 3195 * we're still a target, cancel the timers 3196 * and kick off a scan. 3197 */ 3198 3199 /* was a target, not in roles */ 3200 if ((rport->scsi_target_id != -1) && 3201 (!(ids->roles & FC_PORT_ROLE_FCP_TARGET))) 3202 return rport; 3203 3204 /* 3205 * Stop the fail io and dev_loss timers. 3206 * If they flush, the port_state will 3207 * be checked and will NOOP the function. 3208 */ 3209 if (!cancel_delayed_work(&rport->fail_io_work)) 3210 fc_flush_devloss(shost); 3211 if (!cancel_delayed_work(&rport->dev_loss_work)) 3212 fc_flush_devloss(shost); 3213 3214 spin_lock_irqsave(shost->host_lock, flags); 3215 3216 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 3217 FC_RPORT_DEVLOSS_PENDING | 3218 FC_RPORT_DEVLOSS_CALLBK_DONE); 3219 3220 spin_unlock_irqrestore(shost->host_lock, flags); 3221 3222 /* if target, initiate a scan */ 3223 if (rport->scsi_target_id != -1) { 3224 scsi_target_unblock(&rport->dev, 3225 SDEV_RUNNING); 3226 spin_lock_irqsave(shost->host_lock, 3227 flags); 3228 rport->flags |= FC_RPORT_SCAN_PENDING; 3229 scsi_queue_work(shost, 3230 &rport->scan_work); 3231 spin_unlock_irqrestore(shost->host_lock, 3232 flags); 3233 } 3234 3235 fc_bsg_goose_queue(rport); 3236 3237 return rport; 3238 } 3239 } 3240 } 3241 3242 /* 3243 * Search the bindings array 3244 * Note: if never a FCP target, you won't be on this list 3245 */ 3246 if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) { 3247 3248 /* search for a matching consistent binding */ 3249 3250 list_for_each_entry(rport, &fc_host->rport_bindings, 3251 peers) { 3252 if (rport->channel != channel) 3253 continue; 3254 3255 switch (fc_host->tgtid_bind_type) { 3256 case FC_TGTID_BIND_BY_WWPN: 3257 if (rport->port_name == ids->port_name) 3258 match = 1; 3259 break; 3260 case FC_TGTID_BIND_BY_WWNN: 3261 if (rport->node_name == ids->node_name) 3262 match = 1; 3263 break; 3264 case FC_TGTID_BIND_BY_ID: 3265 if (rport->port_id == ids->port_id) 3266 match = 1; 3267 break; 3268 case FC_TGTID_BIND_NONE: /* to keep compiler happy */ 3269 break; 3270 } 3271 3272 if (match) { 3273 list_move_tail(&rport->peers, &fc_host->rports); 3274 break; 3275 } 3276 } 3277 3278 if (match) { 3279 memcpy(&rport->node_name, &ids->node_name, 3280 sizeof(rport->node_name)); 3281 memcpy(&rport->port_name, &ids->port_name, 3282 sizeof(rport->port_name)); 3283 rport->port_id = ids->port_id; 3284 rport->port_state = FC_PORTSTATE_ONLINE; 3285 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 3286 3287 if (fci->f->dd_fcrport_size) 3288 memset(rport->dd_data, 0, 3289 fci->f->dd_fcrport_size); 3290 spin_unlock_irqrestore(shost->host_lock, flags); 3291 3292 fc_remote_port_rolechg(rport, ids->roles); 3293 return rport; 3294 } 3295 } 3296 3297 spin_unlock_irqrestore(shost->host_lock, flags); 3298 3299 /* No consistent binding found - create new remote port entry */ 3300 rport = fc_remote_port_create(shost, channel, ids); 3301 3302 return rport; 3303 } 3304 EXPORT_SYMBOL(fc_remote_port_add); 3305 3306 3307 /** 3308 * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence. 3309 * @rport: The remote port that no longer exists 3310 * 3311 * The LLDD calls this routine to notify the transport that a remote 3312 * port is no longer part of the topology. 
Note: Although a port 3313 * may no longer be part of the topology, it may persist in the remote 3314 * ports displayed by the fc_host. We do this under 2 conditions: 3315 * 3316 * 1) If the port was a scsi target, we delay its deletion by "blocking" it. 3317 * This allows the port to temporarily disappear, then reappear without 3318 * disrupting the SCSI device tree attached to it. During the "blocked" 3319 * period the port will still exist. 3320 * 3321 * 2) If the port was a scsi target and disappears for longer than we 3322 * expect, we'll delete the port and tear down the SCSI device tree 3323 * attached to it. However, we want to semi-persist the target id assigned 3324 * to that port if it eventually reappears. The port structure will 3325 * remain (although with minimal information) so that the target id 3326 * bindings also remain. 3327 * 3328 * If the remote port is not an FCP Target, it will be fully torn down 3329 * and deallocated, including the fc_remote_port class device. 3330 * 3331 * If the remote port is an FCP Target, the port will be placed in a 3332 * temporary blocked state. From the LLDD's perspective, the rport no 3333 * longer exists. From the SCSI midlayer's perspective, the SCSI target 3334 * exists, but all sdevs on it are blocked from further I/O. The following 3335 * is then expected. 3336 * 3337 * If the remote port does not return (signaled by an LLDD call to 3338 * fc_remote_port_add()) within the dev_loss_tmo timeout, then the 3339 * scsi target is removed - killing all outstanding i/o and removing the 3340 * scsi devices attached to it. The port structure will be marked Not 3341 * Present and be partially cleared, leaving only enough information to 3342 * recognize the remote port relative to the scsi target id binding if 3343 * it later appears. The port will remain as long as there is a valid 3344 * binding (e.g. until the user changes the binding type or unloads the 3345 * scsi host with the binding). 3346 * 3347 * If the remote port returns within the dev_loss_tmo value (and matches 3348 * according to the target id binding type), the port structure will be 3349 * reused. If it is no longer a SCSI target, the target will be torn 3350 * down. If it continues to be a SCSI target, then the target will be 3351 * unblocked (allowing i/o to be resumed), and a scan will be activated 3352 * to ensure that all luns are detected. 3353 * 3354 * Called from normal process context only - cannot be called from interrupt. 3355 * 3356 * Notes: 3357 * This routine assumes no locks are held on entry. 3358 */ 3359 void 3360 fc_remote_port_delete(struct fc_rport *rport) 3361 { 3362 struct Scsi_Host *shost = rport_to_shost(rport); 3363 unsigned long timeout = rport->dev_loss_tmo; 3364 unsigned long flags; 3365 3366 /* 3367 * No need to flush the fc_host work_q's, as all adds are synchronous. 3368 * 3369 * We do need to reclaim the rport scan work element, so eventually 3370 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if 3371 * there's still a scan pending. 3372 */ 3373 3374 spin_lock_irqsave(shost->host_lock, flags); 3375 3376 if (rport->port_state != FC_PORTSTATE_ONLINE) { 3377 spin_unlock_irqrestore(shost->host_lock, flags); 3378 return; 3379 } 3380 3381 /* 3382 * In the past, if this was not an FCP-Target, we would 3383 * unconditionally just jump to deleting the rport.
3384 * However, rports can be used as node containers by the LLDD, 3385 * and its not appropriate to just terminate the rport at the 3386 * first sign of a loss in connectivity. The LLDD may want to 3387 * send ELS traffic to re-validate the login. If the rport is 3388 * immediately deleted, it makes it inappropriate for a node 3389 * container. 3390 * So... we now unconditionally wait dev_loss_tmo before 3391 * destroying an rport. 3392 */ 3393 3394 rport->port_state = FC_PORTSTATE_BLOCKED; 3395 3396 rport->flags |= FC_RPORT_DEVLOSS_PENDING; 3397 3398 spin_unlock_irqrestore(shost->host_lock, flags); 3399 3400 scsi_target_block(&rport->dev); 3401 3402 /* see if we need to kill io faster than waiting for device loss */ 3403 if ((rport->fast_io_fail_tmo != -1) && 3404 (rport->fast_io_fail_tmo < timeout)) 3405 fc_queue_devloss_work(shost, &rport->fail_io_work, 3406 rport->fast_io_fail_tmo * HZ); 3407 3408 /* cap the length the devices can be blocked until they are deleted */ 3409 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); 3410 } 3411 EXPORT_SYMBOL(fc_remote_port_delete); 3412 3413 /** 3414 * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed. 3415 * @rport: The remote port that changed. 3416 * @roles: New roles for this port. 3417 * 3418 * Description: The LLDD calls this routine to notify the transport that the 3419 * roles on a remote port may have changed. The largest effect of this is 3420 * if a port now becomes a FCP Target, it must be allocated a 3421 * scsi target id. If the port is no longer a FCP target, any 3422 * scsi target id value assigned to it will persist in case the 3423 * role changes back to include FCP Target. No changes in the scsi 3424 * midlayer will be invoked if the role changes (in the expectation 3425 * that the role will be resumed. If it doesn't normal error processing 3426 * will take place). 3427 * 3428 * Should not be called from interrupt context. 3429 * 3430 * Notes: 3431 * This routine assumes no locks are held on entry. 3432 */ 3433 void 3434 fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) 3435 { 3436 struct Scsi_Host *shost = rport_to_shost(rport); 3437 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3438 unsigned long flags; 3439 int create = 0; 3440 3441 spin_lock_irqsave(shost->host_lock, flags); 3442 if (roles & FC_PORT_ROLE_FCP_TARGET) { 3443 if (rport->scsi_target_id == -1) { 3444 rport->scsi_target_id = fc_host->next_target_id++; 3445 create = 1; 3446 } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) 3447 create = 1; 3448 } 3449 3450 rport->roles = roles; 3451 3452 spin_unlock_irqrestore(shost->host_lock, flags); 3453 3454 if (create) { 3455 /* 3456 * There may have been a delete timer running on the 3457 * port. Ensure that it is cancelled as we now know 3458 * the port is an FCP Target. 3459 * Note: we know the rport exists and is in an online 3460 * state as the LLDD would not have had an rport 3461 * reference to pass us. 3462 * 3463 * Take no action on the del_timer failure as the state 3464 * machine state change will validate the 3465 * transaction. 
3466 */
3467 if (!cancel_delayed_work(&rport->fail_io_work))
3468 fc_flush_devloss(shost);
3469 if (!cancel_delayed_work(&rport->dev_loss_work))
3470 fc_flush_devloss(shost);
3471
3472 spin_lock_irqsave(shost->host_lock, flags);
3473 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3474 FC_RPORT_DEVLOSS_PENDING |
3475 FC_RPORT_DEVLOSS_CALLBK_DONE);
3476 spin_unlock_irqrestore(shost->host_lock, flags);
3477
3478 /* ensure any stgt delete functions are done */
3479 fc_flush_work(shost);
3480
3481 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3482 /* initiate a scan of the target */
3483 spin_lock_irqsave(shost->host_lock, flags);
3484 rport->flags |= FC_RPORT_SCAN_PENDING;
3485 scsi_queue_work(shost, &rport->scan_work);
3486 spin_unlock_irqrestore(shost->host_lock, flags);
3487 }
3488 }
3489 EXPORT_SYMBOL(fc_remote_port_rolechg);
3490
3491 /**
3492 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
3493 * @work: rport target that failed to reappear in the allotted time.
3494 *
3495 * Description: Deleting a remote port first blocks it; if the port has not
3496 * returned within the allotted time (dev_loss_tmo), this handler is called.
3497 */
3498 static void
3499 fc_timeout_deleted_rport(struct work_struct *work)
3500 {
3501 struct fc_rport *rport =
3502 container_of(work, struct fc_rport, dev_loss_work.work);
3503 struct Scsi_Host *shost = rport_to_shost(rport);
3504 struct fc_internal *i = to_fc_internal(shost->transportt);
3505 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3506 unsigned long flags;
3507 int do_callback = 0;
3508
3509 spin_lock_irqsave(shost->host_lock, flags);
3510
3511 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3512
3513 /*
3514 * If the port is ONLINE, then it came back. If it was a SCSI
3515 * target, validate it still is. If not, tear down the
3516 * scsi_target on it.
3517 */
3518 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3519 (rport->scsi_target_id != -1) &&
3520 !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3521 dev_printk(KERN_ERR, &rport->dev,
3522 "blocked FC remote port time out: no longer"
3523 " a FCP target, removing starget\n");
3524 spin_unlock_irqrestore(shost->host_lock, flags);
3525 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3526 fc_queue_work(shost, &rport->stgt_delete_work);
3527 return;
3528 }
3529
3530 /* NOOP state - we're flushing workq's */
3531 if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3532 spin_unlock_irqrestore(shost->host_lock, flags);
3533 dev_printk(KERN_ERR, &rport->dev,
3534 "blocked FC remote port time out: leaving"
3535 " rport%s alone\n",
3536 (rport->scsi_target_id != -1) ? " and starget" : "");
3537 return;
3538 }
3539
3540 if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3541 (rport->scsi_target_id == -1)) {
3542 list_del(&rport->peers);
3543 rport->port_state = FC_PORTSTATE_DELETED;
3544 dev_printk(KERN_ERR, &rport->dev,
3545 "blocked FC remote port time out: removing"
3546 " rport%s\n",
3547 (rport->scsi_target_id != -1) ? " and starget" : "");
3548 fc_queue_work(shost, &rport->rport_delete_work);
3549 spin_unlock_irqrestore(shost->host_lock, flags);
3550 return;
3551 }
3552
3553 dev_printk(KERN_ERR, &rport->dev,
3554 "blocked FC remote port time out: removing target and "
3555 "saving binding\n");
3556
3557 list_move_tail(&rport->peers, &fc_host->rport_bindings);
3558
3559 /*
3560 * Note: We do not remove or clear the hostdata area. This allows
3561 * host-specific target data to persist along with the
3562 * scsi_target_id. It's up to the host to manage its hostdata area.
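 *
 * Only the identifiers that are not used by the current
 * tgtid_bind_type are cleared further below, so the remaining
 * identifier can still match this port if it returns later.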
3563 */
3564
3565 /*
3566 * Reinitialize port attributes that may change if the port comes back.
3567 */
3568 rport->maxframe_size = -1;
3569 rport->supported_classes = FC_COS_UNSPECIFIED;
3570 rport->roles = FC_PORT_ROLE_UNKNOWN;
3571 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3572 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3573
3574 /*
3575 * Pre-emptively kill I/O rather than waiting for the work queue
3576 * item to tear down the starget. (FCOE libFC folks prefer this
3577 * and to have the rport_port_id still set when it's done).
3578 */
3579 spin_unlock_irqrestore(shost->host_lock, flags);
3580 fc_terminate_rport_io(rport);
3581
3582 spin_lock_irqsave(shost->host_lock, flags);
3583
3584 if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */
3585
3586 /* remove the identifiers that aren't used by the target id binding */
3587 switch (fc_host->tgtid_bind_type) {
3588 case FC_TGTID_BIND_BY_WWPN:
3589 rport->node_name = -1;
3590 rport->port_id = -1;
3591 break;
3592 case FC_TGTID_BIND_BY_WWNN:
3593 rport->port_name = -1;
3594 rport->port_id = -1;
3595 break;
3596 case FC_TGTID_BIND_BY_ID:
3597 rport->node_name = -1;
3598 rport->port_name = -1;
3599 break;
3600 case FC_TGTID_BIND_NONE: /* to keep compiler happy */
3601 break;
3602 }
3603
3604 /*
3605 * As this only occurs if the remote port (scsi target)
3606 * went away and didn't come back - we'll remove
3607 * all attached scsi devices.
3608 */
3609 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3610 fc_queue_work(shost, &rport->stgt_delete_work);
3611
3612 do_callback = 1;
3613 }
3614
3615 spin_unlock_irqrestore(shost->host_lock, flags);
3616
3617 /*
3618 * Notify the driver that the rport is now dead. The LLDD will
3619 * also guarantee that any communication to the rport is terminated.
3620 *
3621 * Note: we set the CALLBK_DONE flag above to correspond with this
3622 * callback being made.
3623 */
3623 if (do_callback && i->f->dev_loss_tmo_callbk)
3624 i->f->dev_loss_tmo_callbk(rport);
3625 }
3626
3627
3628 /**
3629 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3630 * @work: rport to terminate io on.
3631 *
3632 * Notes: Only requests the failure of the io; it does not wait for all
3633 * io to be flushed prior to returning.
3634 */
3635 static void
3636 fc_timeout_fail_rport_io(struct work_struct *work)
3637 {
3638 struct fc_rport *rport =
3639 container_of(work, struct fc_rport, fail_io_work.work);
3640
3641 if (rport->port_state != FC_PORTSTATE_BLOCKED)
3642 return;
3643
3644 rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3645 fc_terminate_rport_io(rport);
3646 }
3647
3648 /**
3649 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3650 * @work: remote port to be scanned.
3651 */
3652 static void
3653 fc_scsi_scan_rport(struct work_struct *work)
3654 {
3655 struct fc_rport *rport =
3656 container_of(work, struct fc_rport, scan_work);
3657 struct Scsi_Host *shost = rport_to_shost(rport);
3658 struct fc_internal *i = to_fc_internal(shost->transportt);
3659 unsigned long flags;
3660
3661 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3662 (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3663 !(i->f->disable_target_scan)) {
3664 scsi_scan_target(&rport->dev, rport->channel,
3665 rport->scsi_target_id, SCAN_WILD_CARD,
3666 SCSI_SCAN_RESCAN);
3667 }
3668
3669 spin_lock_irqsave(shost->host_lock, flags);
3670 rport->flags &= ~FC_RPORT_SCAN_PENDING;
3671 spin_unlock_irqrestore(shost->host_lock, flags);
3672 }
3673
3674 /**
3675 * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
3676 * @rport: Remote port that scsi_eh is trying to recover. 3677 * 3678 * This routine can be called from a FC LLD scsi_eh callback. It 3679 * blocks the scsi_eh thread until the fc_rport leaves the 3680 * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is 3681 * necessary to avoid the scsi_eh failing recovery actions for blocked 3682 * rports which would lead to offlined SCSI devices. 3683 * 3684 * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. 3685 * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be 3686 * passed back to scsi_eh. 3687 */ 3688 int fc_block_rport(struct fc_rport *rport) 3689 { 3690 struct Scsi_Host *shost = rport_to_shost(rport); 3691 unsigned long flags; 3692 3693 spin_lock_irqsave(shost->host_lock, flags); 3694 while (rport->port_state == FC_PORTSTATE_BLOCKED && 3695 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) { 3696 spin_unlock_irqrestore(shost->host_lock, flags); 3697 msleep(1000); 3698 spin_lock_irqsave(shost->host_lock, flags); 3699 } 3700 spin_unlock_irqrestore(shost->host_lock, flags); 3701 3702 if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT) 3703 return FAST_IO_FAIL; 3704 3705 return 0; 3706 } 3707 EXPORT_SYMBOL(fc_block_rport); 3708 3709 /** 3710 * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport 3711 * @cmnd: SCSI command that scsi_eh is trying to recover 3712 * 3713 * This routine can be called from a FC LLD scsi_eh callback. It 3714 * blocks the scsi_eh thread until the fc_rport leaves the 3715 * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is 3716 * necessary to avoid the scsi_eh failing recovery actions for blocked 3717 * rports which would lead to offlined SCSI devices. 3718 * 3719 * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED. 3720 * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be 3721 * passed back to scsi_eh. 3722 */ 3723 int fc_block_scsi_eh(struct scsi_cmnd *cmnd) 3724 { 3725 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 3726 3727 if (WARN_ON_ONCE(!rport)) 3728 return FAST_IO_FAIL; 3729 3730 return fc_block_rport(rport); 3731 } 3732 EXPORT_SYMBOL(fc_block_scsi_eh); 3733 3734 /** 3735 * fc_vport_setup - allocates and creates a FC virtual port. 3736 * @shost: scsi host the virtual port is connected to. 3737 * @channel: Channel on shost port connected to. 3738 * @pdev: parent device for vport 3739 * @ids: The world wide names, FC4 port roles, etc for 3740 * the virtual port. 3741 * @ret_vport: The pointer to the created vport. 3742 * 3743 * Allocates and creates the vport structure, calls the parent host 3744 * to instantiate the vport, this completes w/ class and sysfs creation. 3745 * 3746 * Notes: 3747 * This routine assumes no locks are held on entry. 3748 */ 3749 static int 3750 fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, 3751 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3752 { 3753 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3754 struct fc_internal *fci = to_fc_internal(shost->transportt); 3755 struct fc_vport *vport; 3756 struct device *dev; 3757 unsigned long flags; 3758 size_t size; 3759 int error; 3760 3761 *ret_vport = NULL; 3762 3763 if ( ! 
fci->f->vport_create) 3764 return -ENOENT; 3765 3766 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); 3767 vport = kzalloc(size, GFP_KERNEL); 3768 if (unlikely(!vport)) { 3769 printk(KERN_ERR "%s: allocation failure\n", __func__); 3770 return -ENOMEM; 3771 } 3772 3773 vport->vport_state = FC_VPORT_UNKNOWN; 3774 vport->vport_last_state = FC_VPORT_UNKNOWN; 3775 vport->node_name = ids->node_name; 3776 vport->port_name = ids->port_name; 3777 vport->roles = ids->roles; 3778 vport->vport_type = ids->vport_type; 3779 if (fci->f->dd_fcvport_size) 3780 vport->dd_data = &vport[1]; 3781 vport->shost = shost; 3782 vport->channel = channel; 3783 vport->flags = FC_VPORT_CREATING; 3784 INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete); 3785 3786 spin_lock_irqsave(shost->host_lock, flags); 3787 3788 if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) { 3789 spin_unlock_irqrestore(shost->host_lock, flags); 3790 kfree(vport); 3791 return -ENOSPC; 3792 } 3793 fc_host->npiv_vports_inuse++; 3794 vport->number = fc_host->next_vport_number++; 3795 list_add_tail(&vport->peers, &fc_host->vports); 3796 scsi_host_get(shost); /* for fc_host->vport list */ 3797 3798 spin_unlock_irqrestore(shost->host_lock, flags); 3799 3800 dev = &vport->dev; 3801 device_initialize(dev); /* takes self reference */ 3802 dev->parent = get_device(pdev); /* takes parent reference */ 3803 dev->release = fc_vport_dev_release; 3804 dev_set_name(dev, "vport-%d:%d-%d", 3805 shost->host_no, channel, vport->number); 3806 transport_setup_device(dev); 3807 3808 error = device_add(dev); 3809 if (error) { 3810 printk(KERN_ERR "FC Virtual Port device_add failed\n"); 3811 goto delete_vport; 3812 } 3813 transport_add_device(dev); 3814 transport_configure_device(dev); 3815 3816 error = fci->f->vport_create(vport, ids->disable); 3817 if (error) { 3818 printk(KERN_ERR "FC Virtual Port LLDD Create failed\n"); 3819 goto delete_vport_all; 3820 } 3821 3822 /* 3823 * if the parent isn't the physical adapter's Scsi_Host, ensure 3824 * the Scsi_Host at least contains a symlink to the vport. 3825 */ 3826 if (pdev != &shost->shost_gendev) { 3827 error = sysfs_create_link(&shost->shost_gendev.kobj, 3828 &dev->kobj, dev_name(dev)); 3829 if (error) 3830 printk(KERN_ERR 3831 "%s: Cannot create vport symlinks for " 3832 "%s, err=%d\n", 3833 __func__, dev_name(dev), error); 3834 } 3835 spin_lock_irqsave(shost->host_lock, flags); 3836 vport->flags &= ~FC_VPORT_CREATING; 3837 spin_unlock_irqrestore(shost->host_lock, flags); 3838 3839 dev_printk(KERN_NOTICE, pdev, 3840 "%s created via shost%d channel %d\n", dev_name(dev), 3841 shost->host_no, channel); 3842 3843 *ret_vport = vport; 3844 3845 return 0; 3846 3847 delete_vport_all: 3848 transport_remove_device(dev); 3849 device_del(dev); 3850 delete_vport: 3851 transport_destroy_device(dev); 3852 spin_lock_irqsave(shost->host_lock, flags); 3853 list_del(&vport->peers); 3854 scsi_host_put(shost); /* for fc_host->vport list */ 3855 fc_host->npiv_vports_inuse--; 3856 spin_unlock_irqrestore(shost->host_lock, flags); 3857 put_device(dev->parent); 3858 kfree(vport); 3859 3860 return error; 3861 } 3862 3863 /** 3864 * fc_vport_create - Admin App or LLDD requests creation of a vport 3865 * @shost: scsi host the virtual port is connected to. 3866 * @channel: channel on shost port connected to. 3867 * @ids: The world wide names, FC4 port roles, etc for 3868 * the virtual port. 3869 * 3870 * Notes: 3871 * This routine assumes no locks are held on entry. 
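 *
 * Illustrative sketch only (wwpn/wwnn below are placeholder values, not
 * part of this API): a caller typically fills in a set of
 * fc_vport_identifiers and checks the returned pointer:
 *
 *	struct fc_vport_identifiers vid = {
 *		.port_name = wwpn,
 *		.node_name = wwnn,
 *		.roles = FC_PORT_ROLE_FCP_INITIATOR,
 *		.vport_type = FC_PORTTYPE_NPIV,
 *		.disable = false,
 *	};
 *	struct fc_vport *vport;
 *
 *	vport = fc_vport_create(shost, 0, &vid);
 *	if (!vport)
 *		pr_err("vport creation failed\n");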
3872 */ 3873 struct fc_vport * 3874 fc_vport_create(struct Scsi_Host *shost, int channel, 3875 struct fc_vport_identifiers *ids) 3876 { 3877 int stat; 3878 struct fc_vport *vport; 3879 3880 stat = fc_vport_setup(shost, channel, &shost->shost_gendev, 3881 ids, &vport); 3882 return stat ? NULL : vport; 3883 } 3884 EXPORT_SYMBOL(fc_vport_create); 3885 3886 /** 3887 * fc_vport_terminate - Admin App or LLDD requests termination of a vport 3888 * @vport: fc_vport to be terminated 3889 * 3890 * Calls the LLDD vport_delete() function, then deallocates and removes 3891 * the vport from the shost and object tree. 3892 * 3893 * Notes: 3894 * This routine assumes no locks are held on entry. 3895 */ 3896 int 3897 fc_vport_terminate(struct fc_vport *vport) 3898 { 3899 struct Scsi_Host *shost = vport_to_shost(vport); 3900 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3901 struct fc_internal *i = to_fc_internal(shost->transportt); 3902 struct device *dev = &vport->dev; 3903 unsigned long flags; 3904 int stat; 3905 3906 if (i->f->vport_delete) 3907 stat = i->f->vport_delete(vport); 3908 else 3909 stat = -ENOENT; 3910 3911 spin_lock_irqsave(shost->host_lock, flags); 3912 vport->flags &= ~FC_VPORT_DELETING; 3913 if (!stat) { 3914 vport->flags |= FC_VPORT_DELETED; 3915 list_del(&vport->peers); 3916 fc_host->npiv_vports_inuse--; 3917 scsi_host_put(shost); /* for fc_host->vport list */ 3918 } 3919 spin_unlock_irqrestore(shost->host_lock, flags); 3920 3921 if (stat) 3922 return stat; 3923 3924 if (dev->parent != &shost->shost_gendev) 3925 sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev)); 3926 transport_remove_device(dev); 3927 device_del(dev); 3928 transport_destroy_device(dev); 3929 3930 /* 3931 * Removing our self-reference should mean our 3932 * release function gets called, which will drop the remaining 3933 * parent reference and free the data structure. 3934 */ 3935 put_device(dev); /* for self-reference */ 3936 3937 return 0; /* SUCCESS */ 3938 } 3939 EXPORT_SYMBOL(fc_vport_terminate); 3940 3941 /** 3942 * fc_vport_sched_delete - workq-based delete request for a vport 3943 * @work: vport to be deleted. 
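 *
 * Runs from a workqueue; calls fc_vport_terminate() and logs an error
 * if the vport could not be deleted.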
3944 */ 3945 static void 3946 fc_vport_sched_delete(struct work_struct *work) 3947 { 3948 struct fc_vport *vport = 3949 container_of(work, struct fc_vport, vport_delete_work); 3950 int stat; 3951 3952 stat = fc_vport_terminate(vport); 3953 if (stat) 3954 dev_printk(KERN_ERR, vport->dev.parent, 3955 "%s: %s could not be deleted created via " 3956 "shost%d channel %d - error %d\n", __func__, 3957 dev_name(&vport->dev), vport->shost->host_no, 3958 vport->channel, stat); 3959 } 3960 3961 3962 /* 3963 * BSG support 3964 */ 3965 3966 /** 3967 * fc_bsg_job_timeout - handler for when a bsg request timesout 3968 * @req: request that timed out 3969 */ 3970 static enum blk_eh_timer_return 3971 fc_bsg_job_timeout(struct request *req) 3972 { 3973 struct bsg_job *job = blk_mq_rq_to_pdu(req); 3974 struct Scsi_Host *shost = fc_bsg_to_shost(job); 3975 struct fc_rport *rport = fc_bsg_to_rport(job); 3976 struct fc_internal *i = to_fc_internal(shost->transportt); 3977 int err = 0, inflight = 0; 3978 3979 if (rport && rport->port_state == FC_PORTSTATE_BLOCKED) 3980 return BLK_EH_RESET_TIMER; 3981 3982 inflight = bsg_job_get(job); 3983 3984 if (inflight && i->f->bsg_timeout) { 3985 /* call LLDD to abort the i/o as it has timed out */ 3986 err = i->f->bsg_timeout(job); 3987 if (err == -EAGAIN) { 3988 bsg_job_put(job); 3989 return BLK_EH_RESET_TIMER; 3990 } else if (err) 3991 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " 3992 "abort failed with status %d\n", err); 3993 } 3994 3995 /* the blk_end_sync_io() doesn't check the error */ 3996 if (inflight) 3997 blk_mq_end_request(req, BLK_STS_IOERR); 3998 return BLK_EH_DONE; 3999 } 4000 4001 /** 4002 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD 4003 * @shost: scsi host rport attached to 4004 * @job: bsg job to be processed 4005 */ 4006 static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job) 4007 { 4008 struct fc_internal *i = to_fc_internal(shost->transportt); 4009 struct fc_bsg_request *bsg_request = job->request; 4010 struct fc_bsg_reply *bsg_reply = job->reply; 4011 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ 4012 int ret; 4013 4014 /* check if we really have all the request data needed */ 4015 if (job->request_len < cmdlen) { 4016 ret = -ENOMSG; 4017 goto fail_host_msg; 4018 } 4019 4020 /* Validate the host command */ 4021 switch (bsg_request->msgcode) { 4022 case FC_BSG_HST_ADD_RPORT: 4023 cmdlen += sizeof(struct fc_bsg_host_add_rport); 4024 break; 4025 4026 case FC_BSG_HST_DEL_RPORT: 4027 cmdlen += sizeof(struct fc_bsg_host_del_rport); 4028 break; 4029 4030 case FC_BSG_HST_ELS_NOLOGIN: 4031 cmdlen += sizeof(struct fc_bsg_host_els); 4032 /* there better be a xmt and rcv payloads */ 4033 if ((!job->request_payload.payload_len) || 4034 (!job->reply_payload.payload_len)) { 4035 ret = -EINVAL; 4036 goto fail_host_msg; 4037 } 4038 break; 4039 4040 case FC_BSG_HST_CT: 4041 cmdlen += sizeof(struct fc_bsg_host_ct); 4042 /* there better be xmt and rcv payloads */ 4043 if ((!job->request_payload.payload_len) || 4044 (!job->reply_payload.payload_len)) { 4045 ret = -EINVAL; 4046 goto fail_host_msg; 4047 } 4048 break; 4049 4050 case FC_BSG_HST_VENDOR: 4051 cmdlen += sizeof(struct fc_bsg_host_vendor); 4052 if ((shost->hostt->vendor_id == 0L) || 4053 (bsg_request->rqst_data.h_vendor.vendor_id != 4054 shost->hostt->vendor_id)) { 4055 ret = -ESRCH; 4056 goto fail_host_msg; 4057 } 4058 break; 4059 4060 default: 4061 ret = -EBADR; 4062 goto fail_host_msg; 4063 } 4064 4065 ret = i->f->bsg_request(job); 
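	/*
	 * A zero return means the LLDD accepted the job and will complete
	 * it via bsg_job_done(); any other value is returned to the
	 * requester through the failure path below.
	 */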
4066 if (!ret) 4067 return 0; 4068 4069 fail_host_msg: 4070 /* return the errno failure code as the only status */ 4071 BUG_ON(job->reply_len < sizeof(uint32_t)); 4072 bsg_reply->reply_payload_rcv_len = 0; 4073 bsg_reply->result = ret; 4074 job->reply_len = sizeof(uint32_t); 4075 bsg_job_done(job, bsg_reply->result, 4076 bsg_reply->reply_payload_rcv_len); 4077 return 0; 4078 } 4079 4080 4081 /* 4082 * fc_bsg_goose_queue - restart rport queue in case it was stopped 4083 * @rport: rport to be restarted 4084 */ 4085 static void 4086 fc_bsg_goose_queue(struct fc_rport *rport) 4087 { 4088 struct request_queue *q = rport->rqst_q; 4089 4090 if (q) 4091 blk_mq_run_hw_queues(q, true); 4092 } 4093 4094 /** 4095 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD 4096 * @shost: scsi host rport attached to 4097 * @job: bsg job to be processed 4098 */ 4099 static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job) 4100 { 4101 struct fc_internal *i = to_fc_internal(shost->transportt); 4102 struct fc_bsg_request *bsg_request = job->request; 4103 struct fc_bsg_reply *bsg_reply = job->reply; 4104 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ 4105 int ret; 4106 4107 /* check if we really have all the request data needed */ 4108 if (job->request_len < cmdlen) { 4109 ret = -ENOMSG; 4110 goto fail_rport_msg; 4111 } 4112 4113 /* Validate the rport command */ 4114 switch (bsg_request->msgcode) { 4115 case FC_BSG_RPT_ELS: 4116 cmdlen += sizeof(struct fc_bsg_rport_els); 4117 goto check_bidi; 4118 4119 case FC_BSG_RPT_CT: 4120 cmdlen += sizeof(struct fc_bsg_rport_ct); 4121 check_bidi: 4122 /* there better be xmt and rcv payloads */ 4123 if ((!job->request_payload.payload_len) || 4124 (!job->reply_payload.payload_len)) { 4125 ret = -EINVAL; 4126 goto fail_rport_msg; 4127 } 4128 break; 4129 default: 4130 ret = -EBADR; 4131 goto fail_rport_msg; 4132 } 4133 4134 ret = i->f->bsg_request(job); 4135 if (!ret) 4136 return 0; 4137 4138 fail_rport_msg: 4139 /* return the errno failure code as the only status */ 4140 BUG_ON(job->reply_len < sizeof(uint32_t)); 4141 bsg_reply->reply_payload_rcv_len = 0; 4142 bsg_reply->result = ret; 4143 job->reply_len = sizeof(uint32_t); 4144 bsg_job_done(job, bsg_reply->result, 4145 bsg_reply->reply_payload_rcv_len); 4146 return 0; 4147 } 4148 4149 static int fc_bsg_dispatch(struct bsg_job *job) 4150 { 4151 struct Scsi_Host *shost = fc_bsg_to_shost(job); 4152 4153 if (scsi_is_fc_rport(job->dev)) 4154 return fc_bsg_rport_dispatch(shost, job); 4155 else 4156 return fc_bsg_host_dispatch(shost, job); 4157 } 4158 4159 static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport) 4160 { 4161 if (rport->port_state == FC_PORTSTATE_BLOCKED && 4162 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) 4163 return BLK_STS_RESOURCE; 4164 4165 if (rport->port_state != FC_PORTSTATE_ONLINE) 4166 return BLK_STS_IOERR; 4167 4168 return BLK_STS_OK; 4169 } 4170 4171 4172 static int fc_bsg_dispatch_prep(struct bsg_job *job) 4173 { 4174 struct fc_rport *rport = fc_bsg_to_rport(job); 4175 blk_status_t ret; 4176 4177 ret = fc_bsg_rport_prep(rport); 4178 switch (ret) { 4179 case BLK_STS_OK: 4180 break; 4181 case BLK_STS_RESOURCE: 4182 return -EAGAIN; 4183 default: 4184 return -EIO; 4185 } 4186 4187 return fc_bsg_dispatch(job); 4188 } 4189 4190 /** 4191 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests 4192 * @shost: shost for fc_host 4193 * @fc_host: fc_host adding the structures to 4194 */ 4195 static int 4196 fc_bsg_hostadd(struct 
Scsi_Host *shost, struct fc_host_attrs *fc_host) 4197 { 4198 struct device *dev = &shost->shost_gendev; 4199 struct fc_internal *i = to_fc_internal(shost->transportt); 4200 struct request_queue *q; 4201 char bsg_name[20]; 4202 4203 fc_host->rqst_q = NULL; 4204 4205 if (!i->f->bsg_request) 4206 return -ENOTSUPP; 4207 4208 snprintf(bsg_name, sizeof(bsg_name), 4209 "fc_host%d", shost->host_no); 4210 4211 q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, 4212 i->f->dd_bsg_size); 4213 if (IS_ERR(q)) { 4214 dev_err(dev, 4215 "fc_host%d: bsg interface failed to initialize - setup queue\n", 4216 shost->host_no); 4217 return PTR_ERR(q); 4218 } 4219 __scsi_init_queue(shost, q); 4220 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); 4221 fc_host->rqst_q = q; 4222 return 0; 4223 } 4224 4225 /** 4226 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests 4227 * @shost: shost that rport is attached to 4228 * @rport: rport that the bsg hooks are being attached to 4229 */ 4230 static int 4231 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) 4232 { 4233 struct device *dev = &rport->dev; 4234 struct fc_internal *i = to_fc_internal(shost->transportt); 4235 struct request_queue *q; 4236 4237 rport->rqst_q = NULL; 4238 4239 if (!i->f->bsg_request) 4240 return -ENOTSUPP; 4241 4242 q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, 4243 fc_bsg_job_timeout, i->f->dd_bsg_size); 4244 if (IS_ERR(q)) { 4245 dev_err(dev, "failed to setup bsg queue\n"); 4246 return PTR_ERR(q); 4247 } 4248 __scsi_init_queue(shost, q); 4249 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); 4250 rport->rqst_q = q; 4251 return 0; 4252 } 4253 4254 4255 /** 4256 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports 4257 * @q: the request_queue that is to be torn down. 4258 * 4259 * Notes: 4260 * Before unregistering the queue empty any requests that are blocked 4261 * 4262 * 4263 */ 4264 static void 4265 fc_bsg_remove(struct request_queue *q) 4266 { 4267 bsg_remove_queue(q); 4268 } 4269 4270 4271 /* Original Author: Martin Hicks */ 4272 MODULE_AUTHOR("James Smart"); 4273 MODULE_DESCRIPTION("FC Transport Attributes"); 4274 MODULE_LICENSE("GPL"); 4275 4276 module_init(fc_transport_init); 4277 module_exit(fc_transport_exit); 4278
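
/*
 * Illustrative sketch (not compiled as part of the transport): how an
 * LLDD might drive the remote port lifecycle documented above. The
 * variables shost, wwnn, wwpn and port_id are placeholders for driver
 * state; only the fc_remote_port_*() calls are real transport APIs.
 *
 *	struct fc_rport_identifiers ids = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id = port_id,
 *		.roles = FC_PORT_ROLE_UNKNOWN,
 *	};
 *	struct fc_rport *rport;
 *
 *	rport = fc_remote_port_add(shost, 0, &ids);
 *
 *	// Once the login completes and the port is known to be a target:
 *	fc_remote_port_rolechg(rport, FC_PORT_ROLE_FCP_TARGET);
 *
 *	// On loss of connectivity; dev_loss_tmo/fast_io_fail_tmo then
 *	// govern when I/O is failed and the SCSI target is torn down:
 *	fc_remote_port_delete(rport);
 */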