1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Fibre Channel transport specific attributes exported to sysfs.
4  *
5  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
6  *  Copyright (C) 2004-2007   James Smart, Emulex Corporation
7  *    Rewrite for host, target, device, and remote port attributes,
8  *    statistics, and service functions...
9  *    Add vports, etc
10  */
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
15 #include <linux/kernel.h>
16 #include <linux/bsg-lib.h>
17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_transport.h>
20 #include <scsi/scsi_transport_fc.h>
21 #include <scsi/scsi_cmnd.h>
22 #include <net/netlink.h>
23 #include <scsi/scsi_netlink_fc.h>
24 #include <scsi/scsi_bsg_fc.h>
25 #include <uapi/scsi/fc/fc_els.h>
26 #include "scsi_priv.h"
27 
28 static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
29 static void fc_vport_sched_delete(struct work_struct *work);
30 static int fc_vport_setup(struct Scsi_Host *shost, int channel,
31 	struct device *pdev, struct fc_vport_identifiers  *ids,
32 	struct fc_vport **vport);
33 static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
34 static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
35 static void fc_bsg_remove(struct request_queue *);
36 static void fc_bsg_goose_queue(struct fc_rport *);
37 static void fc_li_stats_update(u16 event_type,
38 			       struct fc_fpin_stats *stats);
39 static void fc_delivery_stats_update(u32 reason_code,
40 				     struct fc_fpin_stats *stats);
41 static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
42 
43 /*
44  * Module Parameters
45  */
46 
47 /*
48  * dev_loss_tmo: the default number of seconds that the FC transport
49  *   should insulate the loss of a remote port.
50  *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
51  */
52 static unsigned int fc_dev_loss_tmo = 60;		/* seconds */
53 
54 module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
55 MODULE_PARM_DESC(dev_loss_tmo,
56 		 "Maximum number of seconds that the FC transport should"
57 		 " insulate the loss of a remote port. Once this value is"
58 		 " exceeded, the scsi target is removed. Value should be"
59 		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
60 		 " fast_io_fail_tmo is not set.");
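/*
 * Illustrative usage (values are examples): the default can be set at load
 * time, e.g. "modprobe scsi_transport_fc dev_loss_tmo=30", or changed later
 * through /sys/module/scsi_transport_fc/parameters/dev_loss_tmo. New fc_hosts
 * copy the current value in fc_host_setup() below, and it can still be
 * overridden per remote port via the rport dev_loss_tmo attribute.
 */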
61 
62 /*
63  * Redefine so that we can have same-named attributes in the
64  * sdev/starget/host objects.
65  */
66 #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store)		\
67 struct device_attribute device_attr_##_prefix##_##_name = 	\
68 	__ATTR(_name,_mode,_show,_store)
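/*
 * For example, fc_private_rport_rd_attr(port_id, ...) and
 * fc_starget_rd_attr(port_id, ...) further below both funnel through this
 * macro, yielding distinct device_attr_rport_port_id and
 * device_attr_starget_port_id symbols for the identically named "port_id"
 * sysfs file on each object.
 */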
69 
70 #define fc_enum_name_search(title, table_type, table)			\
71 static const char *get_fc_##title##_name(enum table_type table_key)	\
72 {									\
73 	int i;								\
74 	char *name = NULL;						\
75 									\
76 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
77 		if (table[i].value == table_key) {			\
78 			name = table[i].name;				\
79 			break;						\
80 		}							\
81 	}								\
82 	return name;							\
83 }
84 
85 #define fc_enum_name_match(title, table_type, table)			\
86 static int get_fc_##title##_match(const char *table_key,		\
87 		enum table_type *value)					\
88 {									\
89 	int i;								\
90 									\
91 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
92 		if (strncmp(table_key, table[i].name,			\
93 				table[i].matchlen) == 0) {		\
94 			*value = table[i].value;			\
95 			return 0; /* success */				\
96 		}							\
97 	}								\
98 	return 1; /* failure */						\
99 }
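/*
 * For example, fc_enum_name_search(port_state, fc_port_state,
 * fc_port_state_names) below generates
 *
 *	static const char *get_fc_port_state_name(enum fc_port_state table_key)
 *
 * which returns "Online" for FC_PORTSTATE_ONLINE and NULL for an unknown
 * value, while the _match variant does the reverse string-to-enum lookup
 * (comparing only the first "matchlen" characters, so "Online\n" written
 * via sysfs still matches) and returns 0 on success.
 */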
100 
101 
102 /* Convert fc_port_type values to ascii string name */
103 static struct {
104 	enum fc_port_type	value;
105 	char			*name;
106 } fc_port_type_names[] = {
107 	{ FC_PORTTYPE_UNKNOWN,		"Unknown" },
108 	{ FC_PORTTYPE_OTHER,		"Other" },
109 	{ FC_PORTTYPE_NOTPRESENT,	"Not Present" },
110 	{ FC_PORTTYPE_NPORT,	"NPort (fabric via point-to-point)" },
111 	{ FC_PORTTYPE_NLPORT,	"NLPort (fabric via loop)" },
112 	{ FC_PORTTYPE_LPORT,	"LPort (private loop)" },
113 	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
114 	{ FC_PORTTYPE_NPIV,		"NPIV VPORT" },
115 };
116 fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
117 #define FC_PORTTYPE_MAX_NAMELEN		50
118 
119 /* Reuse fc_port_type enum function for vport_type */
120 #define get_fc_vport_type_name get_fc_port_type_name
121 
122 
123 /* Convert fc_host_event_code values to ascii string name */
124 static const struct {
125 	enum fc_host_event_code		value;
126 	char				*name;
127 } fc_host_event_code_names[] = {
128 	{ FCH_EVT_LIP,			"lip" },
129 	{ FCH_EVT_LINKUP,		"link_up" },
130 	{ FCH_EVT_LINKDOWN,		"link_down" },
131 	{ FCH_EVT_LIPRESET,		"lip_reset" },
132 	{ FCH_EVT_RSCN,			"rscn" },
133 	{ FCH_EVT_ADAPTER_CHANGE,	"adapter_chg" },
134 	{ FCH_EVT_PORT_UNKNOWN,		"port_unknown" },
135 	{ FCH_EVT_PORT_ONLINE,		"port_online" },
136 	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
137 	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
138 	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
139 	{ FCH_EVT_LINK_FPIN,		"link_FPIN" },
140 	{ FCH_EVT_LINK_FPIN_ACK,	"link_FPIN_ACK" },
141 	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
142 };
143 fc_enum_name_search(host_event_code, fc_host_event_code,
144 		fc_host_event_code_names)
145 #define FC_HOST_EVENT_CODE_MAX_NAMELEN	30
146 
147 
148 /* Convert fc_port_state values to ascii string name */
149 static struct {
150 	enum fc_port_state	value;
151 	char			*name;
152 	int			matchlen;
153 } fc_port_state_names[] = {
154 	{ FC_PORTSTATE_UNKNOWN,		"Unknown", 7},
155 	{ FC_PORTSTATE_NOTPRESENT,	"Not Present", 11 },
156 	{ FC_PORTSTATE_ONLINE,		"Online", 6 },
157 	{ FC_PORTSTATE_OFFLINE,		"Offline", 7 },
158 	{ FC_PORTSTATE_BLOCKED,		"Blocked", 7 },
159 	{ FC_PORTSTATE_BYPASSED,	"Bypassed", 8 },
160 	{ FC_PORTSTATE_DIAGNOSTICS,	"Diagnostics", 11 },
161 	{ FC_PORTSTATE_LINKDOWN,	"Linkdown", 8 },
162 	{ FC_PORTSTATE_ERROR,		"Error", 5 },
163 	{ FC_PORTSTATE_LOOPBACK,	"Loopback", 8 },
164 	{ FC_PORTSTATE_DELETED,		"Deleted", 7 },
165 	{ FC_PORTSTATE_MARGINAL,	"Marginal", 8 },
166 };
167 fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
168 fc_enum_name_match(port_state, fc_port_state, fc_port_state_names)
169 #define FC_PORTSTATE_MAX_NAMELEN	20
170 
171 
172 /* Convert fc_vport_state values to ascii string name */
173 static struct {
174 	enum fc_vport_state	value;
175 	char			*name;
176 } fc_vport_state_names[] = {
177 	{ FC_VPORT_UNKNOWN,		"Unknown" },
178 	{ FC_VPORT_ACTIVE,		"Active" },
179 	{ FC_VPORT_DISABLED,		"Disabled" },
180 	{ FC_VPORT_LINKDOWN,		"Linkdown" },
181 	{ FC_VPORT_INITIALIZING,	"Initializing" },
182 	{ FC_VPORT_NO_FABRIC_SUPP,	"No Fabric Support" },
183 	{ FC_VPORT_NO_FABRIC_RSCS,	"No Fabric Resources" },
184 	{ FC_VPORT_FABRIC_LOGOUT,	"Fabric Logout" },
185 	{ FC_VPORT_FABRIC_REJ_WWN,	"Fabric Rejected WWN" },
186 	{ FC_VPORT_FAILED,		"VPort Failed" },
187 };
188 fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
189 #define FC_VPORTSTATE_MAX_NAMELEN	24
190 
191 /* Reuse fc_vport_state enum function for vport_last_state */
192 #define get_fc_vport_last_state_name get_fc_vport_state_name
193 
194 
195 /* Convert fc_tgtid_binding_type values to ascii string name */
196 static const struct {
197 	enum fc_tgtid_binding_type	value;
198 	char				*name;
199 	int				matchlen;
200 } fc_tgtid_binding_type_names[] = {
201 	{ FC_TGTID_BIND_NONE, "none", 4 },
202 	{ FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
203 	{ FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
204 	{ FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
205 };
206 fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
207 		fc_tgtid_binding_type_names)
208 fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
209 		fc_tgtid_binding_type_names)
210 #define FC_BINDTYPE_MAX_NAMELEN	30
211 
212 
213 #define fc_bitfield_name_search(title, table)			\
214 static ssize_t							\
215 get_fc_##title##_names(u32 table_key, char *buf)		\
216 {								\
217 	char *prefix = "";					\
218 	ssize_t len = 0;					\
219 	int i;							\
220 								\
221 	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
222 		if (table[i].value & table_key) {		\
223 			len += sprintf(buf + len, "%s%s",	\
224 				prefix, table[i].name);		\
225 			prefix = ", ";				\
226 		}						\
227 	}							\
228 	len += sprintf(buf + len, "\n");			\
229 	return len;						\
230 }
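/*
 * For example, with the fc_cos_names table below,
 * get_fc_cos_names(FC_COS_CLASS2 | FC_COS_CLASS3, buf) writes
 * "Class 2, Class 3\n" into buf and returns the number of bytes written.
 */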
231 
232 
233 /* Convert FC_COS bit values to ascii string name */
234 static const struct {
235 	u32 			value;
236 	char			*name;
237 } fc_cos_names[] = {
238 	{ FC_COS_CLASS1,	"Class 1" },
239 	{ FC_COS_CLASS2,	"Class 2" },
240 	{ FC_COS_CLASS3,	"Class 3" },
241 	{ FC_COS_CLASS4,	"Class 4" },
242 	{ FC_COS_CLASS6,	"Class 6" },
243 };
244 fc_bitfield_name_search(cos, fc_cos_names)
245 
246 
247 /* Convert FC_PORTSPEED bit values to ascii string name */
248 static const struct {
249 	u32 			value;
250 	char			*name;
251 } fc_port_speed_names[] = {
252 	{ FC_PORTSPEED_1GBIT,		"1 Gbit" },
253 	{ FC_PORTSPEED_2GBIT,		"2 Gbit" },
254 	{ FC_PORTSPEED_4GBIT,		"4 Gbit" },
255 	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
256 	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
257 	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
258 	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },
259 	{ FC_PORTSPEED_20GBIT,		"20 Gbit" },
260 	{ FC_PORTSPEED_40GBIT,		"40 Gbit" },
261 	{ FC_PORTSPEED_50GBIT,		"50 Gbit" },
262 	{ FC_PORTSPEED_100GBIT,		"100 Gbit" },
263 	{ FC_PORTSPEED_25GBIT,		"25 Gbit" },
264 	{ FC_PORTSPEED_64GBIT,		"64 Gbit" },
265 	{ FC_PORTSPEED_128GBIT,		"128 Gbit" },
266 	{ FC_PORTSPEED_256GBIT,		"256 Gbit" },
267 	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
268 };
269 fc_bitfield_name_search(port_speed, fc_port_speed_names)
270 
271 
272 static int
273 show_fc_fc4s (char *buf, u8 *fc4_list)
274 {
275 	int i, len=0;
276 
277 	for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
278 		len += sprintf(buf + len , "0x%02x ", *fc4_list);
279 	len += sprintf(buf + len, "\n");
280 	return len;
281 }
282 
283 
284 /* Convert FC_PORT_ROLE bit values to ascii string name */
285 static const struct {
286 	u32 			value;
287 	char			*name;
288 } fc_port_role_names[] = {
289 	{ FC_PORT_ROLE_FCP_TARGET,		"FCP Target" },
290 	{ FC_PORT_ROLE_FCP_INITIATOR,		"FCP Initiator" },
291 	{ FC_PORT_ROLE_IP_PORT,			"IP Port" },
292 	{ FC_PORT_ROLE_FCP_DUMMY_INITIATOR,	"FCP Dummy Initiator" },
293 	{ FC_PORT_ROLE_NVME_INITIATOR,		"NVMe Initiator" },
294 	{ FC_PORT_ROLE_NVME_TARGET,		"NVMe Target" },
295 	{ FC_PORT_ROLE_NVME_DISCOVERY,		"NVMe Discovery" },
296 };
297 fc_bitfield_name_search(port_roles, fc_port_role_names)
298 
299 /*
300  * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
301  */
302 #define FC_WELLKNOWN_PORTID_MASK	0xfffff0
303 #define FC_WELLKNOWN_ROLE_MASK  	0x00000f
304 #define FC_FPORT_PORTID			0x00000e
305 #define FC_FABCTLR_PORTID		0x00000d
306 #define FC_DIRSRVR_PORTID		0x00000c
307 #define FC_TIMESRVR_PORTID		0x00000b
308 #define FC_MGMTSRVR_PORTID		0x00000a
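/*
 * For example, a remote port reported with port_id 0xFFFFFE satisfies
 * (port_id & FC_WELLKNOWN_PORTID_MASK) == FC_WELLKNOWN_PORTID_MASK, and
 * (port_id & FC_WELLKNOWN_ROLE_MASK) == FC_FPORT_PORTID, so
 * show_fc_rport_roles() below reports it as "Fabric Port" instead of
 * decoding the role bits.
 */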
309 
310 
311 static void fc_timeout_deleted_rport(struct work_struct *work);
312 static void fc_timeout_fail_rport_io(struct work_struct *work);
313 static void fc_scsi_scan_rport(struct work_struct *work);
314 
315 /*
316  * Attribute counts per object type...
317  * Increase these values if you add attributes
318  */
319 #define FC_STARGET_NUM_ATTRS 	3
320 #define FC_RPORT_NUM_ATTRS	10
321 #define FC_VPORT_NUM_ATTRS	9
322 #define FC_HOST_NUM_ATTRS	29
323 
324 struct fc_internal {
325 	struct scsi_transport_template t;
326 	struct fc_function_template *f;
327 
328 	/*
329 	 * For attributes, each object has:
330 	 *   An array of the actual attribute structures
331 	 *   A null-terminated array of pointers to the attribute
332 	 *     structures - used for mid-layer interaction.
333 	 *
334 	 * The attribute containers for the starget and host are
335 	 * part of the midlayer. As the remote port is specific to the
336 	 * fc transport, we must provide the attribute container.
337 	 */
338 	struct device_attribute private_starget_attrs[
339 							FC_STARGET_NUM_ATTRS];
340 	struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
341 
342 	struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
343 	struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
344 
345 	struct transport_container rport_attr_cont;
346 	struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
347 	struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
348 
349 	struct transport_container vport_attr_cont;
350 	struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
351 	struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
352 };
353 
354 #define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
355 
356 static int fc_target_setup(struct transport_container *tc, struct device *dev,
357 			   struct device *cdev)
358 {
359 	struct scsi_target *starget = to_scsi_target(dev);
360 	struct fc_rport *rport = starget_to_rport(starget);
361 
362 	/*
363 	 * if parent is remote port, use values from remote port.
364 	 * Otherwise, this host uses the fc_transport, but not the
365 	 * remote port interface. As such, initialize to known non-values.
366 	 */
367 	if (rport) {
368 		fc_starget_node_name(starget) = rport->node_name;
369 		fc_starget_port_name(starget) = rport->port_name;
370 		fc_starget_port_id(starget) = rport->port_id;
371 	} else {
372 		fc_starget_node_name(starget) = -1;
373 		fc_starget_port_name(starget) = -1;
374 		fc_starget_port_id(starget) = -1;
375 	}
376 
377 	return 0;
378 }
379 
380 static DECLARE_TRANSPORT_CLASS(fc_transport_class,
381 			       "fc_transport",
382 			       fc_target_setup,
383 			       NULL,
384 			       NULL);
385 
386 static int fc_host_setup(struct transport_container *tc, struct device *dev,
387 			 struct device *cdev)
388 {
389 	struct Scsi_Host *shost = dev_to_shost(dev);
390 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
391 
392 	/*
393 	 * Set default values easily detected by the midlayer as
394 	 * failure cases.  The scsi lldd is responsible for initializing
395 	 * all transport attributes to valid values per host.
396 	 */
397 	fc_host->node_name = -1;
398 	fc_host->port_name = -1;
399 	fc_host->permanent_port_name = -1;
400 	fc_host->supported_classes = FC_COS_UNSPECIFIED;
401 	memset(fc_host->supported_fc4s, 0,
402 		sizeof(fc_host->supported_fc4s));
403 	fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
404 	fc_host->maxframe_size = -1;
405 	fc_host->max_npiv_vports = 0;
406 	memset(fc_host->serial_number, 0,
407 		sizeof(fc_host->serial_number));
408 	memset(fc_host->manufacturer, 0,
409 		sizeof(fc_host->manufacturer));
410 	memset(fc_host->model, 0,
411 		sizeof(fc_host->model));
412 	memset(fc_host->model_description, 0,
413 		sizeof(fc_host->model_description));
414 	memset(fc_host->hardware_version, 0,
415 		sizeof(fc_host->hardware_version));
416 	memset(fc_host->driver_version, 0,
417 		sizeof(fc_host->driver_version));
418 	memset(fc_host->firmware_version, 0,
419 		sizeof(fc_host->firmware_version));
420 	memset(fc_host->optionrom_version, 0,
421 		sizeof(fc_host->optionrom_version));
422 
423 	fc_host->port_id = -1;
424 	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
425 	fc_host->port_state = FC_PORTSTATE_UNKNOWN;
426 	memset(fc_host->active_fc4s, 0,
427 		sizeof(fc_host->active_fc4s));
428 	fc_host->speed = FC_PORTSPEED_UNKNOWN;
429 	fc_host->fabric_name = -1;
430 	memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
431 	memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
432 	memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));
433 
434 	fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
435 
436 	INIT_LIST_HEAD(&fc_host->rports);
437 	INIT_LIST_HEAD(&fc_host->rport_bindings);
438 	INIT_LIST_HEAD(&fc_host->vports);
439 	fc_host->next_rport_number = 0;
440 	fc_host->next_target_id = 0;
441 	fc_host->next_vport_number = 0;
442 	fc_host->npiv_vports_inuse = 0;
443 
444 	snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
445 		 "fc_wq_%d", shost->host_no);
446 	fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
447 	if (!fc_host->work_q)
448 		return -ENOMEM;
449 
450 	fc_host->dev_loss_tmo = fc_dev_loss_tmo;
451 	snprintf(fc_host->devloss_work_q_name,
452 		 sizeof(fc_host->devloss_work_q_name),
453 		 "fc_dl_%d", shost->host_no);
454 	fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
455 					fc_host->devloss_work_q_name);
456 	if (!fc_host->devloss_work_q) {
457 		destroy_workqueue(fc_host->work_q);
458 		fc_host->work_q = NULL;
459 		return -ENOMEM;
460 	}
461 
462 	fc_bsg_hostadd(shost, fc_host);
463 	/* ignore any bsg add error - we just can't do sgio */
464 
465 	return 0;
466 }
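/*
 * Illustrative (hypothetical LLDD snippet, not part of this file): after
 * scsi_add_host(), a driver fills in the attributes this setup routine
 * defaulted, typically via the accessor macros from scsi_transport_fc.h:
 *
 *	fc_host_node_name(shost) = wwn_to_u64(hw->node_name);
 *	fc_host_port_name(shost) = wwn_to_u64(hw->port_name);
 *	fc_host_max_npiv_vports(shost) = hw->max_vpi;
 *
 * ("hw" is a placeholder for the driver's private hardware state.)
 */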
467 
468 static int fc_host_remove(struct transport_container *tc, struct device *dev,
469 			 struct device *cdev)
470 {
471 	struct Scsi_Host *shost = dev_to_shost(dev);
472 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
473 
474 	fc_bsg_remove(fc_host->rqst_q);
475 	return 0;
476 }
477 
478 static DECLARE_TRANSPORT_CLASS(fc_host_class,
479 			       "fc_host",
480 			       fc_host_setup,
481 			       fc_host_remove,
482 			       NULL);
483 
484 /*
485  * Setup and Remove actions for remote ports are handled
486  * in the service functions below.
487  */
488 static DECLARE_TRANSPORT_CLASS(fc_rport_class,
489 			       "fc_remote_ports",
490 			       NULL,
491 			       NULL,
492 			       NULL);
493 
494 /*
495  * Setup and Remove actions for virtual ports are handled
496  * in the service functions below.
497  */
498 static DECLARE_TRANSPORT_CLASS(fc_vport_class,
499 			       "fc_vports",
500 			       NULL,
501 			       NULL,
502 			       NULL);
503 
504 /*
505  * Netlink Infrastructure
506  */
507 
508 static atomic_t fc_event_seq;
509 
510 /**
511  * fc_get_event_number - Obtain the next sequential FC event number
512  *
513  * Notes:
514  *   We could have inlined this, but it would have required fc_event_seq to
515  *   be exposed. For now, live with the subroutine call.
516  *   Atomic used to avoid lock/unlock...
517  */
518 u32
519 fc_get_event_number(void)
520 {
521 	return atomic_add_return(1, &fc_event_seq);
522 }
523 EXPORT_SYMBOL(fc_get_event_number);
524 
525 /**
526  * fc_host_post_fc_event - routine to do the work of posting an event
527  *                      on an fc_host.
528  * @shost:		host the event occurred on
529  * @event_number:	fc event number obtained from fc_get_event_number()
530  * @event_code:		fc_host event being posted
531  * @data_len:		amount, in bytes, of event data
532  * @data_buf:		pointer to event data
533  * @vendor_id:          value for Vendor id
534  *
535  * Notes:
536  *	This routine assumes no locks are held on entry.
537  */
538 void
539 fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
540 		enum fc_host_event_code event_code,
541 		u32 data_len, char *data_buf, u64 vendor_id)
542 {
543 	struct sk_buff *skb;
544 	struct nlmsghdr	*nlh;
545 	struct fc_nl_event *event;
546 	const char *name;
547 	size_t len, padding;
548 	int err;
549 
550 	if (!data_buf || data_len < 4)
551 		data_len = 0;
552 
553 	if (!scsi_nl_sock) {
554 		err = -ENOENT;
555 		goto send_fail;
556 	}
557 
558 	len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
559 
560 	skb = nlmsg_new(len, GFP_KERNEL);
561 	if (!skb) {
562 		err = -ENOBUFS;
563 		goto send_fail;
564 	}
565 
566 	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
567 	if (!nlh) {
568 		err = -ENOBUFS;
569 		goto send_fail_skb;
570 	}
571 	event = nlmsg_data(nlh);
572 
573 	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
574 				FC_NL_ASYNC_EVENT, len);
575 	event->seconds = ktime_get_real_seconds();
576 	event->vendor_id = vendor_id;
577 	event->host_no = shost->host_no;
578 	event->event_datalen = data_len;	/* bytes */
579 	event->event_num = event_number;
580 	event->event_code = event_code;
581 	if (data_len)
582 		memcpy(event->event_data_flex, data_buf, data_len);
583 	padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
584 	memset(event->event_data_flex + data_len, 0, padding);
585 
586 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
587 			GFP_KERNEL);
588 	return;
589 
590 send_fail_skb:
591 	kfree_skb(skb);
592 send_fail:
593 	name = get_fc_host_event_code_name(event_code);
594 	printk(KERN_WARNING
595 		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
596 		__func__, shost->host_no,
597 		(name) ? name : "<unknown>",
598 		(data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
599 	return;
600 }
601 EXPORT_SYMBOL(fc_host_post_fc_event);
602 
603 /**
604  * fc_host_post_event - called to post an event on an fc_host.
605  * @shost:		host the event occurred on
606  * @event_number:	fc event number obtained from fc_get_event_number()
607  * @event_code:		fc_host event being posted
608  * @event_data:		32bits of data for the event being posted
609  *
610  * Notes:
611  *	This routine assumes no locks are held on entry.
612  */
613 void
614 fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
615 		enum fc_host_event_code event_code, u32 event_data)
616 {
617 	fc_host_post_fc_event(shost, event_number, event_code,
618 		(u32)sizeof(u32), (char *)&event_data, 0);
619 }
620 EXPORT_SYMBOL(fc_host_post_event);
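/*
 * Illustrative (hypothetical LLDD snippet, not part of this file): a driver
 * noticing that its link has come up might report it as
 *
 *	fc_host_post_event(shost, fc_get_event_number(),
 *			   FCH_EVT_LINKUP, 0);
 *
 * where the trailing u32 is the driver-specific event_data (0 here).
 */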
621 
622 
623 /**
624  * fc_host_post_vendor_event - called to post a vendor unique event
625  *                      on an fc_host
626  * @shost:		host the event occurred on
627  * @event_number:	fc event number obtained from fc_get_event_number()
628  * @data_len:		amount, in bytes, of vendor unique data
629  * @data_buf:		pointer to vendor unique data
630  * @vendor_id:          Vendor id
631  *
632  * Notes:
633  *	This routine assumes no locks are held on entry.
634  */
635 void
636 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
637 		u32 data_len, char * data_buf, u64 vendor_id)
638 {
639 	fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
640 		data_len, data_buf, vendor_id);
641 }
642 EXPORT_SYMBOL(fc_host_post_vendor_event);
643 
644 /**
645  * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
646  * @shost:		host the fc_rport is associated with
647  * @wwpn:		wwpn of the fc_rport device
648  *
649  * Notes:
650  *	This routine assumes no locks are held on entry.
651  */
652 struct fc_rport *
653 fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
654 {
655 	struct fc_rport *rport;
656 	unsigned long flags;
657 
658 	spin_lock_irqsave(shost->host_lock, flags);
659 
660 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
661 		if (rport->port_state != FC_PORTSTATE_ONLINE)
662 			continue;
663 
664 		if (rport->port_name == wwpn) {
665 			spin_unlock_irqrestore(shost->host_lock, flags);
666 			return rport;
667 		}
668 	}
669 
670 	spin_unlock_irqrestore(shost->host_lock, flags);
671 	return NULL;
672 }
673 EXPORT_SYMBOL(fc_find_rport_by_wwpn);
674 
675 static void
676 fc_li_stats_update(u16 event_type,
677 		   struct fc_fpin_stats *stats)
678 {
679 	stats->li++;
680 	switch (event_type) {
681 	case FPIN_LI_UNKNOWN:
682 		stats->li_failure_unknown++;
683 		break;
684 	case FPIN_LI_LINK_FAILURE:
685 		stats->li_link_failure_count++;
686 		break;
687 	case FPIN_LI_LOSS_OF_SYNC:
688 		stats->li_loss_of_sync_count++;
689 		break;
690 	case FPIN_LI_LOSS_OF_SIG:
691 		stats->li_loss_of_signals_count++;
692 		break;
693 	case FPIN_LI_PRIM_SEQ_ERR:
694 		stats->li_prim_seq_err_count++;
695 		break;
696 	case FPIN_LI_INVALID_TX_WD:
697 		stats->li_invalid_tx_word_count++;
698 		break;
699 	case FPIN_LI_INVALID_CRC:
700 		stats->li_invalid_crc_count++;
701 		break;
702 	case FPIN_LI_DEVICE_SPEC:
703 		stats->li_device_specific++;
704 		break;
705 	}
706 }
707 
708 static void
709 fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
710 {
711 	stats->dn++;
712 	switch (reason_code) {
713 	case FPIN_DELI_UNKNOWN:
714 		stats->dn_unknown++;
715 		break;
716 	case FPIN_DELI_TIMEOUT:
717 		stats->dn_timeout++;
718 		break;
719 	case FPIN_DELI_UNABLE_TO_ROUTE:
720 		stats->dn_unable_to_route++;
721 		break;
722 	case FPIN_DELI_DEVICE_SPEC:
723 		stats->dn_device_specific++;
724 		break;
725 	}
726 }
727 
728 static void
729 fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
730 {
731 	stats->cn++;
732 	switch (event_type) {
733 	case FPIN_CONGN_CLEAR:
734 		stats->cn_clear++;
735 		break;
736 	case FPIN_CONGN_LOST_CREDIT:
737 		stats->cn_lost_credit++;
738 		break;
739 	case FPIN_CONGN_CREDIT_STALL:
740 		stats->cn_credit_stall++;
741 		break;
742 	case FPIN_CONGN_OVERSUBSCRIPTION:
743 		stats->cn_oversubscription++;
744 		break;
745 	case FPIN_CONGN_DEVICE_SPEC:
746 		stats->cn_device_specific++;
747 	}
748 }
749 
750 /*
751  * fc_fpin_li_stats_update - routine to update Link Integrity
752  * event statistics.
753  * @shost:		host the FPIN was received on
754  * @tlv:		pointer to link integrity descriptor
755  *
756  */
757 static void
758 fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
759 {
760 	u8 i;
761 	struct fc_rport *rport = NULL;
762 	struct fc_rport *attach_rport = NULL;
763 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
764 	struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
765 	u16 event_type = be16_to_cpu(li_desc->event_type);
766 	u64 wwpn;
767 
768 	rport = fc_find_rport_by_wwpn(shost,
769 				      be64_to_cpu(li_desc->attached_wwpn));
770 	if (rport &&
771 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
772 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
773 		attach_rport = rport;
774 		fc_li_stats_update(event_type, &attach_rport->fpin_stats);
775 	}
776 
777 	if (be32_to_cpu(li_desc->pname_count) > 0) {
778 		for (i = 0;
779 		    i < be32_to_cpu(li_desc->pname_count);
780 		    i++) {
781 			wwpn = be64_to_cpu(li_desc->pname_list[i]);
782 			rport = fc_find_rport_by_wwpn(shost, wwpn);
783 			if (rport &&
784 			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
785 			    rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
786 				if (rport == attach_rport)
787 					continue;
788 				fc_li_stats_update(event_type,
789 						   &rport->fpin_stats);
790 			}
791 		}
792 	}
793 
794 	if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
795 		fc_li_stats_update(event_type, &fc_host->fpin_stats);
796 }
797 
798 /*
799  * fc_fpin_delivery_stats_update - routine to update Delivery Notification
800  * event statistics.
801  * @shost:		host the FPIN was received on
802  * @tlv:		pointer to delivery descriptor
803  *
804  */
805 static void
806 fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
807 			      struct fc_tlv_desc *tlv)
808 {
809 	struct fc_rport *rport = NULL;
810 	struct fc_rport *attach_rport = NULL;
811 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
812 	struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
813 	u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);
814 
815 	rport = fc_find_rport_by_wwpn(shost,
816 				      be64_to_cpu(dn_desc->attached_wwpn));
817 	if (rport &&
818 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
819 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
820 		attach_rport = rport;
821 		fc_delivery_stats_update(reason_code,
822 					 &attach_rport->fpin_stats);
823 	}
824 
825 	if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
826 		fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
827 }
828 
829 /*
830  * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
831  * event statistics.
832  * @shost:		host the FPIN was received on
833  * @tlv:		pointer to peer congestion descriptor
834  *
835  */
836 static void
837 fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
838 				struct fc_tlv_desc *tlv)
839 {
840 	u8 i;
841 	struct fc_rport *rport = NULL;
842 	struct fc_rport *attach_rport = NULL;
843 	struct fc_fn_peer_congn_desc *pc_desc =
844 	    (struct fc_fn_peer_congn_desc *)tlv;
845 	u16 event_type = be16_to_cpu(pc_desc->event_type);
846 	u64 wwpn;
847 
848 	rport = fc_find_rport_by_wwpn(shost,
849 				      be64_to_cpu(pc_desc->attached_wwpn));
850 	if (rport &&
851 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
852 	     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
853 		attach_rport = rport;
854 		fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
855 	}
856 
857 	if (be32_to_cpu(pc_desc->pname_count) > 0) {
858 		for (i = 0;
859 		    i < be32_to_cpu(pc_desc->pname_count);
860 		    i++) {
861 			wwpn = be64_to_cpu(pc_desc->pname_list[i]);
862 			rport = fc_find_rport_by_wwpn(shost, wwpn);
863 			if (rport &&
864 			    (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
865 			     rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
866 				if (rport == attach_rport)
867 					continue;
868 				fc_cn_stats_update(event_type,
869 						   &rport->fpin_stats);
870 			}
871 		}
872 	}
873 }
874 
875 /*
876  * fc_fpin_congn_stats_update - routine to update Congestion
877  * event statistics.
878  * @shost:		host the FPIN was received on
879  * @tlv:		pointer to congestion descriptor
880  *
881  */
882 static void
883 fc_fpin_congn_stats_update(struct Scsi_Host *shost,
884 			   struct fc_tlv_desc *tlv)
885 {
886 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
887 	struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;
888 
889 	fc_cn_stats_update(be16_to_cpu(congn->event_type),
890 			   &fc_host->fpin_stats);
891 }
892 
893 /**
894  * fc_host_fpin_rcv - routine to process a received FPIN.
895  * @shost:		host the FPIN was received on
896  * @fpin_len:		length of FPIN payload, in bytes
897  * @fpin_buf:		pointer to FPIN payload
898  * @event_acknowledge:	1, if LLDD handles this event.
899  * Notes:
900  *	This routine assumes no locks are held on entry.
901  */
902 void
903 fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf,
904 		u8 event_acknowledge)
905 {
906 	struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
907 	struct fc_tlv_desc *tlv;
908 	u32 desc_cnt = 0, bytes_remain;
909 	u32 dtag;
910 	enum fc_host_event_code event_code =
911 		event_acknowledge ? FCH_EVT_LINK_FPIN_ACK : FCH_EVT_LINK_FPIN;
912 
913 	/* Update Statistics */
914 	tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
915 	bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
916 	bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
917 
918 	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
919 	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
920 		dtag = be32_to_cpu(tlv->desc_tag);
921 		switch (dtag) {
922 		case ELS_DTAG_LNK_INTEGRITY:
923 			fc_fpin_li_stats_update(shost, tlv);
924 			break;
925 		case ELS_DTAG_DELIVERY:
926 			fc_fpin_delivery_stats_update(shost, tlv);
927 			break;
928 		case ELS_DTAG_PEER_CONGEST:
929 			fc_fpin_peer_congn_stats_update(shost, tlv);
930 			break;
931 		case ELS_DTAG_CONGESTION:
932 			fc_fpin_congn_stats_update(shost, tlv);
933 		}
934 
935 		desc_cnt++;
936 		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
937 		tlv = fc_tlv_next_desc(tlv);
938 	}
939 
940 	fc_host_post_fc_event(shost, fc_get_event_number(),
941 				event_code, fpin_len, fpin_buf, 0);
942 }
943 EXPORT_SYMBOL(fc_host_fpin_rcv);
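/*
 * Illustrative (hypothetical LLDD snippet): on receipt of an FPIN ELS, a
 * driver hands the raw payload to the transport, indicating whether it will
 * acknowledge the ELS itself:
 *
 *	fc_host_fpin_rcv(lport->host, fpin_frame_len, (char *)fpin_payload, 0);
 *
 * ("lport" and "fpin_payload" are placeholders for driver state.)  The
 * transport updates the fpin_* statistics and reposts the payload as an
 * FCH_EVT_LINK_FPIN (or _ACK) netlink event for userspace monitors.
 */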
944 
945 
946 static __init int fc_transport_init(void)
947 {
948 	int error;
949 
950 	atomic_set(&fc_event_seq, 0);
951 
952 	error = transport_class_register(&fc_host_class);
953 	if (error)
954 		return error;
955 	error = transport_class_register(&fc_vport_class);
956 	if (error)
957 		goto unreg_host_class;
958 	error = transport_class_register(&fc_rport_class);
959 	if (error)
960 		goto unreg_vport_class;
961 	error = transport_class_register(&fc_transport_class);
962 	if (error)
963 		goto unreg_rport_class;
964 	return 0;
965 
966 unreg_rport_class:
967 	transport_class_unregister(&fc_rport_class);
968 unreg_vport_class:
969 	transport_class_unregister(&fc_vport_class);
970 unreg_host_class:
971 	transport_class_unregister(&fc_host_class);
972 	return error;
973 }
974 
975 static void __exit fc_transport_exit(void)
976 {
977 	transport_class_unregister(&fc_transport_class);
978 	transport_class_unregister(&fc_rport_class);
979 	transport_class_unregister(&fc_host_class);
980 	transport_class_unregister(&fc_vport_class);
981 }
982 
983 /*
984  * FC Remote Port Attribute Management
985  */
986 
987 #define fc_rport_show_function(field, format_string, sz, cast)		\
988 static ssize_t								\
989 show_fc_rport_##field (struct device *dev, 				\
990 		       struct device_attribute *attr, char *buf)	\
991 {									\
992 	struct fc_rport *rport = transport_class_to_rport(dev);		\
993 	struct Scsi_Host *shost = rport_to_shost(rport);		\
994 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
995 	if ((i->f->get_rport_##field) &&				\
996 	    !((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
997 	      (rport->port_state == FC_PORTSTATE_DELETED) ||		\
998 	      (rport->port_state == FC_PORTSTATE_NOTPRESENT)))		\
999 		i->f->get_rport_##field(rport);				\
1000 	return snprintf(buf, sz, format_string, cast rport->field); 	\
1001 }
1002 
1003 #define fc_rport_store_function(field)					\
1004 static ssize_t								\
1005 store_fc_rport_##field(struct device *dev,				\
1006 		       struct device_attribute *attr,			\
1007 		       const char *buf,	size_t count)			\
1008 {									\
1009 	int val;							\
1010 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1011 	struct Scsi_Host *shost = rport_to_shost(rport);		\
1012 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1013 	char *cp;							\
1014 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
1015 	    (rport->port_state == FC_PORTSTATE_DELETED) ||		\
1016 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))		\
1017 		return -EBUSY;						\
1018 	val = simple_strtoul(buf, &cp, 0);				\
1019 	if (*cp && (*cp != '\n'))					\
1020 		return -EINVAL;						\
1021 	i->f->set_rport_##field(rport, val);				\
1022 	return count;							\
1023 }
1024 
1025 #define fc_rport_rd_attr(field, format_string, sz)			\
1026 	fc_rport_show_function(field, format_string, sz, )		\
1027 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1028 			 show_fc_rport_##field, NULL)
1029 
1030 #define fc_rport_rd_attr_cast(field, format_string, sz, cast)		\
1031 	fc_rport_show_function(field, format_string, sz, (cast))	\
1032 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1033 			  show_fc_rport_##field, NULL)
1034 
1035 #define fc_rport_rw_attr(field, format_string, sz)			\
1036 	fc_rport_show_function(field, format_string, sz, )		\
1037 	fc_rport_store_function(field)					\
1038 static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR,		\
1039 			show_fc_rport_##field,				\
1040 			store_fc_rport_##field)
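/*
 * Illustrative expansion ("foo" is a placeholder, not a real attribute in
 * this file): fc_rport_rd_attr(foo, "%u\n", 20) would emit
 * show_fc_rport_foo(), which refreshes the value through the LLDD's
 * optional get_rport_foo() hook (skipped while the rport is blocked,
 * deleted, or not present) and then prints rport->foo, plus a read-only
 * device_attr_rport_foo declared via FC_DEVICE_ATTR.
 */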
1041 
1042 
1043 #define fc_private_rport_show_function(field, format_string, sz, cast)	\
1044 static ssize_t								\
1045 show_fc_rport_##field (struct device *dev, 				\
1046 		       struct device_attribute *attr, char *buf)	\
1047 {									\
1048 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1049 	return snprintf(buf, sz, format_string, cast rport->field); 	\
1050 }
1051 
1052 #define fc_private_rport_rd_attr(field, format_string, sz)		\
1053 	fc_private_rport_show_function(field, format_string, sz, )	\
1054 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1055 			 show_fc_rport_##field, NULL)
1056 
1057 #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast)	\
1058 	fc_private_rport_show_function(field, format_string, sz, (cast)) \
1059 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
1060 			  show_fc_rport_##field, NULL)
1061 
1062 
1063 #define fc_private_rport_rd_enum_attr(title, maxlen)			\
1064 static ssize_t								\
1065 show_fc_rport_##title (struct device *dev,				\
1066 		       struct device_attribute *attr, char *buf)	\
1067 {									\
1068 	struct fc_rport *rport = transport_class_to_rport(dev);		\
1069 	const char *name;						\
1070 	name = get_fc_##title##_name(rport->title);			\
1071 	if (!name)							\
1072 		return -EINVAL;						\
1073 	return snprintf(buf, maxlen, "%s\n", name);			\
1074 }									\
1075 static FC_DEVICE_ATTR(rport, title, S_IRUGO,			\
1076 			show_fc_rport_##title, NULL)
1077 
1078 
1079 #define SETUP_RPORT_ATTRIBUTE_RD(field)					\
1080 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1081 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
1082 	i->private_rport_attrs[count].store = NULL;			\
1083 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1084 	if (i->f->show_rport_##field)					\
1085 		count++
1086 
1087 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field)				\
1088 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1089 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
1090 	i->private_rport_attrs[count].store = NULL;			\
1091 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1092 	count++
1093 
1094 #define SETUP_RPORT_ATTRIBUTE_RW(field)					\
1095 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1096 	if (!i->f->set_rport_##field) {					\
1097 		i->private_rport_attrs[count].attr.mode = S_IRUGO;	\
1098 		i->private_rport_attrs[count].store = NULL;		\
1099 	}								\
1100 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1101 	if (i->f->show_rport_##field)					\
1102 		count++
1103 
1104 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field)				\
1105 {									\
1106 	i->private_rport_attrs[count] = device_attr_rport_##field; \
1107 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
1108 	count++;							\
1109 }
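/*
 * These SETUP_* helpers expect a caller (fc_attach_transport(), not shown
 * here) that walks a local "count" cursor over i->rport_attrs[]; a rough,
 * hypothetical sketch:
 *
 *	count = 0;
 *	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(maxframe_size);
 *	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
 *	...
 *	i->rport_attrs[count] = NULL;	terminates the pointer array
 *
 * For the non-PRIVATE variants, "count" only advances when the LLDD's
 * fc_function_template sets the matching show_rport_* flag; a missing
 * set_rport_* handler merely demotes an RW attribute to read-only.
 */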
1110 
1111 
1112 /* The FC Transport Remote Port Attributes: */
1113 
1114 /* Fixed Remote Port Attributes */
1115 
1116 fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
1117 
1118 static ssize_t
1119 show_fc_rport_supported_classes (struct device *dev,
1120 				 struct device_attribute *attr, char *buf)
1121 {
1122 	struct fc_rport *rport = transport_class_to_rport(dev);
1123 	if (rport->supported_classes == FC_COS_UNSPECIFIED)
1124 		return snprintf(buf, 20, "unspecified\n");
1125 	return get_fc_cos_names(rport->supported_classes, buf);
1126 }
1127 static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
1128 		show_fc_rport_supported_classes, NULL);
1129 
1130 /* Dynamic Remote Port Attributes */
1131 
1132 /*
1133  * dev_loss_tmo attribute
1134  */
1135 static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
1136 {
1137 	char *cp;
1138 
1139 	*val = simple_strtoul(buf, &cp, 0);
1140 	if (*cp && (*cp != '\n'))
1141 		return -EINVAL;
1142 	/*
1143 	 * Check for overflow; dev_loss_tmo is u32
1144 	 */
1145 	if (*val > UINT_MAX)
1146 		return -EINVAL;
1147 
1148 	return 0;
1149 }
1150 
1151 static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
1152 				     unsigned long val)
1153 {
1154 	struct Scsi_Host *shost = rport_to_shost(rport);
1155 	struct fc_internal *i = to_fc_internal(shost->transportt);
1156 
1157 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
1158 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
1159 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
1160 		return -EBUSY;
1161 	/*
1162 	 * Check for overflow; dev_loss_tmo is u32
1163 	 */
1164 	if (val > UINT_MAX)
1165 		return -EINVAL;
1166 
1167 	/*
1168 	 * If fast_io_fail is off we have to cap
1169 	 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
1170 	 */
1171 	if (rport->fast_io_fail_tmo == -1 &&
1172 	    val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
1173 		return -EINVAL;
1174 
1175 	i->f->set_rport_dev_loss_tmo(rport, val);
1176 	return 0;
1177 }
1178 
1179 fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
1180 static ssize_t
1181 store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
1182 			    const char *buf, size_t count)
1183 {
1184 	struct fc_rport *rport = transport_class_to_rport(dev);
1185 	unsigned long val;
1186 	int rc;
1187 
1188 	rc = fc_str_to_dev_loss(buf, &val);
1189 	if (rc)
1190 		return rc;
1191 
1192 	rc = fc_rport_set_dev_loss_tmo(rport, val);
1193 	if (rc)
1194 		return rc;
1195 	return count;
1196 }
1197 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
1198 		show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
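/*
 * Illustrative sysfs usage (path is an example): an administrator can read
 * or change the timer per remote port, e.g.
 *
 *	echo 30 > /sys/class/fc_remote_ports/rport-3:0-1/dev_loss_tmo
 *
 * subject to the port-state and SCSI_DEVICE_BLOCK_MAX_TIMEOUT checks in
 * fc_rport_set_dev_loss_tmo() above.
 */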
1199 
1200 
1201 /* Private Remote Port Attributes */
1202 
1203 fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1204 fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1205 fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
1206 
1207 static ssize_t
1208 show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
1209 		     char *buf)
1210 {
1211 	struct fc_rport *rport = transport_class_to_rport(dev);
1212 
1213 	/* identify any roles that are port_id specific */
1214 	if ((rport->port_id != -1) &&
1215 	    (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
1216 					FC_WELLKNOWN_PORTID_MASK) {
1217 		switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
1218 		case FC_FPORT_PORTID:
1219 			return snprintf(buf, 30, "Fabric Port\n");
1220 		case FC_FABCTLR_PORTID:
1221 			return snprintf(buf, 30, "Fabric Controller\n");
1222 		case FC_DIRSRVR_PORTID:
1223 			return snprintf(buf, 30, "Directory Server\n");
1224 		case FC_TIMESRVR_PORTID:
1225 			return snprintf(buf, 30, "Time Server\n");
1226 		case FC_MGMTSRVR_PORTID:
1227 			return snprintf(buf, 30, "Management Server\n");
1228 		default:
1229 			return snprintf(buf, 30, "Unknown Fabric Entity\n");
1230 		}
1231 	} else {
1232 		if (rport->roles == FC_PORT_ROLE_UNKNOWN)
1233 			return snprintf(buf, 20, "unknown\n");
1234 		return get_fc_port_roles_names(rport->roles, buf);
1235 	}
1236 }
1237 static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
1238 		show_fc_rport_roles, NULL);
1239 
1240 static ssize_t fc_rport_set_marginal_state(struct device *dev,
1241 						struct device_attribute *attr,
1242 						const char *buf, size_t count)
1243 {
1244 	struct fc_rport *rport = transport_class_to_rport(dev);
1245 	enum fc_port_state port_state;
1246 	int ret = 0;
1247 
1248 	ret = get_fc_port_state_match(buf, &port_state);
1249 	if (ret)
1250 		return -EINVAL;
1251 	if (port_state == FC_PORTSTATE_MARGINAL) {
1252 		/*
1253 		 * Change the state to Marginal only if the
1254		 * current rport state is Online.
1255		 * Allow only Online->Marginal.
1256 		 */
1257 		if (rport->port_state == FC_PORTSTATE_ONLINE)
1258 			rport->port_state = port_state;
1259 		else
1260 			return -EINVAL;
1261 	} else if (port_state == FC_PORTSTATE_ONLINE) {
1262 		/*
1263 		 * Change the state to Online only if the
1264		 * current rport state is Marginal.
1265		 * Allow only Marginal->Online.
1266 		 */
1267 		if (rport->port_state == FC_PORTSTATE_MARGINAL)
1268 			rport->port_state = port_state;
1269 		else
1270 			return -EINVAL;
1271 	} else
1272 		return -EINVAL;
1273 	return count;
1274 }
1275 
1276 static ssize_t
1277 show_fc_rport_port_state(struct device *dev,
1278 				struct device_attribute *attr, char *buf)
1279 {
1280 	const char *name;
1281 	struct fc_rport *rport = transport_class_to_rport(dev);
1282 
1283 	name = get_fc_port_state_name(rport->port_state);
1284 	if (!name)
1285 		return -EINVAL;
1286 
1287 	return snprintf(buf, 20, "%s\n", name);
1288 }
1289 
1290 static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200,
1291 			show_fc_rport_port_state, fc_rport_set_marginal_state);
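/*
 * Illustrative sysfs usage (path is an example): a monitoring daemon that
 * learns of a deteriorating link (e.g. via an FPIN event) can mark the path:
 *
 *	echo Marginal > /sys/class/fc_remote_ports/rport-3:0-1/port_state
 *
 * and later restore it with "echo Online"; any other transition is rejected
 * with -EINVAL by fc_rport_set_marginal_state() above.
 */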
1292 
1293 fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
1294 
1295 /*
1296  * fast_io_fail_tmo attribute
1297  */
1298 static ssize_t
1299 show_fc_rport_fast_io_fail_tmo (struct device *dev,
1300 				struct device_attribute *attr, char *buf)
1301 {
1302 	struct fc_rport *rport = transport_class_to_rport(dev);
1303 
1304 	if (rport->fast_io_fail_tmo == -1)
1305 		return snprintf(buf, 5, "off\n");
1306 	return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
1307 }
1308 
1309 static ssize_t
1310 store_fc_rport_fast_io_fail_tmo(struct device *dev,
1311 				struct device_attribute *attr, const char *buf,
1312 				size_t count)
1313 {
1314 	int val;
1315 	char *cp;
1316 	struct fc_rport *rport = transport_class_to_rport(dev);
1317 
1318 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
1319 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
1320 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
1321 		return -EBUSY;
1322 	if (strncmp(buf, "off", 3) == 0)
1323 		rport->fast_io_fail_tmo = -1;
1324 	else {
1325 		val = simple_strtoul(buf, &cp, 0);
1326 		if ((*cp && (*cp != '\n')) || (val < 0))
1327 			return -EINVAL;
1328 		/*
1329 		 * Cap fast_io_fail by dev_loss_tmo or
1330 		 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
1331 		 */
1332 		if ((val >= rport->dev_loss_tmo) ||
1333 		    (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
1334 			return -EINVAL;
1335 
1336 		rport->fast_io_fail_tmo = val;
1337 	}
1338 	return count;
1339 }
1340 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
1341 	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
1342 
1343 #define fc_rport_fpin_statistic(name)					\
1344 static ssize_t fc_rport_fpinstat_##name(struct device *cd,		\
1345 				  struct device_attribute *attr,	\
1346 				  char *buf)				\
1347 {									\
1348 	struct fc_rport *rport = transport_class_to_rport(cd);		\
1349 									\
1350 	return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name);	\
1351 }									\
1352 static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)
1353 
1354 fc_rport_fpin_statistic(dn);
1355 fc_rport_fpin_statistic(dn_unknown);
1356 fc_rport_fpin_statistic(dn_timeout);
1357 fc_rport_fpin_statistic(dn_unable_to_route);
1358 fc_rport_fpin_statistic(dn_device_specific);
1359 fc_rport_fpin_statistic(cn);
1360 fc_rport_fpin_statistic(cn_clear);
1361 fc_rport_fpin_statistic(cn_lost_credit);
1362 fc_rport_fpin_statistic(cn_credit_stall);
1363 fc_rport_fpin_statistic(cn_oversubscription);
1364 fc_rport_fpin_statistic(cn_device_specific);
1365 fc_rport_fpin_statistic(li);
1366 fc_rport_fpin_statistic(li_failure_unknown);
1367 fc_rport_fpin_statistic(li_link_failure_count);
1368 fc_rport_fpin_statistic(li_loss_of_sync_count);
1369 fc_rport_fpin_statistic(li_loss_of_signals_count);
1370 fc_rport_fpin_statistic(li_prim_seq_err_count);
1371 fc_rport_fpin_statistic(li_invalid_tx_word_count);
1372 fc_rport_fpin_statistic(li_invalid_crc_count);
1373 fc_rport_fpin_statistic(li_device_specific);
1374 
1375 static struct attribute *fc_rport_statistics_attrs[] = {
1376 	&device_attr_rport_fpin_dn.attr,
1377 	&device_attr_rport_fpin_dn_unknown.attr,
1378 	&device_attr_rport_fpin_dn_timeout.attr,
1379 	&device_attr_rport_fpin_dn_unable_to_route.attr,
1380 	&device_attr_rport_fpin_dn_device_specific.attr,
1381 	&device_attr_rport_fpin_li.attr,
1382 	&device_attr_rport_fpin_li_failure_unknown.attr,
1383 	&device_attr_rport_fpin_li_link_failure_count.attr,
1384 	&device_attr_rport_fpin_li_loss_of_sync_count.attr,
1385 	&device_attr_rport_fpin_li_loss_of_signals_count.attr,
1386 	&device_attr_rport_fpin_li_prim_seq_err_count.attr,
1387 	&device_attr_rport_fpin_li_invalid_tx_word_count.attr,
1388 	&device_attr_rport_fpin_li_invalid_crc_count.attr,
1389 	&device_attr_rport_fpin_li_device_specific.attr,
1390 	&device_attr_rport_fpin_cn.attr,
1391 	&device_attr_rport_fpin_cn_clear.attr,
1392 	&device_attr_rport_fpin_cn_lost_credit.attr,
1393 	&device_attr_rport_fpin_cn_credit_stall.attr,
1394 	&device_attr_rport_fpin_cn_oversubscription.attr,
1395 	&device_attr_rport_fpin_cn_device_specific.attr,
1396 	NULL
1397 };
1398 
1399 static struct attribute_group fc_rport_statistics_group = {
1400 	.name = "statistics",
1401 	.attrs = fc_rport_statistics_attrs,
1402 };
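/*
 * This group is registered on the rport device when the remote port is
 * created (fc_remote_port_create(), not shown in this section), so each
 * counter above appears as a "statistics/fpin_*" file in the remote port's
 * sysfs directory.
 */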
1403 
1404 
1405 /*
1406  * FC SCSI Target Attribute Management
1407  */
1408 
1409 /*
1410  * Note: in the target show function we recognize when the remote
1411  *  port is in the hierarchy and do not allow the driver to get
1412  *  involved in sysfs functions. The driver only gets involved if
1413  *  it's the "old" style that doesn't use rports.
1414  */
1415 #define fc_starget_show_function(field, format_string, sz, cast)	\
1416 static ssize_t								\
1417 show_fc_starget_##field (struct device *dev, 				\
1418 			 struct device_attribute *attr, char *buf)	\
1419 {									\
1420 	struct scsi_target *starget = transport_class_to_starget(dev);	\
1421 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
1422 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1423 	struct fc_rport *rport = starget_to_rport(starget);		\
1424 	if (rport)							\
1425 		fc_starget_##field(starget) = rport->field;		\
1426 	else if (i->f->get_starget_##field)				\
1427 		i->f->get_starget_##field(starget);			\
1428 	return snprintf(buf, sz, format_string, 			\
1429 		cast fc_starget_##field(starget)); 			\
1430 }
1431 
1432 #define fc_starget_rd_attr(field, format_string, sz)			\
1433 	fc_starget_show_function(field, format_string, sz, )		\
1434 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
1435 			 show_fc_starget_##field, NULL)
1436 
1437 #define fc_starget_rd_attr_cast(field, format_string, sz, cast)		\
1438 	fc_starget_show_function(field, format_string, sz, (cast))	\
1439 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
1440 			  show_fc_starget_##field, NULL)
1441 
1442 #define SETUP_STARGET_ATTRIBUTE_RD(field)				\
1443 	i->private_starget_attrs[count] = device_attr_starget_##field; \
1444 	i->private_starget_attrs[count].attr.mode = S_IRUGO;		\
1445 	i->private_starget_attrs[count].store = NULL;			\
1446 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
1447 	if (i->f->show_starget_##field)					\
1448 		count++
1449 
1450 #define SETUP_STARGET_ATTRIBUTE_RW(field)				\
1451 	i->private_starget_attrs[count] = device_attr_starget_##field; \
1452 	if (!i->f->set_starget_##field) {				\
1453 		i->private_starget_attrs[count].attr.mode = S_IRUGO;	\
1454 		i->private_starget_attrs[count].store = NULL;		\
1455 	}								\
1456 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
1457 	if (i->f->show_starget_##field)					\
1458 		count++
1459 
1460 /* The FC Transport SCSI Target Attributes: */
1461 fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1462 fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1463 fc_starget_rd_attr(port_id, "0x%06x\n", 20);
1464 
1465 
1466 /*
1467  * FC Virtual Port Attribute Management
1468  */
1469 
1470 #define fc_vport_show_function(field, format_string, sz, cast)		\
1471 static ssize_t								\
1472 show_fc_vport_##field (struct device *dev, 				\
1473 		       struct device_attribute *attr, char *buf)	\
1474 {									\
1475 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1476 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1477 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1478 	if ((i->f->get_vport_##field) &&				\
1479 	    !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)))	\
1480 		i->f->get_vport_##field(vport);				\
1481 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1482 }
1483 
1484 #define fc_vport_store_function(field)					\
1485 static ssize_t								\
1486 store_fc_vport_##field(struct device *dev,				\
1487 		       struct device_attribute *attr,			\
1488 		       const char *buf,	size_t count)			\
1489 {									\
1490 	int val;							\
1491 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1492 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1493 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1494 	char *cp;							\
1495 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))	\
1496 		return -EBUSY;						\
1497 	val = simple_strtoul(buf, &cp, 0);				\
1498 	if (*cp && (*cp != '\n'))					\
1499 		return -EINVAL;						\
1500 	i->f->set_vport_##field(vport, val);				\
1501 	return count;							\
1502 }
1503 
1504 #define fc_vport_store_str_function(field, slen)			\
1505 static ssize_t								\
1506 store_fc_vport_##field(struct device *dev,				\
1507 		       struct device_attribute *attr, 			\
1508 		       const char *buf,	size_t count)			\
1509 {									\
1510 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1511 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1512 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1513 	unsigned int cnt=count;						\
1514 									\
1515 	/* count may include a LF at end of string */			\
1516 	if (buf[cnt-1] == '\n')						\
1517 		cnt--;							\
1518 	if (cnt > ((slen) - 1))						\
1519 		return -EINVAL;						\
1520 	memcpy(vport->field, buf, cnt);					\
1521 	i->f->set_vport_##field(vport);					\
1522 	return count;							\
1523 }
1524 
1525 #define fc_vport_rd_attr(field, format_string, sz)			\
1526 	fc_vport_show_function(field, format_string, sz, )		\
1527 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1528 			 show_fc_vport_##field, NULL)
1529 
1530 #define fc_vport_rd_attr_cast(field, format_string, sz, cast)		\
1531 	fc_vport_show_function(field, format_string, sz, (cast))	\
1532 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1533 			  show_fc_vport_##field, NULL)
1534 
1535 #define fc_vport_rw_attr(field, format_string, sz)			\
1536 	fc_vport_show_function(field, format_string, sz, )		\
1537 	fc_vport_store_function(field)					\
1538 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1539 			show_fc_vport_##field,				\
1540 			store_fc_vport_##field)
1541 
1542 #define fc_private_vport_show_function(field, format_string, sz, cast)	\
1543 static ssize_t								\
1544 show_fc_vport_##field (struct device *dev,				\
1545 		       struct device_attribute *attr, char *buf)	\
1546 {									\
1547 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1548 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1549 }
1550 
1551 #define fc_private_vport_store_u32_function(field)			\
1552 static ssize_t								\
1553 store_fc_vport_##field(struct device *dev,				\
1554 		       struct device_attribute *attr,			\
1555 		       const char *buf,	size_t count)			\
1556 {									\
1557 	u32 val;							\
1558 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1559 	char *cp;							\
1560 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
1561 		return -EBUSY;						\
1562 	val = simple_strtoul(buf, &cp, 0);				\
1563 	if (*cp && (*cp != '\n'))					\
1564 		return -EINVAL;						\
1565 	vport->field = val;						\
1566 	return count;							\
1567 }
1568 
1569 
1570 #define fc_private_vport_rd_attr(field, format_string, sz)		\
1571 	fc_private_vport_show_function(field, format_string, sz, )	\
1572 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1573 			 show_fc_vport_##field, NULL)
1574 
1575 #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast)	\
1576 	fc_private_vport_show_function(field, format_string, sz, (cast)) \
1577 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1578 			  show_fc_vport_##field, NULL)
1579 
1580 #define fc_private_vport_rw_u32_attr(field, format_string, sz)		\
1581 	fc_private_vport_show_function(field, format_string, sz, )	\
1582 	fc_private_vport_store_u32_function(field)			\
1583 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1584 			show_fc_vport_##field,				\
1585 			store_fc_vport_##field)
1586 
1587 
1588 #define fc_private_vport_rd_enum_attr(title, maxlen)			\
1589 static ssize_t								\
1590 show_fc_vport_##title (struct device *dev,				\
1591 		       struct device_attribute *attr,			\
1592 		       char *buf)					\
1593 {									\
1594 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1595 	const char *name;						\
1596 	name = get_fc_##title##_name(vport->title);			\
1597 	if (!name)							\
1598 		return -EINVAL;						\
1599 	return snprintf(buf, maxlen, "%s\n", name);			\
1600 }									\
1601 static FC_DEVICE_ATTR(vport, title, S_IRUGO,			\
1602 			show_fc_vport_##title, NULL)
1603 
1604 
1605 #define SETUP_VPORT_ATTRIBUTE_RD(field)					\
1606 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1607 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1608 	i->private_vport_attrs[count].store = NULL;			\
1609 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1610 	if (i->f->get_##field)						\
1611 		count++
1612 	/* NOTE: Above MACRO differs: checks function not show bit */
1613 
1614 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field)				\
1615 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1616 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1617 	i->private_vport_attrs[count].store = NULL;			\
1618 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1619 	count++
1620 
1621 #define SETUP_VPORT_ATTRIBUTE_WR(field)					\
1622 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1623 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1624 	if (i->f->field)						\
1625 		count++
1626 	/* NOTE: Above MACRO differs: checks function */
1627 
1628 #define SETUP_VPORT_ATTRIBUTE_RW(field)					\
1629 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1630 	if (!i->f->set_vport_##field) {					\
1631 		i->private_vport_attrs[count].attr.mode = S_IRUGO;	\
1632 		i->private_vport_attrs[count].store = NULL;		\
1633 	}								\
1634 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1635 	count++
1636 	/* NOTE: Above MACRO differs: does not check show bit */
1637 
1638 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field)				\
1639 {									\
1640 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1641 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1642 	count++;							\
1643 }
1644 
1645 
1646 /* The FC Transport Virtual Port Attributes: */
1647 
1648 /* Fixed Virtual Port Attributes */
1649 
1650 /* Dynamic Virtual Port Attributes */
1651 
1652 /* Private Virtual Port Attributes */
1653 
1654 fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1655 fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1656 fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1657 fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1658 
1659 static ssize_t
1660 show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1661 		     char *buf)
1662 {
1663 	struct fc_vport *vport = transport_class_to_vport(dev);
1664 
1665 	if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1666 		return snprintf(buf, 20, "unknown\n");
1667 	return get_fc_port_roles_names(vport->roles, buf);
1668 }
1669 static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1670 
1671 fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1672 
1673 fc_private_vport_show_function(symbolic_name, "%s\n",
1674 		FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1675 fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1676 static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1677 		show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1678 
1679 static ssize_t
1680 store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1681 		      const char *buf, size_t count)
1682 {
1683 	struct fc_vport *vport = transport_class_to_vport(dev);
1684 	struct Scsi_Host *shost = vport_to_shost(vport);
1685 	unsigned long flags;
1686 
1687 	spin_lock_irqsave(shost->host_lock, flags);
1688 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1689 		spin_unlock_irqrestore(shost->host_lock, flags);
1690 		return -EBUSY;
1691 	}
1692 	vport->flags |= FC_VPORT_DELETING;
1693 	spin_unlock_irqrestore(shost->host_lock, flags);
1694 
1695 	fc_queue_work(shost, &vport->vport_delete_work);
1696 	return count;
1697 }
1698 static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1699 			NULL, store_fc_vport_delete);
1700 
1701 
1702 /*
1703  * Enable/Disable vport
1704  *  Write "1" to disable, write "0" to enable
1705  */
1706 static ssize_t
1707 store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1708 		       const char *buf,
1709 			   size_t count)
1710 {
1711 	struct fc_vport *vport = transport_class_to_vport(dev);
1712 	struct Scsi_Host *shost = vport_to_shost(vport);
1713 	struct fc_internal *i = to_fc_internal(shost->transportt);
1714 	int stat;
1715 
1716 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1717 		return -EBUSY;
1718 
1719 	if (*buf == '0') {
1720 		if (vport->vport_state != FC_VPORT_DISABLED)
1721 			return -EALREADY;
1722 	} else if (*buf == '1') {
1723 		if (vport->vport_state == FC_VPORT_DISABLED)
1724 			return -EALREADY;
1725 	} else
1726 		return -EINVAL;
1727 
1728 	stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1729 	return stat ? stat : count;
1730 }
1731 static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1732 			NULL, store_fc_vport_disable);
1733 
1734 
1735 /*
1736  * Host Attribute Management
1737  */
1738 
1739 #define fc_host_show_function(field, format_string, sz, cast)		\
1740 static ssize_t								\
1741 show_fc_host_##field (struct device *dev,				\
1742 		      struct device_attribute *attr, char *buf)		\
1743 {									\
1744 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1745 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1746 	if (i->f->get_host_##field)					\
1747 		i->f->get_host_##field(shost);				\
1748 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1749 }
1750 
1751 #define fc_host_store_function(field)					\
1752 static ssize_t								\
1753 store_fc_host_##field(struct device *dev, 				\
1754 		      struct device_attribute *attr,			\
1755 		      const char *buf,	size_t count)			\
1756 {									\
1757 	int val;							\
1758 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1759 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1760 	char *cp;							\
1761 									\
1762 	val = simple_strtoul(buf, &cp, 0);				\
1763 	if (*cp && (*cp != '\n'))					\
1764 		return -EINVAL;						\
1765 	i->f->set_host_##field(shost, val);				\
1766 	return count;							\
1767 }
1768 
1769 #define fc_host_store_str_function(field, slen)				\
1770 static ssize_t								\
1771 store_fc_host_##field(struct device *dev,				\
1772 		      struct device_attribute *attr,			\
1773 		      const char *buf, size_t count)			\
1774 {									\
1775 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1776 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1777 	unsigned int cnt=count;						\
1778 									\
1779 	/* count may include a LF at end of string */			\
1780 	if (buf[cnt-1] == '\n')						\
1781 		cnt--;							\
1782 	if (cnt > ((slen) - 1))						\
1783 		return -EINVAL;						\
1784 	memcpy(fc_host_##field(shost), buf, cnt);			\
1785 	i->f->set_host_##field(shost);					\
1786 	return count;							\
1787 }
1788 
1789 #define fc_host_rd_attr(field, format_string, sz)			\
1790 	fc_host_show_function(field, format_string, sz, )		\
1791 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1792 			 show_fc_host_##field, NULL)
1793 
1794 #define fc_host_rd_attr_cast(field, format_string, sz, cast)		\
1795 	fc_host_show_function(field, format_string, sz, (cast))		\
1796 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1797 			  show_fc_host_##field, NULL)
1798 
1799 #define fc_host_rw_attr(field, format_string, sz)			\
1800 	fc_host_show_function(field, format_string, sz, )		\
1801 	fc_host_store_function(field)					\
1802 static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR,		\
1803 			show_fc_host_##field,				\
1804 			store_fc_host_##field)
1805 
1806 #define fc_host_rd_enum_attr(title, maxlen)				\
1807 static ssize_t								\
1808 show_fc_host_##title (struct device *dev,				\
1809 		      struct device_attribute *attr, char *buf)		\
1810 {									\
1811 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1812 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1813 	const char *name;						\
1814 	if (i->f->get_host_##title)					\
1815 		i->f->get_host_##title(shost);				\
1816 	name = get_fc_##title##_name(fc_host_##title(shost));		\
1817 	if (!name)							\
1818 		return -EINVAL;						\
1819 	return snprintf(buf, maxlen, "%s\n", name);			\
1820 }									\
1821 static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1822 
1823 #define SETUP_HOST_ATTRIBUTE_RD(field)					\
1824 	i->private_host_attrs[count] = device_attr_host_##field;	\
1825 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1826 	i->private_host_attrs[count].store = NULL;			\
1827 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1828 	if (i->f->show_host_##field)					\
1829 		count++
1830 
1831 #define SETUP_HOST_ATTRIBUTE_RD_NS(field)				\
1832 	i->private_host_attrs[count] = device_attr_host_##field;	\
1833 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1834 	i->private_host_attrs[count].store = NULL;			\
1835 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1836 	count++
1837 
1838 #define SETUP_HOST_ATTRIBUTE_RW(field)					\
1839 	i->private_host_attrs[count] = device_attr_host_##field;	\
1840 	if (!i->f->set_host_##field) {					\
1841 		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
1842 		i->private_host_attrs[count].store = NULL;		\
1843 	}								\
1844 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1845 	if (i->f->show_host_##field)					\
1846 		count++
1847 
1848 
1849 #define fc_private_host_show_function(field, format_string, sz, cast)	\
1850 static ssize_t								\
1851 show_fc_host_##field (struct device *dev,				\
1852 		      struct device_attribute *attr, char *buf)		\
1853 {									\
1854 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1855 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1856 }
1857 
1858 #define fc_private_host_rd_attr(field, format_string, sz)		\
1859 	fc_private_host_show_function(field, format_string, sz, )	\
1860 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1861 			 show_fc_host_##field, NULL)
1862 
1863 #define fc_private_host_rd_attr_cast(field, format_string, sz, cast)	\
1864 	fc_private_host_show_function(field, format_string, sz, (cast)) \
1865 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1866 			  show_fc_host_##field, NULL)
1867 
1868 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field)			\
1869 	i->private_host_attrs[count] = device_attr_host_##field;	\
1870 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1871 	i->private_host_attrs[count].store = NULL;			\
1872 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1873 	count++
1874 
1875 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field)			\
1876 {									\
1877 	i->private_host_attrs[count] = device_attr_host_##field;	\
1878 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1879 	count++;							\
1880 }
1881 
1882 
1883 /* Fixed Host Attributes */
1884 
1885 static ssize_t
1886 show_fc_host_supported_classes (struct device *dev,
1887 			        struct device_attribute *attr, char *buf)
1888 {
1889 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1890 
1891 	if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
1892 		return snprintf(buf, 20, "unspecified\n");
1893 
1894 	return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1895 }
1896 static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1897 		show_fc_host_supported_classes, NULL);
1898 
1899 static ssize_t
1900 show_fc_host_supported_fc4s (struct device *dev,
1901 			     struct device_attribute *attr, char *buf)
1902 {
1903 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1904 	return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1905 }
1906 static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1907 		show_fc_host_supported_fc4s, NULL);
1908 
1909 static ssize_t
1910 show_fc_host_supported_speeds (struct device *dev,
1911 			       struct device_attribute *attr, char *buf)
1912 {
1913 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1914 
1915 	if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
1916 		return snprintf(buf, 20, "unknown\n");
1917 
1918 	return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1919 }
1920 static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1921 		show_fc_host_supported_speeds, NULL);
1922 
1923 
1924 fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1925 fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1926 fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1927 			     unsigned long long);
1928 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1929 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1930 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1931 fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
1932 fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1933 fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1934 fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1935 fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1936 fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1937 fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1938 
1939 
1940 /* Dynamic Host Attributes */
1941 
1942 static ssize_t
1943 show_fc_host_active_fc4s (struct device *dev,
1944 			  struct device_attribute *attr, char *buf)
1945 {
1946 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1947 	struct fc_internal *i = to_fc_internal(shost->transportt);
1948 
1949 	if (i->f->get_host_active_fc4s)
1950 		i->f->get_host_active_fc4s(shost);
1951 
1952 	return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1953 }
1954 static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1955 		show_fc_host_active_fc4s, NULL);
1956 
1957 static ssize_t
1958 show_fc_host_speed (struct device *dev,
1959 		    struct device_attribute *attr, char *buf)
1960 {
1961 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1962 	struct fc_internal *i = to_fc_internal(shost->transportt);
1963 
1964 	if (i->f->get_host_speed)
1965 		i->f->get_host_speed(shost);
1966 
1967 	if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1968 		return snprintf(buf, 20, "unknown\n");
1969 
1970 	return get_fc_port_speed_names(fc_host_speed(shost), buf);
1971 }
1972 static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1973 		show_fc_host_speed, NULL);
1974 
1975 
1976 fc_host_rd_attr(port_id, "0x%06x\n", 20);
1977 fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1978 fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1979 fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1980 fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1981 
1982 fc_private_host_show_function(system_hostname, "%s\n",
1983 		FC_SYMBOLIC_NAME_SIZE + 1, )
1984 fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1985 static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1986 		show_fc_host_system_hostname, store_fc_host_system_hostname);
1987 
1988 
1989 /* Private Host Attributes */
1990 
1991 static ssize_t
1992 show_fc_private_host_tgtid_bind_type(struct device *dev,
1993 				     struct device_attribute *attr, char *buf)
1994 {
1995 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1996 	const char *name;
1997 
1998 	name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
1999 	if (!name)
2000 		return -EINVAL;
2001 	return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
2002 }
2003 
2004 #define get_list_head_entry(pos, head, member) 		\
2005 	pos = list_entry((head)->next, typeof(*pos), member)
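/*
 * Note: this is simply the first entry on the list, i.e. equivalent to
 *	pos = list_first_entry(head, typeof(*pos), member);
 */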
2006 
2007 static ssize_t
2008 store_fc_private_host_tgtid_bind_type(struct device *dev,
2009 	struct device_attribute *attr, const char *buf, size_t count)
2010 {
2011 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2012 	struct fc_rport *rport;
2013 	enum fc_tgtid_binding_type val;
2014 	unsigned long flags;
2015 
2016 	if (get_fc_tgtid_bind_type_match(buf, &val))
2017 		return -EINVAL;
2018 
2019 	/* if changing bind type, purge all unused consistent bindings */
2020 	if (val != fc_host_tgtid_bind_type(shost)) {
2021 		spin_lock_irqsave(shost->host_lock, flags);
2022 		while (!list_empty(&fc_host_rport_bindings(shost))) {
2023 			get_list_head_entry(rport,
2024 				&fc_host_rport_bindings(shost), peers);
2025 			list_del(&rport->peers);
2026 			rport->port_state = FC_PORTSTATE_DELETED;
2027 			fc_queue_work(shost, &rport->rport_delete_work);
2028 		}
2029 		spin_unlock_irqrestore(shost->host_lock, flags);
2030 	}
2031 
2032 	fc_host_tgtid_bind_type(shost) = val;
2033 	return count;
2034 }
2035 
2036 static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
2037 			show_fc_private_host_tgtid_bind_type,
2038 			store_fc_private_host_tgtid_bind_type);
2039 
2040 static ssize_t
2041 store_fc_private_host_issue_lip(struct device *dev,
2042 	struct device_attribute *attr, const char *buf, size_t count)
2043 {
2044 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2045 	struct fc_internal *i = to_fc_internal(shost->transportt);
2046 	int ret;
2047 
2048 	/* ignore any data value written to the attribute */
2049 	if (i->f->issue_fc_host_lip) {
2050 		ret = i->f->issue_fc_host_lip(shost);
2051 		return ret ? ret : count;
2052 	}
2053 
2054 	return -ENOENT;
2055 }
2056 
2057 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
2058 			store_fc_private_host_issue_lip);
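/*
 * Usage sketch (userspace, illustrative): any value written triggers a LIP,
 * provided the LLDD supplies issue_fc_host_lip, e.g.
 *
 *	echo 1 > /sys/class/fc_host/host<N>/issue_lip
 */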
2059 
2060 static ssize_t
2061 store_fc_private_host_dev_loss_tmo(struct device *dev,
2062 				   struct device_attribute *attr,
2063 				   const char *buf, size_t count)
2064 {
2065 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2066 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2067 	struct fc_rport *rport;
2068 	unsigned long val, flags;
2069 	int rc;
2070 
2071 	rc = fc_str_to_dev_loss(buf, &val);
2072 	if (rc)
2073 		return rc;
2074 
2075 	fc_host_dev_loss_tmo(shost) = val;
2076 	spin_lock_irqsave(shost->host_lock, flags);
2077 	list_for_each_entry(rport, &fc_host->rports, peers)
2078 		fc_rport_set_dev_loss_tmo(rport, val);
2079 	spin_unlock_irqrestore(shost->host_lock, flags);
2080 	return count;
2081 }
2082 
2083 fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
2084 static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
2085 		      show_fc_host_dev_loss_tmo,
2086 		      store_fc_private_host_dev_loss_tmo);
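/*
 * Usage sketch (userspace, illustrative): writing this attribute updates the
 * fc_host default and pushes the new value to every rport on the host, e.g.
 *
 *	echo 30 > /sys/class/fc_host/host<N>/dev_loss_tmo
 */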
2087 
2088 fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
2089 
2090 /*
2091  * Host Statistics Management
2092  */
2093 
2094 /* Show a given attribute in the statistics group */
2095 static ssize_t
2096 fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
2097 {
2098 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2099 	struct fc_internal *i = to_fc_internal(shost->transportt);
2100 	struct fc_host_statistics *stats;
2101 	ssize_t ret = -ENOENT;
2102 
2103 	if (offset > sizeof(struct fc_host_statistics) ||
2104 	    offset % sizeof(u64) != 0)
2105 		WARN_ON(1);
2106 
2107 	if (i->f->get_fc_host_stats) {
2108 		stats = (i->f->get_fc_host_stats)(shost);
2109 		if (stats)
2110 			ret = snprintf(buf, 20, "0x%llx\n",
2111 			      (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
2112 	}
2113 	return ret;
2114 }
2115 
2116 
2117 /* generate a read-only statistics attribute */
2118 #define fc_host_statistic(name)						\
2119 static ssize_t show_fcstat_##name(struct device *cd,			\
2120 				  struct device_attribute *attr,	\
2121 				  char *buf)				\
2122 {									\
2123 	return fc_stat_show(cd, buf, 					\
2124 			    offsetof(struct fc_host_statistics, name));	\
2125 }									\
2126 static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
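/*
 * Illustrative expansion (tx_frames is just the example):
 * fc_host_statistic(tx_frames) generates roughly
 *
 *	static ssize_t show_fcstat_tx_frames(struct device *cd,
 *					     struct device_attribute *attr,
 *					     char *buf)
 *	{
 *		return fc_stat_show(cd, buf,
 *				offsetof(struct fc_host_statistics, tx_frames));
 *	}
 *
 * plus a read-only device_attr_host_tx_frames, so each statistic is simply an
 * offset into the structure returned by the LLDD's get_fc_host_stats().
 */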
2127 
2128 fc_host_statistic(seconds_since_last_reset);
2129 fc_host_statistic(tx_frames);
2130 fc_host_statistic(tx_words);
2131 fc_host_statistic(rx_frames);
2132 fc_host_statistic(rx_words);
2133 fc_host_statistic(lip_count);
2134 fc_host_statistic(nos_count);
2135 fc_host_statistic(error_frames);
2136 fc_host_statistic(dumped_frames);
2137 fc_host_statistic(link_failure_count);
2138 fc_host_statistic(loss_of_sync_count);
2139 fc_host_statistic(loss_of_signal_count);
2140 fc_host_statistic(prim_seq_protocol_err_count);
2141 fc_host_statistic(invalid_tx_word_count);
2142 fc_host_statistic(invalid_crc_count);
2143 fc_host_statistic(fcp_input_requests);
2144 fc_host_statistic(fcp_output_requests);
2145 fc_host_statistic(fcp_control_requests);
2146 fc_host_statistic(fcp_input_megabytes);
2147 fc_host_statistic(fcp_output_megabytes);
2148 fc_host_statistic(fcp_packet_alloc_failures);
2149 fc_host_statistic(fcp_packet_aborts);
2150 fc_host_statistic(fcp_frame_alloc_failures);
2151 fc_host_statistic(fc_no_free_exch);
2152 fc_host_statistic(fc_no_free_exch_xid);
2153 fc_host_statistic(fc_xid_not_found);
2154 fc_host_statistic(fc_xid_busy);
2155 fc_host_statistic(fc_seq_not_found);
2156 fc_host_statistic(fc_non_bls_resp);
2157 fc_host_statistic(cn_sig_warn);
2158 fc_host_statistic(cn_sig_alarm);
2159 
2160 
2161 #define fc_host_fpin_statistic(name)					\
2162 static ssize_t fc_host_fpinstat_##name(struct device *cd,		\
2163 				  struct device_attribute *attr,	\
2164 				  char *buf)				\
2165 {									\
2166 	struct Scsi_Host *shost = transport_class_to_shost(cd);		\
2167 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);	\
2168 									\
2169 	return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name);	\
2170 }									\
2171 static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL)
2172 
2173 fc_host_fpin_statistic(dn);
2174 fc_host_fpin_statistic(dn_unknown);
2175 fc_host_fpin_statistic(dn_timeout);
2176 fc_host_fpin_statistic(dn_unable_to_route);
2177 fc_host_fpin_statistic(dn_device_specific);
2178 fc_host_fpin_statistic(cn);
2179 fc_host_fpin_statistic(cn_clear);
2180 fc_host_fpin_statistic(cn_lost_credit);
2181 fc_host_fpin_statistic(cn_credit_stall);
2182 fc_host_fpin_statistic(cn_oversubscription);
2183 fc_host_fpin_statistic(cn_device_specific);
2184 fc_host_fpin_statistic(li);
2185 fc_host_fpin_statistic(li_failure_unknown);
2186 fc_host_fpin_statistic(li_link_failure_count);
2187 fc_host_fpin_statistic(li_loss_of_sync_count);
2188 fc_host_fpin_statistic(li_loss_of_signals_count);
2189 fc_host_fpin_statistic(li_prim_seq_err_count);
2190 fc_host_fpin_statistic(li_invalid_tx_word_count);
2191 fc_host_fpin_statistic(li_invalid_crc_count);
2192 fc_host_fpin_statistic(li_device_specific);
2193 
2194 static ssize_t
2195 fc_reset_statistics(struct device *dev, struct device_attribute *attr,
2196 		    const char *buf, size_t count)
2197 {
2198 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2199 	struct fc_internal *i = to_fc_internal(shost->transportt);
2200 
2201 	/* ignore any data value written to the attribute */
2202 	if (i->f->reset_fc_host_stats) {
2203 		i->f->reset_fc_host_stats(shost);
2204 		return count;
2205 	}
2206 
2207 	return -ENOENT;
2208 }
2209 static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
2210 				fc_reset_statistics);
2211 
2212 static struct attribute *fc_statistics_attrs[] = {
2213 	&device_attr_host_seconds_since_last_reset.attr,
2214 	&device_attr_host_tx_frames.attr,
2215 	&device_attr_host_tx_words.attr,
2216 	&device_attr_host_rx_frames.attr,
2217 	&device_attr_host_rx_words.attr,
2218 	&device_attr_host_lip_count.attr,
2219 	&device_attr_host_nos_count.attr,
2220 	&device_attr_host_error_frames.attr,
2221 	&device_attr_host_dumped_frames.attr,
2222 	&device_attr_host_link_failure_count.attr,
2223 	&device_attr_host_loss_of_sync_count.attr,
2224 	&device_attr_host_loss_of_signal_count.attr,
2225 	&device_attr_host_prim_seq_protocol_err_count.attr,
2226 	&device_attr_host_invalid_tx_word_count.attr,
2227 	&device_attr_host_invalid_crc_count.attr,
2228 	&device_attr_host_fcp_input_requests.attr,
2229 	&device_attr_host_fcp_output_requests.attr,
2230 	&device_attr_host_fcp_control_requests.attr,
2231 	&device_attr_host_fcp_input_megabytes.attr,
2232 	&device_attr_host_fcp_output_megabytes.attr,
2233 	&device_attr_host_fcp_packet_alloc_failures.attr,
2234 	&device_attr_host_fcp_packet_aborts.attr,
2235 	&device_attr_host_fcp_frame_alloc_failures.attr,
2236 	&device_attr_host_fc_no_free_exch.attr,
2237 	&device_attr_host_fc_no_free_exch_xid.attr,
2238 	&device_attr_host_fc_xid_not_found.attr,
2239 	&device_attr_host_fc_xid_busy.attr,
2240 	&device_attr_host_fc_seq_not_found.attr,
2241 	&device_attr_host_fc_non_bls_resp.attr,
2242 	&device_attr_host_cn_sig_warn.attr,
2243 	&device_attr_host_cn_sig_alarm.attr,
2244 	&device_attr_host_reset_statistics.attr,
2245 	&device_attr_host_fpin_dn.attr,
2246 	&device_attr_host_fpin_dn_unknown.attr,
2247 	&device_attr_host_fpin_dn_timeout.attr,
2248 	&device_attr_host_fpin_dn_unable_to_route.attr,
2249 	&device_attr_host_fpin_dn_device_specific.attr,
2250 	&device_attr_host_fpin_li.attr,
2251 	&device_attr_host_fpin_li_failure_unknown.attr,
2252 	&device_attr_host_fpin_li_link_failure_count.attr,
2253 	&device_attr_host_fpin_li_loss_of_sync_count.attr,
2254 	&device_attr_host_fpin_li_loss_of_signals_count.attr,
2255 	&device_attr_host_fpin_li_prim_seq_err_count.attr,
2256 	&device_attr_host_fpin_li_invalid_tx_word_count.attr,
2257 	&device_attr_host_fpin_li_invalid_crc_count.attr,
2258 	&device_attr_host_fpin_li_device_specific.attr,
2259 	&device_attr_host_fpin_cn.attr,
2260 	&device_attr_host_fpin_cn_clear.attr,
2261 	&device_attr_host_fpin_cn_lost_credit.attr,
2262 	&device_attr_host_fpin_cn_credit_stall.attr,
2263 	&device_attr_host_fpin_cn_oversubscription.attr,
2264 	&device_attr_host_fpin_cn_device_specific.attr,
2265 	NULL
2266 };
2267 
2268 static struct attribute_group fc_statistics_group = {
2269 	.name = "statistics",
2270 	.attrs = fc_statistics_attrs,
2271 };
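/*
 * The group above surfaces as a "statistics" directory under the fc_host
 * class device, e.g. (illustrative path):
 *
 *	cat /sys/class/fc_host/host<N>/statistics/tx_frames
 *
 * Each read calls the LLDD's get_fc_host_stats() and prints the selected
 * 64-bit counter in hex.
 */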
2272 
2273 
2274 /* Host Vport Attributes */
2275 
2276 static int
2277 fc_parse_wwn(const char *ns, u64 *nm)
2278 {
2279 	unsigned int i, j;
2280 	u8 wwn[8];
2281 
2282 	memset(wwn, 0, sizeof(wwn));
2283 
2284 	/* Validate and store the new name */
2285 	for (i=0, j=0; i < 16; i++) {
2286 		int value;
2287 
2288 		value = hex_to_bin(*ns++);
2289 		if (value >= 0)
2290 			j = (j << 4) | value;
2291 		else
2292 			return -EINVAL;
2293 		if (i % 2) {
2294 			wwn[i/2] = j & 0xff;
2295 			j = 0;
2296 		}
2297 	}
2298 
2299 	*nm = wwn_to_u64(wwn);
2300 
2301 	return 0;
2302 }
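/*
 * Example (made-up WWN): fc_parse_wwn("210000e08b05ade2", &wwn) reads exactly
 * 16 hex digits (no "0x" prefix, no separators; the callers below validate
 * the overall length) and yields wwn == 0x210000e08b05ade2ULL. Any non-hex
 * character in those 16 positions returns -EINVAL.
 */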
2303 
2304 
2305 /*
2306  * "Short-cut" sysfs variable to create a new vport on an FC Host.
2307  * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
2308  * will default to an NPIV-based FCP_Initiator; the WWNs are specified
2309  * as hex characters and may *not* contain any prefixes (e.g. 0x, x, etc).
2310  */
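/*
 * Usage sketch (userspace, made-up WWNs): 16 hex digits of WWPN, a ':',
 * then 16 hex digits of WWNN, e.g.
 *
 *	echo '2101001b32a9d5f0:2001001b32a9d5f0' > \
 *		/sys/class/fc_host/host<N>/vport_create
 */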
2311 static ssize_t
2312 store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
2313 			   const char *buf, size_t count)
2314 {
2315 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2316 	struct fc_vport_identifiers vid;
2317 	struct fc_vport *vport;
2318 	unsigned int cnt=count;
2319 	int stat;
2320 
2321 	memset(&vid, 0, sizeof(vid));
2322 
2323 	/* count may include a LF at end of string */
2324 	if (buf[cnt-1] == '\n')
2325 		cnt--;
2326 
2327 	/* validate we have enough characters for WWPN */
2328 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
2329 		return -EINVAL;
2330 
2331 	stat = fc_parse_wwn(&buf[0], &vid.port_name);
2332 	if (stat)
2333 		return stat;
2334 
2335 	stat = fc_parse_wwn(&buf[17], &vid.node_name);
2336 	if (stat)
2337 		return stat;
2338 
2339 	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
2340 	vid.vport_type = FC_PORTTYPE_NPIV;
2341 	/* vid.symbolic_name is already zeroed */
2342 	vid.disable = false;		/* always enabled */
2343 
2344 	/* we only allow support on Channel 0 !!! */
2345 	stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
2346 	return stat ? stat : count;
2347 }
2348 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
2349 			store_fc_host_vport_create);
2350 
2351 
2352 /*
2353  * "Short-cut" sysfs variable to delete a vport on an FC Host.
2354  * Vport is identified by a string containing "<WWPN>:<WWNN>".
2355  * The WWNs are specified as hex characters and may *not* contain
2356  * any prefixes (e.g. 0x, x, etc).
2357  */
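/*
 * Usage sketch (userspace, made-up WWNs): same "<WWPN>:<WWNN>" format as
 * vport_create, e.g.
 *
 *	echo '2101001b32a9d5f0:2001001b32a9d5f0' > \
 *		/sys/class/fc_host/host<N>/vport_delete
 */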
2358 static ssize_t
2359 store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2360 			   const char *buf, size_t count)
2361 {
2362 	struct Scsi_Host *shost = transport_class_to_shost(dev);
2363 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2364 	struct fc_vport *vport;
2365 	u64 wwpn, wwnn;
2366 	unsigned long flags;
2367 	unsigned int cnt=count;
2368 	int stat, match;
2369 
2370 	/* count may include a LF at end of string */
2371 	if (buf[cnt-1] == '\n')
2372 		cnt--;
2373 
2374 	/* validate we have enough characters for WWPN */
2375 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
2376 		return -EINVAL;
2377 
2378 	stat = fc_parse_wwn(&buf[0], &wwpn);
2379 	if (stat)
2380 		return stat;
2381 
2382 	stat = fc_parse_wwn(&buf[17], &wwnn);
2383 	if (stat)
2384 		return stat;
2385 
2386 	spin_lock_irqsave(shost->host_lock, flags);
2387 	match = 0;
2388 	/* we only allow support on Channel 0 !!! */
2389 	list_for_each_entry(vport, &fc_host->vports, peers) {
2390 		if ((vport->channel == 0) &&
2391 		    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2392 			if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2393 				break;
2394 			vport->flags |= FC_VPORT_DELETING;
2395 			match = 1;
2396 			break;
2397 		}
2398 	}
2399 	spin_unlock_irqrestore(shost->host_lock, flags);
2400 
2401 	if (!match)
2402 		return -ENODEV;
2403 
2404 	stat = fc_vport_terminate(vport);
2405 	return stat ? stat : count;
2406 }
2407 static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
2408 			store_fc_host_vport_delete);
2409 
2410 
2411 static int fc_host_match(struct attribute_container *cont,
2412 			  struct device *dev)
2413 {
2414 	struct Scsi_Host *shost;
2415 	struct fc_internal *i;
2416 
2417 	if (!scsi_is_host_device(dev))
2418 		return 0;
2419 
2420 	shost = dev_to_shost(dev);
2421 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2422 	    != &fc_host_class.class)
2423 		return 0;
2424 
2425 	i = to_fc_internal(shost->transportt);
2426 
2427 	return &i->t.host_attrs.ac == cont;
2428 }
2429 
2430 static int fc_target_match(struct attribute_container *cont,
2431 			    struct device *dev)
2432 {
2433 	struct Scsi_Host *shost;
2434 	struct fc_internal *i;
2435 
2436 	if (!scsi_is_target_device(dev))
2437 		return 0;
2438 
2439 	shost = dev_to_shost(dev->parent);
2440 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2441 	    != &fc_host_class.class)
2442 		return 0;
2443 
2444 	i = to_fc_internal(shost->transportt);
2445 
2446 	return &i->t.target_attrs.ac == cont;
2447 }
2448 
2449 static void fc_rport_dev_release(struct device *dev)
2450 {
2451 	struct fc_rport *rport = dev_to_rport(dev);
2452 	put_device(dev->parent);
2453 	kfree(rport);
2454 }
2455 
2456 int scsi_is_fc_rport(const struct device *dev)
2457 {
2458 	return dev->release == fc_rport_dev_release;
2459 }
2460 EXPORT_SYMBOL(scsi_is_fc_rport);
2461 
2462 static int fc_rport_match(struct attribute_container *cont,
2463 			    struct device *dev)
2464 {
2465 	struct Scsi_Host *shost;
2466 	struct fc_internal *i;
2467 
2468 	if (!scsi_is_fc_rport(dev))
2469 		return 0;
2470 
2471 	shost = dev_to_shost(dev->parent);
2472 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2473 	    != &fc_host_class.class)
2474 		return 0;
2475 
2476 	i = to_fc_internal(shost->transportt);
2477 
2478 	return &i->rport_attr_cont.ac == cont;
2479 }
2480 
2481 
2482 static void fc_vport_dev_release(struct device *dev)
2483 {
2484 	struct fc_vport *vport = dev_to_vport(dev);
2485 	put_device(dev->parent);		/* release kobj parent */
2486 	kfree(vport);
2487 }
2488 
2489 static int scsi_is_fc_vport(const struct device *dev)
2490 {
2491 	return dev->release == fc_vport_dev_release;
2492 }
2493 
2494 static int fc_vport_match(struct attribute_container *cont,
2495 			    struct device *dev)
2496 {
2497 	struct fc_vport *vport;
2498 	struct Scsi_Host *shost;
2499 	struct fc_internal *i;
2500 
2501 	if (!scsi_is_fc_vport(dev))
2502 		return 0;
2503 	vport = dev_to_vport(dev);
2504 
2505 	shost = vport_to_shost(vport);
2506 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
2507 	    != &fc_host_class.class)
2508 		return 0;
2509 
2510 	i = to_fc_internal(shost->transportt);
2511 	return &i->vport_attr_cont.ac == cont;
2512 }
2513 
2514 
2515 /**
2516  * fc_eh_timed_out - FC Transport I/O timeout intercept handler
2517  * @scmd:	The SCSI command which timed out
2518  *
2519  * This routine protects against error handlers getting invoked while a
2520  * rport is in a blocked state, typically due to a temporary loss of
2521  * connectivity. If the error handlers are allowed to proceed, requests
2522  * to abort i/o, reset the target, etc will likely fail as there is no way
2523  * to communicate with the device to perform the requested function. These
2524  * failures may result in the midlayer taking the device offline, requiring
2525  * manual intervention to restore operation.
2526  *
2527  * This routine, called whenever an i/o times out, validates the state of
2528  * the underlying rport. If the rport is blocked, it returns
2529  * SCSI_EH_RESET_TIMER, which will continue to reschedule the timeout.
2530  * Eventually, either the device will return, or devloss_tmo will fire,
2531  * and when the timeout then fires, it will be handled normally.
2532  * If the rport is not blocked, normal error handling continues.
2533  *
2534  * Notes:
2535  *	This routine assumes no locks are held on entry.
2536  */
2537 enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd)
2538 {
2539 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2540 
2541 	if (rport->port_state == FC_PORTSTATE_BLOCKED)
2542 		return SCSI_EH_RESET_TIMER;
2543 
2544 	return SCSI_EH_NOT_HANDLED;
2545 }
2546 EXPORT_SYMBOL(fc_eh_timed_out);
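/*
 * Illustrative sketch (hypothetical LLDD, names made up): a driver normally
 * wires this handler into its SCSI host template so that timeouts on blocked
 * rports are rescheduled rather than escalated to error handling:
 *
 *	static struct scsi_host_template example_fc_sht = {
 *		.module		= THIS_MODULE,
 *		.name		= "example_fc",
 *		.eh_timed_out	= fc_eh_timed_out,
 *		...
 *	};
 */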
2547 
2548 /*
2549  * Called by fc_user_scan to locate an rport on the shost that
2550  * matches the channel and target id, and invoke scsi_scan_target()
2551  * on the rport.
2552  */
2553 static void
2554 fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
2555 {
2556 	struct fc_rport *rport;
2557 	unsigned long flags;
2558 
2559 	spin_lock_irqsave(shost->host_lock, flags);
2560 
2561 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
2562 		if (rport->scsi_target_id == -1)
2563 			continue;
2564 
2565 		if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
2566 			(rport->port_state != FC_PORTSTATE_MARGINAL))
2567 			continue;
2568 
2569 		if ((channel == rport->channel) &&
2570 		    (id == rport->scsi_target_id)) {
2571 			spin_unlock_irqrestore(shost->host_lock, flags);
2572 			scsi_scan_target(&rport->dev, channel, id, lun,
2573 					 SCSI_SCAN_MANUAL);
2574 			return;
2575 		}
2576 	}
2577 
2578 	spin_unlock_irqrestore(shost->host_lock, flags);
2579 }
2580 
2581 /*
2582  * Called via sysfs scan routines. Necessary, as the FC transport
2583  * wants to place all target objects below the rport object. So this
2584  * routine must invoke the scsi_scan_target() routine with the rport
2585  * object as the parent.
2586  */
2587 static int
2588 fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
2589 {
2590 	uint chlo, chhi;
2591 	uint tgtlo, tgthi, tgt;
2592 
2593 	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2594 	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2595 	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2596 		return -EINVAL;
2597 
2598 	if (channel == SCAN_WILD_CARD) {
2599 		chlo = 0;
2600 		chhi = shost->max_channel + 1;
2601 	} else {
2602 		chlo = channel;
2603 		chhi = channel + 1;
2604 	}
2605 
2606 	if (id == SCAN_WILD_CARD) {
2607 		tgtlo = 0;
2608 		tgthi = shost->max_id;
2609 	} else {
2610 		tgtlo = id;
2611 		tgthi = id + 1;
2612 	}
2613 
2614 	for ( ; chlo < chhi; chlo++)
2615 		for (tgt = tgtlo; tgt < tgthi; tgt++)
2616 			fc_user_scan_tgt(shost, chlo, tgt, lun);
2617 
2618 	return 0;
2619 }
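/*
 * Illustrative example (userspace): this path is reached through the shost
 * "scan" attribute, e.g.
 *
 *	echo '0 2 1' > /sys/class/scsi_host/host<N>/scan
 *
 * scans channel 0, target 2, LUN 1 ('-' acts as a wildcard), with the
 * resulting target/LUN objects parented by the matching rport.
 */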
2620 
2621 struct scsi_transport_template *
2622 fc_attach_transport(struct fc_function_template *ft)
2623 {
2624 	int count;
2625 	struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2626 					GFP_KERNEL);
2627 
2628 	if (unlikely(!i))
2629 		return NULL;
2630 
2631 	i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2632 	i->t.target_attrs.ac.class = &fc_transport_class.class;
2633 	i->t.target_attrs.ac.match = fc_target_match;
2634 	i->t.target_size = sizeof(struct fc_starget_attrs);
2635 	transport_container_register(&i->t.target_attrs);
2636 
2637 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2638 	i->t.host_attrs.ac.class = &fc_host_class.class;
2639 	i->t.host_attrs.ac.match = fc_host_match;
2640 	i->t.host_size = sizeof(struct fc_host_attrs);
2641 	if (ft->get_fc_host_stats)
2642 		i->t.host_attrs.statistics = &fc_statistics_group;
2643 	transport_container_register(&i->t.host_attrs);
2644 
2645 	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2646 	i->rport_attr_cont.ac.class = &fc_rport_class.class;
2647 	i->rport_attr_cont.ac.match = fc_rport_match;
2648 	i->rport_attr_cont.statistics = &fc_rport_statistics_group;
2649 	transport_container_register(&i->rport_attr_cont);
2650 
2651 	i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2652 	i->vport_attr_cont.ac.class = &fc_vport_class.class;
2653 	i->vport_attr_cont.ac.match = fc_vport_match;
2654 	transport_container_register(&i->vport_attr_cont);
2655 
2656 	i->f = ft;
2657 
2658 	/* Transport uses the shost workq for scsi scanning */
2659 	i->t.create_work_queue = 1;
2660 
2661 	i->t.user_scan = fc_user_scan;
2662 
2663 	/*
2664 	 * Setup SCSI Target Attributes.
2665 	 */
2666 	count = 0;
2667 	SETUP_STARGET_ATTRIBUTE_RD(node_name);
2668 	SETUP_STARGET_ATTRIBUTE_RD(port_name);
2669 	SETUP_STARGET_ATTRIBUTE_RD(port_id);
2670 
2671 	BUG_ON(count > FC_STARGET_NUM_ATTRS);
2672 
2673 	i->starget_attrs[count] = NULL;
2674 
2675 
2676 	/*
2677 	 * Setup SCSI Host Attributes.
2678 	 */
2679 	count=0;
2680 	SETUP_HOST_ATTRIBUTE_RD(node_name);
2681 	SETUP_HOST_ATTRIBUTE_RD(port_name);
2682 	SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
2683 	SETUP_HOST_ATTRIBUTE_RD(supported_classes);
2684 	SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
2685 	SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
2686 	SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2687 	if (ft->vport_create) {
2688 		SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2689 		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2690 	}
2691 	SETUP_HOST_ATTRIBUTE_RD(serial_number);
2692 	SETUP_HOST_ATTRIBUTE_RD(manufacturer);
2693 	SETUP_HOST_ATTRIBUTE_RD(model);
2694 	SETUP_HOST_ATTRIBUTE_RD(model_description);
2695 	SETUP_HOST_ATTRIBUTE_RD(hardware_version);
2696 	SETUP_HOST_ATTRIBUTE_RD(driver_version);
2697 	SETUP_HOST_ATTRIBUTE_RD(firmware_version);
2698 	SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
2699 
2700 	SETUP_HOST_ATTRIBUTE_RD(port_id);
2701 	SETUP_HOST_ATTRIBUTE_RD(port_type);
2702 	SETUP_HOST_ATTRIBUTE_RD(port_state);
2703 	SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
2704 	SETUP_HOST_ATTRIBUTE_RD(speed);
2705 	SETUP_HOST_ATTRIBUTE_RD(fabric_name);
2706 	SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2707 	SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2708 
2709 	/* Transport-managed attributes */
2710 	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2711 	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2712 	if (ft->issue_fc_host_lip)
2713 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2714 	if (ft->vport_create)
2715 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2716 	if (ft->vport_delete)
2717 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2718 
2719 	BUG_ON(count > FC_HOST_NUM_ATTRS);
2720 
2721 	i->host_attrs[count] = NULL;
2722 
2723 	/*
2724 	 * Setup Remote Port Attributes.
2725 	 */
2726 	count=0;
2727 	SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
2728 	SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
2729 	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2730 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
2731 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
2732 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
2733 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2734 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state);
2735 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2736 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2737 
2738 	BUG_ON(count > FC_RPORT_NUM_ATTRS);
2739 
2740 	i->rport_attrs[count] = NULL;
2741 
2742 	/*
2743 	 * Setup Virtual Port Attributes.
2744 	 */
2745 	count=0;
2746 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2747 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2748 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2749 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2750 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2751 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2752 	SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2753 	SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2754 	SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2755 
2756 	BUG_ON(count > FC_VPORT_NUM_ATTRS);
2757 
2758 	i->vport_attrs[count] = NULL;
2759 
2760 	return &i->t;
2761 }
2762 EXPORT_SYMBOL(fc_attach_transport);
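/*
 * Illustrative sketch (hypothetical LLDD, all names made up): a driver
 * describes its capabilities in a struct fc_function_template, attaches the
 * transport once at module init, and points each Scsi_Host at the returned
 * template before scsi_add_host():
 *
 *	static struct scsi_transport_template *example_fc_transport;
 *
 *	static struct fc_function_template example_fc_functions = {
 *		.show_host_node_name	= 1,
 *		.show_host_port_name	= 1,
 *		.show_host_port_id	= 1,
 *		.get_host_speed		= example_get_host_speed,
 *		.show_host_speed	= 1,
 *		.show_rport_dev_loss_tmo = 1,
 *		.set_rport_dev_loss_tmo	= example_set_rport_dev_loss_tmo,
 *		.dd_fcrport_size	= sizeof(struct example_rport_priv),
 *	};
 *
 *	example_fc_transport = fc_attach_transport(&example_fc_functions);
 *	...
 *	shost->transportt = example_fc_transport;
 *	error = scsi_add_host(shost, &example_pdev->dev);
 *
 * fc_release_transport() undoes the attach on module unload.
 */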
2763 
2764 void fc_release_transport(struct scsi_transport_template *t)
2765 {
2766 	struct fc_internal *i = to_fc_internal(t);
2767 
2768 	transport_container_unregister(&i->t.target_attrs);
2769 	transport_container_unregister(&i->t.host_attrs);
2770 	transport_container_unregister(&i->rport_attr_cont);
2771 	transport_container_unregister(&i->vport_attr_cont);
2772 
2773 	kfree(i);
2774 }
2775 EXPORT_SYMBOL(fc_release_transport);
2776 
2777 /**
2778  * fc_queue_work - Queue work to the fc_host workqueue.
2779  * @shost:	Pointer to Scsi_Host bound to fc_host.
2780  * @work:	Work to queue for execution.
2781  *
2782  * Return value:
2783  * 	1 - work queued for execution
2784  *	0 - work is already queued
2785  *	-EINVAL - work queue doesn't exist
2786  */
2787 static int
2788 fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2789 {
2790 	if (unlikely(!fc_host_work_q(shost))) {
2791 		printk(KERN_ERR
2792 			"ERROR: FC host '%s' attempted to queue work "
2793 			"when no workqueue was created.\n", shost->hostt->name);
2794 		dump_stack();
2795 
2796 		return -EINVAL;
2797 	}
2798 
2799 	return queue_work(fc_host_work_q(shost), work);
2800 }
2801 
2802 /**
2803  * fc_flush_work - Flush a fc_host's workqueue.
2804  * @shost:	Pointer to Scsi_Host bound to fc_host.
2805  */
2806 static void
2807 fc_flush_work(struct Scsi_Host *shost)
2808 {
2809 	if (!fc_host_work_q(shost)) {
2810 		printk(KERN_ERR
2811 			"ERROR: FC host '%s' attempted to flush work "
2812 			"when no workqueue was created.\n", shost->hostt->name);
2813 		dump_stack();
2814 		return;
2815 	}
2816 
2817 	flush_workqueue(fc_host_work_q(shost));
2818 }
2819 
2820 /**
2821  * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
2822  * @shost:	Pointer to Scsi_Host bound to fc_host.
2823  * @work:	Work to queue for execution.
2824  * @delay:	jiffies to delay the work queuing
2825  *
2826  * Return value:
2827  * 	1 on success / 0 already queued / < 0 for error
2828  */
2829 static int
2830 fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2831 				unsigned long delay)
2832 {
2833 	if (unlikely(!fc_host_devloss_work_q(shost))) {
2834 		printk(KERN_ERR
2835 			"ERROR: FC host '%s' attempted to queue work "
2836 			"when no workqueue was created.\n", shost->hostt->name);
2837 		dump_stack();
2838 
2839 		return -EINVAL;
2840 	}
2841 
2842 	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2843 }
2844 
2845 /**
2846  * fc_flush_devloss - Flush a fc_host's devloss workqueue.
2847  * @shost:	Pointer to Scsi_Host bound to fc_host.
2848  */
2849 static void
2850 fc_flush_devloss(struct Scsi_Host *shost)
2851 {
2852 	if (!fc_host_devloss_work_q(shost)) {
2853 		printk(KERN_ERR
2854 			"ERROR: FC host '%s' attempted to flush work "
2855 			"when no workqueue was created.\n", shost->hostt->name);
2856 		dump_stack();
2857 		return;
2858 	}
2859 
2860 	flush_workqueue(fc_host_devloss_work_q(shost));
2861 }
2862 
2863 
2864 /**
2865  * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
2866  * @shost:	Which &Scsi_Host
2867  *
2868  * This routine is expected to be called immediately preceding
2869  * a driver's call to scsi_remove_host().
2870  *
2871  * WARNING: A driver utilizing the fc_transport, which fails to call
2872  *   this routine prior to scsi_remove_host(), will leave dangling
2873  *   objects in /sys/class/fc_remote_ports. Access to any of these
2874  *   objects can result in a system crash !!!
2875  *
2876  * Notes:
2877  *	This routine assumes no locks are held on entry.
2878  */
2879 void
2880 fc_remove_host(struct Scsi_Host *shost)
2881 {
2882 	struct fc_vport *vport = NULL, *next_vport = NULL;
2883 	struct fc_rport *rport = NULL, *next_rport = NULL;
2884 	struct workqueue_struct *work_q;
2885 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2886 	unsigned long flags;
2887 
2888 	spin_lock_irqsave(shost->host_lock, flags);
2889 
2890 	/* Remove any vports */
2891 	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
2892 		vport->flags |= FC_VPORT_DELETING;
2893 		fc_queue_work(shost, &vport->vport_delete_work);
2894 	}
2895 
2896 	/* Remove any remote ports */
2897 	list_for_each_entry_safe(rport, next_rport,
2898 			&fc_host->rports, peers) {
2899 		list_del(&rport->peers);
2900 		rport->port_state = FC_PORTSTATE_DELETED;
2901 		fc_queue_work(shost, &rport->rport_delete_work);
2902 	}
2903 
2904 	list_for_each_entry_safe(rport, next_rport,
2905 			&fc_host->rport_bindings, peers) {
2906 		list_del(&rport->peers);
2907 		rport->port_state = FC_PORTSTATE_DELETED;
2908 		fc_queue_work(shost, &rport->rport_delete_work);
2909 	}
2910 
2911 	spin_unlock_irqrestore(shost->host_lock, flags);
2912 
2913 	/* flush all scan work items */
2914 	scsi_flush_work(shost);
2915 
2916 	/* flush all stgt delete, and rport delete work items, then kill it  */
2917 	if (fc_host->work_q) {
2918 		work_q = fc_host->work_q;
2919 		fc_host->work_q = NULL;
2920 		destroy_workqueue(work_q);
2921 	}
2922 
2923 	/* flush all devloss work items, then kill it  */
2924 	if (fc_host->devloss_work_q) {
2925 		work_q = fc_host->devloss_work_q;
2926 		fc_host->devloss_work_q = NULL;
2927 		destroy_workqueue(work_q);
2928 	}
2929 }
2930 EXPORT_SYMBOL(fc_remove_host);
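/*
 * Illustrative ordering in a hypothetical LLDD remove path (names made up):
 * fc_remove_host() must come before scsi_remove_host() so the rport and
 * vport objects are torn down first.
 *
 *	static void example_fc_remove_one(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		fc_remove_host(shost);
 *		scsi_remove_host(shost);
 *		scsi_host_put(shost);
 *	}
 */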
2931 
2932 static void fc_terminate_rport_io(struct fc_rport *rport)
2933 {
2934 	struct Scsi_Host *shost = rport_to_shost(rport);
2935 	struct fc_internal *i = to_fc_internal(shost->transportt);
2936 
2937 	/* Involve the LLDD if possible to terminate all io on the rport. */
2938 	if (i->f->terminate_rport_io)
2939 		i->f->terminate_rport_io(rport);
2940 
2941 	/*
2942 	 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
2943 	 */
2944 	scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
2945 }
2946 
2947 /**
2948  * fc_starget_delete - called to delete the scsi descendants of an rport
2949  * @work:	remote port to be operated on.
2950  *
2951  * Deletes target and all sdevs.
2952  */
2953 static void
2954 fc_starget_delete(struct work_struct *work)
2955 {
2956 	struct fc_rport *rport =
2957 		container_of(work, struct fc_rport, stgt_delete_work);
2958 
2959 	fc_terminate_rport_io(rport);
2960 	scsi_remove_target(&rport->dev);
2961 }
2962 
2963 
2964 /**
2965  * fc_rport_final_delete - finish rport termination and delete it.
2966  * @work:	remote port to be deleted.
2967  */
2968 static void
2969 fc_rport_final_delete(struct work_struct *work)
2970 {
2971 	struct fc_rport *rport =
2972 		container_of(work, struct fc_rport, rport_delete_work);
2973 	struct device *dev = &rport->dev;
2974 	struct Scsi_Host *shost = rport_to_shost(rport);
2975 	struct fc_internal *i = to_fc_internal(shost->transportt);
2976 	unsigned long flags;
2977 	int do_callback = 0;
2978 
2979 	fc_terminate_rport_io(rport);
2980 
2981 	/*
2982 	 * if a scan is pending, flush the SCSI Host work_q so that
2983 	 * we can reclaim the rport scan work element.
2984 	 */
2985 	if (rport->flags & FC_RPORT_SCAN_PENDING)
2986 		scsi_flush_work(shost);
2987 
2988 	/*
2989 	 * Cancel any outstanding timers. These should really exist
2990 	 * only when rmmod'ing the LLDD and we're asking for
2991 	 * immediate termination of the rports
2992 	 */
2993 	spin_lock_irqsave(shost->host_lock, flags);
2994 	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2995 		spin_unlock_irqrestore(shost->host_lock, flags);
2996 		if (!cancel_delayed_work(&rport->fail_io_work))
2997 			fc_flush_devloss(shost);
2998 		if (!cancel_delayed_work(&rport->dev_loss_work))
2999 			fc_flush_devloss(shost);
3000 		cancel_work_sync(&rport->scan_work);
3001 		spin_lock_irqsave(shost->host_lock, flags);
3002 		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3003 	}
3004 	spin_unlock_irqrestore(shost->host_lock, flags);
3005 
3006 	/* Delete SCSI target and sdevs */
3007 	if (rport->scsi_target_id != -1)
3008 		fc_starget_delete(&rport->stgt_delete_work);
3009 
3010 	/*
3011 	 * Notify the driver that the rport is now dead. The LLDD will
3012 	 * also guarantee that any communication to the rport is terminated
3013 	 *
3014 	 * Avoid this call if we already called it when we preserved the
3015 	 * rport for the binding.
3016 	 */
3017 	spin_lock_irqsave(shost->host_lock, flags);
3018 	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
3019 	    (i->f->dev_loss_tmo_callbk)) {
3020 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3021 		do_callback = 1;
3022 	}
3023 	spin_unlock_irqrestore(shost->host_lock, flags);
3024 
3025 	if (do_callback)
3026 		i->f->dev_loss_tmo_callbk(rport);
3027 
3028 	fc_bsg_remove(rport->rqst_q);
3029 
3030 	transport_remove_device(dev);
3031 	device_del(dev);
3032 	transport_destroy_device(dev);
3033 	scsi_host_put(shost);			/* for fc_host->rport list */
3034 	put_device(dev);			/* for self-reference */
3035 }
3036 
3037 
3038 /**
3039  * fc_remote_port_create - allocates and creates a remote FC port.
3040  * @shost:	scsi host the remote port is connected to.
3041  * @channel:	Channel on shost port connected to.
3042  * @ids:	The world wide names, fc address, and FC4 port
3043  *		roles for the remote port.
3044  *
3045  * Allocates and creates the remote port structure, including the
3046  * class and sysfs creation.
3047  *
3048  * Notes:
3049  *	This routine assumes no locks are held on entry.
3050  */
3051 static struct fc_rport *
3052 fc_remote_port_create(struct Scsi_Host *shost, int channel,
3053 		      struct fc_rport_identifiers  *ids)
3054 {
3055 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3056 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3057 	struct fc_rport *rport;
3058 	struct device *dev;
3059 	unsigned long flags;
3060 	int error;
3061 	size_t size;
3062 
3063 	size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
3064 	rport = kzalloc(size, GFP_KERNEL);
3065 	if (unlikely(!rport)) {
3066 		printk(KERN_ERR "%s: allocation failure\n", __func__);
3067 		return NULL;
3068 	}
3069 
3070 	rport->maxframe_size = -1;
3071 	rport->supported_classes = FC_COS_UNSPECIFIED;
3072 	rport->dev_loss_tmo = fc_host->dev_loss_tmo;
3073 	memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
3074 	memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
3075 	rport->port_id = ids->port_id;
3076 	rport->roles = ids->roles;
3077 	rport->port_state = FC_PORTSTATE_ONLINE;
3078 	if (fci->f->dd_fcrport_size)
3079 		rport->dd_data = &rport[1];
3080 	rport->channel = channel;
3081 	rport->fast_io_fail_tmo = -1;
3082 
3083 	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
3084 	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
3085 	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
3086 	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
3087 	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
3088 
3089 	spin_lock_irqsave(shost->host_lock, flags);
3090 
3091 	rport->number = fc_host->next_rport_number++;
3092 	if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
3093 	    (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
3094 		rport->scsi_target_id = fc_host->next_target_id++;
3095 	else
3096 		rport->scsi_target_id = -1;
3097 	list_add_tail(&rport->peers, &fc_host->rports);
3098 	scsi_host_get(shost);			/* for fc_host->rport list */
3099 
3100 	spin_unlock_irqrestore(shost->host_lock, flags);
3101 
3102 	dev = &rport->dev;
3103 	device_initialize(dev);			/* takes self reference */
3104 	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
3105 	dev->release = fc_rport_dev_release;
3106 	dev_set_name(dev, "rport-%d:%d-%d",
3107 		     shost->host_no, channel, rport->number);
3108 	transport_setup_device(dev);
3109 
3110 	error = device_add(dev);
3111 	if (error) {
3112 		printk(KERN_ERR "FC Remote Port device_add failed\n");
3113 		goto delete_rport;
3114 	}
3115 	transport_add_device(dev);
3116 	transport_configure_device(dev);
3117 
3118 	fc_bsg_rportadd(shost, rport);
3119 	/* ignore any bsg add error - we just can't do sgio */
3120 
3121 	if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
3122 		/* initiate a scan of the target */
3123 		rport->flags |= FC_RPORT_SCAN_PENDING;
3124 		scsi_queue_work(shost, &rport->scan_work);
3125 	}
3126 
3127 	return rport;
3128 
3129 delete_rport:
3130 	transport_destroy_device(dev);
3131 	spin_lock_irqsave(shost->host_lock, flags);
3132 	list_del(&rport->peers);
3133 	scsi_host_put(shost);			/* for fc_host->rport list */
3134 	spin_unlock_irqrestore(shost->host_lock, flags);
3135 	put_device(dev->parent);
3136 	kfree(rport);
3137 	return NULL;
3138 }
3139 
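/*
 * Illustrative sketch for fc_remote_port_add() below (hypothetical LLDD,
 * made-up values): after discovering a port, a driver fills in the
 * identifiers and registers it:
 *
 *	struct fc_rport_identifiers ids;
 *	struct fc_rport *rport;
 *
 *	ids.node_name = wwn_to_u64(discovered_node_name);
 *	ids.port_name = wwn_to_u64(discovered_port_name);
 *	ids.port_id = discovered_port_id;
 *	ids.roles = FC_PORT_ROLE_FCP_TARGET;
 *	rport = fc_remote_port_add(shost, 0, &ids);
 *
 * and later reports the port's disappearance with fc_remote_port_delete(rport).
 */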
3140 /**
3141  * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
3142  * @shost:	scsi host the remote port is connected to.
3143  * @channel:	Channel on shost port connected to.
3144  * @ids:	The world wide names, fc address, and FC4 port
3145  *		roles for the remote port.
3146  *
3147  * The LLDD calls this routine to notify the transport of the existence
3148  * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn)
3149  * of the port, its FC address (port_id), and the FC4 roles that are
3150  * active for the port.
3151  *
3152  * For ports that are FCP targets (aka scsi targets), the FC transport
3153  * maintains consistent target id bindings on behalf of the LLDD.
3154  * A consistent target id binding is an assignment of a target id to
3155  * a remote port identifier, which persists while the scsi host is
3156  * attached. The remote port can disappear, then later reappear, and
3157  * its target id assignment remains the same. This allows for shifts
3158  * in FC addressing (if binding by wwpn or wwnn) with no apparent
3159  * changes to the scsi subsystem which is based on scsi host number and
3160  * target id values.  Bindings are only valid during the attachment of
3161  * the scsi host. If the host detaches, then later re-attaches, target
3162  * id bindings may change.
3163  *
3164  * This routine is responsible for returning a remote port structure.
3165  * The routine will search the list of remote ports it maintains
3166  * internally on behalf of consistent target id mappings. If found, the
3167  * remote port structure will be reused. Otherwise, a new remote port
3168  * structure will be allocated.
3169  *
3170  * Whenever a remote port is allocated, a new fc_remote_port class
3171  * device is created.
3172  *
3173  * Should not be called from interrupt context.
3174  *
3175  * Notes:
3176  *	This routine assumes no locks are held on entry.
3177  */
3178 struct fc_rport *
3179 fc_remote_port_add(struct Scsi_Host *shost, int channel,
3180 	struct fc_rport_identifiers  *ids)
3181 {
3182 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3183 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3184 	struct fc_rport *rport;
3185 	unsigned long flags;
3186 	int match = 0;
3187 
3188 	/* ensure any stgt delete functions are done */
3189 	fc_flush_work(shost);
3190 
3191 	/*
3192 	 * Search the list of "active" rports, for an rport that has been
3193 	 * deleted, but we've held off the real delete while the target
3194 	 * is in a "blocked" state.
3195 	 */
3196 	spin_lock_irqsave(shost->host_lock, flags);
3197 
3198 	list_for_each_entry(rport, &fc_host->rports, peers) {
3199 
3200 		if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
3201 		     rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
3202 			(rport->channel == channel)) {
3203 
3204 			switch (fc_host->tgtid_bind_type) {
3205 			case FC_TGTID_BIND_BY_WWPN:
3206 			case FC_TGTID_BIND_NONE:
3207 				if (rport->port_name == ids->port_name)
3208 					match = 1;
3209 				break;
3210 			case FC_TGTID_BIND_BY_WWNN:
3211 				if (rport->node_name == ids->node_name)
3212 					match = 1;
3213 				break;
3214 			case FC_TGTID_BIND_BY_ID:
3215 				if (rport->port_id == ids->port_id)
3216 					match = 1;
3217 				break;
3218 			}
3219 
3220 			if (match) {
3221 
3222 				memcpy(&rport->node_name, &ids->node_name,
3223 					sizeof(rport->node_name));
3224 				memcpy(&rport->port_name, &ids->port_name,
3225 					sizeof(rport->port_name));
3226 				rport->port_id = ids->port_id;
3227 
3228 				rport->port_state = FC_PORTSTATE_ONLINE;
3229 				rport->roles = ids->roles;
3230 
3231 				spin_unlock_irqrestore(shost->host_lock, flags);
3232 
3233 				if (fci->f->dd_fcrport_size)
3234 					memset(rport->dd_data, 0,
3235 						fci->f->dd_fcrport_size);
3236 
3237 				/*
3238 				 * If we were not a target, cancel the
3239 				 * io terminate and rport timers, and
3240 				 * we're done.
3241 				 *
3242 				 * If we were a target, but our new role
3243 				 * doesn't indicate a target, leave the
3244 				 * timers running expecting the role to
3245 				 * change as the target fully logs in. If
3246 				 * it doesn't, the target will be torn down.
3247 				 *
3248 				 * If we were a target, and our role shows
3249 				 * we're still a target, cancel the timers
3250 				 * and kick off a scan.
3251 				 */
3252 
3253 				/* was a target, not in roles */
3254 				if ((rport->scsi_target_id != -1) &&
3255 				    (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
3256 					return rport;
3257 
3258 				/*
3259 				 * Stop the fail io and dev_loss timers.
3260 				 * If they flush, the port_state will
3261 				 * be checked and will NOOP the function.
3262 				 */
3263 				if (!cancel_delayed_work(&rport->fail_io_work))
3264 					fc_flush_devloss(shost);
3265 				if (!cancel_delayed_work(&rport->dev_loss_work))
3266 					fc_flush_devloss(shost);
3267 
3268 				spin_lock_irqsave(shost->host_lock, flags);
3269 
3270 				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3271 						  FC_RPORT_DEVLOSS_PENDING |
3272 						  FC_RPORT_DEVLOSS_CALLBK_DONE);
3273 
3274 				spin_unlock_irqrestore(shost->host_lock, flags);
3275 
3276 				/* if target, initiate a scan */
3277 				if (rport->scsi_target_id != -1) {
3278 					scsi_target_unblock(&rport->dev,
3279 							    SDEV_RUNNING);
3280 					spin_lock_irqsave(shost->host_lock,
3281 							  flags);
3282 					rport->flags |= FC_RPORT_SCAN_PENDING;
3283 					scsi_queue_work(shost,
3284 							&rport->scan_work);
3285 					spin_unlock_irqrestore(shost->host_lock,
3286 							flags);
3287 				}
3288 
3289 				fc_bsg_goose_queue(rport);
3290 
3291 				return rport;
3292 			}
3293 		}
3294 	}
3295 
3296 	/*
3297 	 * Search the bindings array
3298 	 * Note: if never a FCP target, you won't be on this list
3299 	 */
3300 	if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
3301 
3302 		/* search for a matching consistent binding */
3303 
3304 		list_for_each_entry(rport, &fc_host->rport_bindings,
3305 					peers) {
3306 			if (rport->channel != channel)
3307 				continue;
3308 
3309 			switch (fc_host->tgtid_bind_type) {
3310 			case FC_TGTID_BIND_BY_WWPN:
3311 				if (rport->port_name == ids->port_name)
3312 					match = 1;
3313 				break;
3314 			case FC_TGTID_BIND_BY_WWNN:
3315 				if (rport->node_name == ids->node_name)
3316 					match = 1;
3317 				break;
3318 			case FC_TGTID_BIND_BY_ID:
3319 				if (rport->port_id == ids->port_id)
3320 					match = 1;
3321 				break;
3322 			case FC_TGTID_BIND_NONE: /* to keep compiler happy */
3323 				break;
3324 			}
3325 
3326 			if (match) {
3327 				list_move_tail(&rport->peers, &fc_host->rports);
3328 				break;
3329 			}
3330 		}
3331 
3332 		if (match) {
3333 			memcpy(&rport->node_name, &ids->node_name,
3334 				sizeof(rport->node_name));
3335 			memcpy(&rport->port_name, &ids->port_name,
3336 				sizeof(rport->port_name));
3337 			rport->port_id = ids->port_id;
3338 			rport->port_state = FC_PORTSTATE_ONLINE;
3339 			rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3340 
3341 			if (fci->f->dd_fcrport_size)
3342 				memset(rport->dd_data, 0,
3343 						fci->f->dd_fcrport_size);
3344 			spin_unlock_irqrestore(shost->host_lock, flags);
3345 
3346 			fc_remote_port_rolechg(rport, ids->roles);
3347 			return rport;
3348 		}
3349 	}
3350 
3351 	spin_unlock_irqrestore(shost->host_lock, flags);
3352 
3353 	/* No consistent binding found - create new remote port entry */
3354 	rport = fc_remote_port_create(shost, channel, ids);
3355 
3356 	return rport;
3357 }
3358 EXPORT_SYMBOL(fc_remote_port_add);
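/*
 * Illustrative sketch (not taken from any driver): a hypothetical LLDD that
 * has just discovered a remote node might register it with the transport
 * roughly as below. The "my_node" structure and its wwnn/wwpn/d_id fields
 * are made-up placeholders.
 *
 *	struct fc_rport_identifiers ids;
 *	struct fc_rport *rport;
 *
 *	ids.node_name = my_node->wwnn;
 *	ids.port_name = my_node->wwpn;
 *	ids.port_id   = my_node->d_id;
 *	ids.roles     = FC_PORT_ROLE_FCP_TARGET;
 *
 *	rport = fc_remote_port_add(shost, 0, &ids);
 *	if (!rport)
 *		return -ENOMEM;
 *	my_node->rport = rport;		// LLDD keeps its own back-pointer
 */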
3359 
3360 
3361 /**
3362  * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
3363  * @rport:	The remote port that no longer exists
3364  *
3365  * The LLDD calls this routine to notify the transport that a remote
3366  * port is no longer part of the topology. Note: Although a port
3367  * may no longer be part of the topology, it may persist in the remote
3368  * ports displayed by the fc_host. We do this under 2 conditions:
3369  *
3370  * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
3371  *    This allows the port to temporarily disappear, then reappear without
3372  *    disrupting the SCSI device tree attached to it. During the "blocked"
3373  *    period the port will still exist.
3374  *
3375  * 2) If the port was a scsi target and disappears for longer than we
3376  *    expect, we'll delete the port and tear down the SCSI device tree
3377  *    attached to it. However, we want to semi-persist the target id assigned
3378  *    to that port in case it eventually reappears. The port structure will
3379  *    remain (although with minimal information) so that the target id
3380  *    bindings also remain.
3381  *
3382  * If the remote port is not an FCP Target, it will be fully torn down
3383  * and deallocated, including the fc_remote_port class device.
3384  *
3385  * If the remote port is an FCP Target, the port will be placed in a
3386  * temporary blocked state. From the LLDD's perspective, the rport no
3387  * longer exists. From the SCSI midlayer's perspective, the SCSI target
3388  * exists, but all sdevs on it are blocked from further I/O. The following
3389  * is then expected.
3390  *
3391  *   If the remote port does not return (signaled by a LLDD call to
3392  *   fc_remote_port_add()) within the dev_loss_tmo timeout, then the
3393  *   scsi target is removed - killing all outstanding i/o and removing the
3394  *   scsi devices attached to it. The port structure will be marked Not
3395  *   Present and be partially cleared, leaving only enough information to
3396  *   recognize the remote port relative to the scsi target id binding if
3397  *   it later appears.  The port will remain as long as there is a valid
3398  *   binding (e.g. until the user changes the binding type or unloads the
3399  *   scsi host with the binding).
3400  *
3401  *   If the remote port returns within the dev_loss_tmo value (and matches
3402  *   according to the target id binding type), the port structure will be
3403  *   reused. If it is no longer a SCSI target, the target will be torn
3404  *   down. If it continues to be a SCSI target, then the target will be
3405  *   unblocked (allowing i/o to be resumed), and a scan will be activated
3406  *   to ensure that all luns are detected.
3407  *
3408  * Called from normal process context only - cannot be called from interrupt.
3409  *
3410  * Notes:
3411  *	This routine assumes no locks are held on entry.
3412  */
3413 void
3414 fc_remote_port_delete(struct fc_rport  *rport)
3415 {
3416 	struct Scsi_Host *shost = rport_to_shost(rport);
3417 	unsigned long timeout = rport->dev_loss_tmo;
3418 	unsigned long flags;
3419 
3420 	/*
3421 	 * No need to flush the fc_host work_q's, as all adds are synchronous.
3422 	 *
3423 	 * We do need to reclaim the rport scan work element, so eventually
3424 	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
3425 	 * there's still a scan pending.
3426 	 */
3427 
3428 	spin_lock_irqsave(shost->host_lock, flags);
3429 
3430 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
3431 		(rport->port_state != FC_PORTSTATE_MARGINAL)) {
3432 		spin_unlock_irqrestore(shost->host_lock, flags);
3433 		return;
3434 	}
3435 
3436 	/*
3437 	 * In the past, if this was not an FCP target, we would
3438 	 * unconditionally just jump to deleting the rport.
3439 	 * However, rports can be used as node containers by the LLDD,
3440 	 * and it's not appropriate to just terminate the rport at the
3441 	 * first sign of a loss in connectivity. The LLDD may want to
3442 	 * send ELS traffic to re-validate the login. If the rport were
3443 	 * deleted immediately, it could no longer serve as a node
3444 	 * container.
3445 	 * So... we now unconditionally wait dev_loss_tmo before
3446 	 * destroying an rport.
3447 	 */
3448 
3449 	rport->port_state = FC_PORTSTATE_BLOCKED;
3450 
3451 	rport->flags |= FC_RPORT_DEVLOSS_PENDING;
3452 
3453 	spin_unlock_irqrestore(shost->host_lock, flags);
3454 
3455 	scsi_target_block(&rport->dev);
3456 
3457 	/* see if we need to kill io faster than waiting for device loss */
3458 	if ((rport->fast_io_fail_tmo != -1) &&
3459 	    (rport->fast_io_fail_tmo < timeout))
3460 		fc_queue_devloss_work(shost, &rport->fail_io_work,
3461 					rport->fast_io_fail_tmo * HZ);
3462 
3463 	/* cap the length the devices can be blocked until they are deleted */
3464 	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
3465 }
3466 EXPORT_SYMBOL(fc_remote_port_delete);
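/*
 * Illustrative sketch (not taken from any driver): when a hypothetical LLDD
 * sees a remote node drop out of the fabric (RSCN, LOGO, link down, ...),
 * it only reports the loss; blocking and dev_loss_tmo handling are done by
 * the transport. "my_node" is a made-up placeholder.
 *
 *	if (my_node->rport) {
 *		fc_remote_port_delete(my_node->rport);
 *		my_node->rport = NULL;	// from the LLDD's view it is gone,
 *					// even though the rport may persist
 *	}
 */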
3467 
3468 /**
3469  * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote port may have changed.
3470  * @rport:	The remote port that changed.
3471  * @roles:      New roles for this port.
3472  *
3473  * Description: The LLDD calls this routine to notify the transport that the
3474  * roles on a remote port may have changed. The largest effect of this is
3475  * that if a port now becomes an FCP target, it must be allocated a
3476  * scsi target id.  If the port is no longer an FCP target, any
3477  * scsi target id value assigned to it will persist in case the
3478  * role changes back to include FCP target. No changes in the scsi
3479  * midlayer will be invoked if the role changes (in the expectation
3480  * that the role will be resumed; if it isn't, normal error processing
3481  * will take place).
3482  *
3483  * Should not be called from interrupt context.
3484  *
3485  * Notes:
3486  *	This routine assumes no locks are held on entry.
3487  */
3488 void
3489 fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
3490 {
3491 	struct Scsi_Host *shost = rport_to_shost(rport);
3492 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3493 	unsigned long flags;
3494 	int create = 0;
3495 
3496 	spin_lock_irqsave(shost->host_lock, flags);
3497 	if (roles & FC_PORT_ROLE_FCP_TARGET) {
3498 		if (rport->scsi_target_id == -1) {
3499 			rport->scsi_target_id = fc_host->next_target_id++;
3500 			create = 1;
3501 		} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
3502 			create = 1;
3503 	}
3504 
3505 	rport->roles = roles;
3506 
3507 	spin_unlock_irqrestore(shost->host_lock, flags);
3508 
3509 	if (create) {
3510 		/*
3511 		 * There may have been a delete timer running on the
3512 		 * port. Ensure that it is cancelled as we now know
3513 		 * the port is an FCP Target.
3514 		 * Note: we know the rport exists and is in an online
3515 		 *  state, as otherwise the LLDD would not have had an
3516 		 *  rport reference to pass us.
3517 		 *
3518 		 * Take no action on the del_timer failure as the state
3519 		 * machine state change will validate the
3520 		 * transaction.
3521 		 */
3522 		if (!cancel_delayed_work(&rport->fail_io_work))
3523 			fc_flush_devloss(shost);
3524 		if (!cancel_delayed_work(&rport->dev_loss_work))
3525 			fc_flush_devloss(shost);
3526 
3527 		spin_lock_irqsave(shost->host_lock, flags);
3528 		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3529 				  FC_RPORT_DEVLOSS_PENDING |
3530 				  FC_RPORT_DEVLOSS_CALLBK_DONE);
3531 		spin_unlock_irqrestore(shost->host_lock, flags);
3532 
3533 		/* ensure any stgt delete functions are done */
3534 		fc_flush_work(shost);
3535 
3536 		scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3537 		/* initiate a scan of the target */
3538 		spin_lock_irqsave(shost->host_lock, flags);
3539 		rport->flags |= FC_RPORT_SCAN_PENDING;
3540 		scsi_queue_work(shost, &rport->scan_work);
3541 		spin_unlock_irqrestore(shost->host_lock, flags);
3542 	}
3543 }
3544 EXPORT_SYMBOL(fc_remote_port_rolechg);
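/*
 * Illustrative sketch (not taken from any driver): a hypothetical LLDD that
 * registered a port before PRLI completed (e.g. with FC_PORT_ROLE_UNKNOWN)
 * and then learns the port provides FCP target functions would report the
 * new roles like this:
 *
 *	u32 roles = rport->roles | FC_PORT_ROLE_FCP_TARGET;
 *
 *	fc_remote_port_rolechg(rport, roles);
 *	// if the target role was added, the transport assigns a scsi
 *	// target id as needed, unblocks the target and queues a scan
 */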
3545 
3546 /**
3547  * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
3548  * @work:	rport target that failed to reappear in the allotted time.
3549  *
3550  * Description: Called when a remote port that was blocked (pending
3551  *              deletion) fails to reappear within the allotted time.
3552  */
3553 static void
3554 fc_timeout_deleted_rport(struct work_struct *work)
3555 {
3556 	struct fc_rport *rport =
3557 		container_of(work, struct fc_rport, dev_loss_work.work);
3558 	struct Scsi_Host *shost = rport_to_shost(rport);
3559 	struct fc_internal *i = to_fc_internal(shost->transportt);
3560 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3561 	unsigned long flags;
3562 	int do_callback = 0;
3563 
3564 	spin_lock_irqsave(shost->host_lock, flags);
3565 
3566 	rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3567 
3568 	/*
3569 	 * If the port is ONLINE, then it came back. If it was a SCSI
3570 	 * target, validate it still is. If not, tear down the
3571 	 * scsi_target on it.
3572 	 */
3573 	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
3574 		(rport->port_state == FC_PORTSTATE_MARGINAL)) &&
3575 	    (rport->scsi_target_id != -1) &&
3576 	    !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3577 		dev_printk(KERN_ERR, &rport->dev,
3578 			"blocked FC remote port time out: no longer"
3579 			" a FCP target, removing starget\n");
3580 		spin_unlock_irqrestore(shost->host_lock, flags);
3581 		scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3582 		fc_queue_work(shost, &rport->stgt_delete_work);
3583 		return;
3584 	}
3585 
3586 	/* NOOP state - we're flushing workq's */
3587 	if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3588 		spin_unlock_irqrestore(shost->host_lock, flags);
3589 		dev_printk(KERN_ERR, &rport->dev,
3590 			"blocked FC remote port time out: leaving"
3591 			" rport%s alone\n",
3592 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3593 		return;
3594 	}
3595 
3596 	if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3597 	    (rport->scsi_target_id == -1)) {
3598 		list_del(&rport->peers);
3599 		rport->port_state = FC_PORTSTATE_DELETED;
3600 		dev_printk(KERN_ERR, &rport->dev,
3601 			"blocked FC remote port time out: removing"
3602 			" rport%s\n",
3603 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3604 		fc_queue_work(shost, &rport->rport_delete_work);
3605 		spin_unlock_irqrestore(shost->host_lock, flags);
3606 		return;
3607 	}
3608 
3609 	dev_printk(KERN_ERR, &rport->dev,
3610 		"blocked FC remote port time out: removing target and "
3611 		"saving binding\n");
3612 
3613 	list_move_tail(&rport->peers, &fc_host->rport_bindings);
3614 
3615 	/*
3616 	 * Note: We do not remove or clear the hostdata area. This allows
3617 	 *   host-specific target data to persist along with the
3618 	 *   scsi_target_id. It's up to the host to manage its hostdata area.
3619 	 */
3620 
3621 	/*
3622 	 * Reinitialize port attributes that may change if the port comes back.
3623 	 */
3624 	rport->maxframe_size = -1;
3625 	rport->supported_classes = FC_COS_UNSPECIFIED;
3626 	rport->roles = FC_PORT_ROLE_UNKNOWN;
3627 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
3628 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3629 
3630 	/*
3631 	 * Pre-emptively kill I/O rather than waiting for the work queue
3632 	 * item to tear down the starget. (The FCoE/libfc folks prefer this,
3633 	 * and prefer to have the rport's port_id still set when it's done.)
3634 	 */
3635 	spin_unlock_irqrestore(shost->host_lock, flags);
3636 	fc_terminate_rport_io(rport);
3637 
3638 	spin_lock_irqsave(shost->host_lock, flags);
3639 
3640 	if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {	/* still missing */
3641 
3642 		/* remove the identifiers that aren't used in the consistent binding */
3643 		switch (fc_host->tgtid_bind_type) {
3644 		case FC_TGTID_BIND_BY_WWPN:
3645 			rport->node_name = -1;
3646 			rport->port_id = -1;
3647 			break;
3648 		case FC_TGTID_BIND_BY_WWNN:
3649 			rport->port_name = -1;
3650 			rport->port_id = -1;
3651 			break;
3652 		case FC_TGTID_BIND_BY_ID:
3653 			rport->node_name = -1;
3654 			rport->port_name = -1;
3655 			break;
3656 		case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
3657 			break;
3658 		}
3659 
3660 		/*
3661 		 * As this only occurs if the remote port (scsi target)
3662 		 * went away and didn't come back - we'll remove
3663 		 * all attached scsi devices.
3664 		 */
3665 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3666 		fc_queue_work(shost, &rport->stgt_delete_work);
3667 
3668 		do_callback = 1;
3669 	}
3670 
3671 	spin_unlock_irqrestore(shost->host_lock, flags);
3672 
3673 	/*
3674 	 * Notify the driver that the rport is now dead. The LLDD will
3675 	 * also guarantee that any communication to the rport is terminated
3676 	 *
3677 	 * Note: we set the CALLBK_DONE flag above to correspond
3678 	 */
3679 	if (do_callback && i->f->dev_loss_tmo_callbk)
3680 		i->f->dev_loss_tmo_callbk(rport);
3681 }
3682 
3683 
3684 /**
3685  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3686  * @work:	rport to terminate io on.
3687  *
3688  * Notes: Only requests failure of the io; it does not guarantee that all
3689  *    io has been flushed prior to returning.
3690  */
3691 static void
3692 fc_timeout_fail_rport_io(struct work_struct *work)
3693 {
3694 	struct fc_rport *rport =
3695 		container_of(work, struct fc_rport, fail_io_work.work);
3696 
3697 	if (rport->port_state != FC_PORTSTATE_BLOCKED)
3698 		return;
3699 
3700 	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3701 	fc_terminate_rport_io(rport);
3702 }
3703 
3704 /**
3705  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3706  * @work:	remote port to be scanned.
3707  */
3708 static void
3709 fc_scsi_scan_rport(struct work_struct *work)
3710 {
3711 	struct fc_rport *rport =
3712 		container_of(work, struct fc_rport, scan_work);
3713 	struct Scsi_Host *shost = rport_to_shost(rport);
3714 	struct fc_internal *i = to_fc_internal(shost->transportt);
3715 	unsigned long flags;
3716 
3717 	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
3718 		(rport->port_state == FC_PORTSTATE_MARGINAL)) &&
3719 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3720 	    !(i->f->disable_target_scan)) {
3721 		scsi_scan_target(&rport->dev, rport->channel,
3722 				 rport->scsi_target_id, SCAN_WILD_CARD,
3723 				 SCSI_SCAN_RESCAN);
3724 	}
3725 
3726 	spin_lock_irqsave(shost->host_lock, flags);
3727 	rport->flags &= ~FC_RPORT_SCAN_PENDING;
3728 	spin_unlock_irqrestore(shost->host_lock, flags);
3729 }
3730 
3731 /**
3732  * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
3733  * @rport: Remote port that scsi_eh is trying to recover.
3734  *
3735  * This routine can be called from a FC LLD scsi_eh callback. It
3736  * blocks the scsi_eh thread until the fc_rport leaves the
3737  * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
3738  * necessary to avoid the scsi_eh failing recovery actions for blocked
3739  * rports which would lead to offlined SCSI devices.
3740  *
3741  * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
3742  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
3743  *	    passed back to scsi_eh.
3744  */
3745 int fc_block_rport(struct fc_rport *rport)
3746 {
3747 	struct Scsi_Host *shost = rport_to_shost(rport);
3748 	unsigned long flags;
3749 
3750 	spin_lock_irqsave(shost->host_lock, flags);
3751 	while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3752 	       !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3753 		spin_unlock_irqrestore(shost->host_lock, flags);
3754 		msleep(1000);
3755 		spin_lock_irqsave(shost->host_lock, flags);
3756 	}
3757 	spin_unlock_irqrestore(shost->host_lock, flags);
3758 
3759 	if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3760 		return FAST_IO_FAIL;
3761 
3762 	return 0;
3763 }
3764 EXPORT_SYMBOL(fc_block_rport);
3765 
3766 /**
3767  * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
3768  * @cmnd: SCSI command that scsi_eh is trying to recover
3769  *
3770  * This routine can be called from a FC LLD scsi_eh callback. It
3771  * blocks the scsi_eh thread until the fc_rport leaves the
3772  * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
3773  * necessary to avoid the scsi_eh failing recovery actions for blocked
3774  * rports which would lead to offlined SCSI devices.
3775  *
3776  * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
3777  *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
3778  *	    passed back to scsi_eh.
3779  */
3780 int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3781 {
3782 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3783 
3784 	if (WARN_ON_ONCE(!rport))
3785 		return FAST_IO_FAIL;
3786 
3787 	return fc_block_rport(rport);
3788 }
3789 EXPORT_SYMBOL(fc_block_scsi_eh);
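/*
 * Illustrative sketch (not taken from any driver): FC LLDD error-handling
 * callbacks typically wait out a blocked rport before attempting recovery;
 * "my_lldd_device_reset" and the LUN-reset helper are made-up names.
 *
 *	static int my_lldd_device_reset(struct scsi_cmnd *cmnd)
 *	{
 *		int rc;
 *
 *		rc = fc_block_scsi_eh(cmnd);
 *		if (rc)
 *			return rc;	// FAST_IO_FAIL: don't attempt reset
 *
 *		// rport is no longer blocked; issue the LUN reset
 *		return my_lldd_send_lun_reset(cmnd) ? FAILED : SUCCESS;
 *	}
 */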
3790 
3791 /**
3792  * fc_eh_should_retry_cmd - Checks if the cmd should be retried or not
3793  * @scmd:        The SCSI command to be checked
3794  *
3795  * This checks the rport state to decide if a cmd is retryable: a cmd is
3796  * not retryable if the rport is not online and the cmd has
3797  * REQ_FAILFAST_TRANSPORT set (it is then failed with DID_TRANSPORT_MARGINAL).
3798  * Returns: false if the cmd is not retryable, true otherwise.
3799  */
3800 bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
3801 {
3802 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
3803 
3804 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
3805 		(scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
3806 		set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
3807 		return false;
3808 	}
3809 	return true;
3810 }
3811 EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd);
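/*
 * Illustrative sketch (not taken from any driver): LLDDs that want this
 * behaviour normally wire the helper straight into their host template;
 * "my_lldd_template" is a made-up name.
 *
 *	static struct scsi_host_template my_lldd_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "my_lldd",
 *		.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
 *		// ... remaining scsi_host_template fields ...
 *	};
 */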
3812 
3813 /**
3814  * fc_vport_setup - allocates and creates a FC virtual port.
3815  * @shost:	scsi host the virtual port is connected to.
3816  * @channel:	Channel on shost port connected to.
3817  * @pdev:	parent device for vport
3818  * @ids:	The world wide names, FC4 port roles, etc for
3819  *              the virtual port.
3820  * @ret_vport:	The pointer to the created vport.
3821  *
3822  * Allocates and creates the vport structure, calls the parent host
3823  * to instantiate the vport, and completes with class and sysfs creation.
3824  *
3825  * Notes:
3826  *	This routine assumes no locks are held on entry.
3827  */
3828 static int
3829 fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3830 	struct fc_vport_identifiers  *ids, struct fc_vport **ret_vport)
3831 {
3832 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3833 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3834 	struct fc_vport *vport;
3835 	struct device *dev;
3836 	unsigned long flags;
3837 	size_t size;
3838 	int error;
3839 
3840 	*ret_vport = NULL;
3841 
3842 	if (!fci->f->vport_create)
3843 		return -ENOENT;
3844 
3845 	size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3846 	vport = kzalloc(size, GFP_KERNEL);
3847 	if (unlikely(!vport)) {
3848 		printk(KERN_ERR "%s: allocation failure\n", __func__);
3849 		return -ENOMEM;
3850 	}
3851 
3852 	vport->vport_state = FC_VPORT_UNKNOWN;
3853 	vport->vport_last_state = FC_VPORT_UNKNOWN;
3854 	vport->node_name = ids->node_name;
3855 	vport->port_name = ids->port_name;
3856 	vport->roles = ids->roles;
3857 	vport->vport_type = ids->vport_type;
3858 	if (fci->f->dd_fcvport_size)
3859 		vport->dd_data = &vport[1];
3860 	vport->shost = shost;
3861 	vport->channel = channel;
3862 	vport->flags = FC_VPORT_CREATING;
3863 	INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3864 
3865 	spin_lock_irqsave(shost->host_lock, flags);
3866 
3867 	if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3868 		spin_unlock_irqrestore(shost->host_lock, flags);
3869 		kfree(vport);
3870 		return -ENOSPC;
3871 	}
3872 	fc_host->npiv_vports_inuse++;
3873 	vport->number = fc_host->next_vport_number++;
3874 	list_add_tail(&vport->peers, &fc_host->vports);
3875 	scsi_host_get(shost);			/* for fc_host->vport list */
3876 
3877 	spin_unlock_irqrestore(shost->host_lock, flags);
3878 
3879 	dev = &vport->dev;
3880 	device_initialize(dev);			/* takes self reference */
3881 	dev->parent = get_device(pdev);		/* takes parent reference */
3882 	dev->release = fc_vport_dev_release;
3883 	dev_set_name(dev, "vport-%d:%d-%d",
3884 		     shost->host_no, channel, vport->number);
3885 	transport_setup_device(dev);
3886 
3887 	error = device_add(dev);
3888 	if (error) {
3889 		printk(KERN_ERR "FC Virtual Port device_add failed\n");
3890 		goto delete_vport;
3891 	}
3892 	transport_add_device(dev);
3893 	transport_configure_device(dev);
3894 
3895 	error = fci->f->vport_create(vport, ids->disable);
3896 	if (error) {
3897 		printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3898 		goto delete_vport_all;
3899 	}
3900 
3901 	/*
3902 	 * if the parent isn't the physical adapter's Scsi_Host, ensure
3903 	 * the Scsi_Host at least contains a symlink to the vport.
3904 	 */
3905 	if (pdev != &shost->shost_gendev) {
3906 		error = sysfs_create_link(&shost->shost_gendev.kobj,
3907 				 &dev->kobj, dev_name(dev));
3908 		if (error)
3909 			printk(KERN_ERR
3910 				"%s: Cannot create vport symlinks for "
3911 				"%s, err=%d\n",
3912 				__func__, dev_name(dev), error);
3913 	}
3914 	spin_lock_irqsave(shost->host_lock, flags);
3915 	vport->flags &= ~FC_VPORT_CREATING;
3916 	spin_unlock_irqrestore(shost->host_lock, flags);
3917 
3918 	dev_printk(KERN_NOTICE, pdev,
3919 			"%s created via shost%d channel %d\n", dev_name(dev),
3920 			shost->host_no, channel);
3921 
3922 	*ret_vport = vport;
3923 
3924 	return 0;
3925 
3926 delete_vport_all:
3927 	transport_remove_device(dev);
3928 	device_del(dev);
3929 delete_vport:
3930 	transport_destroy_device(dev);
3931 	spin_lock_irqsave(shost->host_lock, flags);
3932 	list_del(&vport->peers);
3933 	scsi_host_put(shost);			/* for fc_host->vport list */
3934 	fc_host->npiv_vports_inuse--;
3935 	spin_unlock_irqrestore(shost->host_lock, flags);
3936 	put_device(dev->parent);
3937 	kfree(vport);
3938 
3939 	return error;
3940 }
3941 
3942 /**
3943  * fc_vport_create - Admin App or LLDD requests creation of a vport
3944  * @shost:	scsi host the virtual port is connected to.
3945  * @channel:	channel on shost port connected to.
3946  * @ids:	The world wide names, FC4 port roles, etc for
3947  *              the virtual port.
3948  *
3949  * Notes:
3950  *	This routine assumes no locks are held on entry.
3951  */
3952 struct fc_vport *
3953 fc_vport_create(struct Scsi_Host *shost, int channel,
3954 	struct fc_vport_identifiers *ids)
3955 {
3956 	int stat;
3957 	struct fc_vport *vport;
3958 
3959 	stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3960 		 ids, &vport);
3961 	return stat ? NULL : vport;
3962 }
3963 EXPORT_SYMBOL(fc_vport_create);
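/*
 * Illustrative sketch (not taken from any driver): an in-kernel user (or an
 * LLDD) wanting to instantiate an NPIV virtual port could do so roughly as
 * below; the wwnn/wwpn values are made-up placeholders.
 *
 *	struct fc_vport_identifiers vid = {
 *		.node_name  = my_new_wwnn,
 *		.port_name  = my_new_wwpn,
 *		.roles      = FC_PORT_ROLE_FCP_INITIATOR,
 *		.vport_type = FC_PORTTYPE_NPIV,
 *		.disable    = false,
 *	};
 *	struct fc_vport *vport;
 *
 *	vport = fc_vport_create(shost, 0, &vid);
 *	if (!vport)
 *		return -EIO;	// no NPIV support, out of vports, ...
 */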
3964 
3965 /**
3966  * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3967  * @vport:	fc_vport to be terminated
3968  *
3969  * Calls the LLDD vport_delete() function, then deallocates and removes
3970  * the vport from the shost and object tree.
3971  *
3972  * Notes:
3973  *	This routine assumes no locks are held on entry.
3974  */
3975 int
3976 fc_vport_terminate(struct fc_vport *vport)
3977 {
3978 	struct Scsi_Host *shost = vport_to_shost(vport);
3979 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3980 	struct fc_internal *i = to_fc_internal(shost->transportt);
3981 	struct device *dev = &vport->dev;
3982 	unsigned long flags;
3983 	int stat;
3984 
3985 	if (i->f->vport_delete)
3986 		stat = i->f->vport_delete(vport);
3987 	else
3988 		stat = -ENOENT;
3989 
3990 	spin_lock_irqsave(shost->host_lock, flags);
3991 	vport->flags &= ~FC_VPORT_DELETING;
3992 	if (!stat) {
3993 		vport->flags |= FC_VPORT_DELETED;
3994 		list_del(&vport->peers);
3995 		fc_host->npiv_vports_inuse--;
3996 		scsi_host_put(shost);		/* for fc_host->vport list */
3997 	}
3998 	spin_unlock_irqrestore(shost->host_lock, flags);
3999 
4000 	if (stat)
4001 		return stat;
4002 
4003 	if (dev->parent != &shost->shost_gendev)
4004 		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
4005 	transport_remove_device(dev);
4006 	device_del(dev);
4007 	transport_destroy_device(dev);
4008 
4009 	/*
4010 	 * Removing our self-reference should mean our
4011 	 * release function gets called, which will drop the remaining
4012 	 * parent reference and free the data structure.
4013 	 */
4014 	put_device(dev);			/* for self-reference */
4015 
4016 	return 0; /* SUCCESS */
4017 }
4018 EXPORT_SYMBOL(fc_vport_terminate);
4019 
4020 /**
4021  * fc_vport_sched_delete - workq-based delete request for a vport
4022  * @work:	vport to be deleted.
4023  */
4024 static void
4025 fc_vport_sched_delete(struct work_struct *work)
4026 {
4027 	struct fc_vport *vport =
4028 		container_of(work, struct fc_vport, vport_delete_work);
4029 	int stat;
4030 
4031 	stat = fc_vport_terminate(vport);
4032 	if (stat)
4033 		dev_printk(KERN_ERR, vport->dev.parent,
4034 			"%s: %s (created via shost%d channel %d) could not "
4035 			"be deleted - error %d\n", __func__,
4036 			dev_name(&vport->dev), vport->shost->host_no,
4037 			vport->channel, stat);
4038 }
4039 
4040 
4041 /*
4042  * BSG support
4043  */
4044 
4045 /**
4046  * fc_bsg_job_timeout - handler for when a bsg request times out
4047  * @req:	request that timed out
4048  */
4049 static enum blk_eh_timer_return
4050 fc_bsg_job_timeout(struct request *req)
4051 {
4052 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
4053 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4054 	struct fc_rport *rport = fc_bsg_to_rport(job);
4055 	struct fc_internal *i = to_fc_internal(shost->transportt);
4056 	int err = 0, inflight = 0;
4057 
4058 	if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
4059 		return BLK_EH_RESET_TIMER;
4060 
4061 	inflight = bsg_job_get(job);
4062 
4063 	if (inflight && i->f->bsg_timeout) {
4064 		/* call LLDD to abort the i/o as it has timed out */
4065 		err = i->f->bsg_timeout(job);
4066 		if (err == -EAGAIN) {
4067 			bsg_job_put(job);
4068 			return BLK_EH_RESET_TIMER;
4069 		} else if (err)
4070 			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
4071 				"abort failed with status %d\n", err);
4072 	}
4073 
4074 	/* the blk_end_sync_io() doesn't check the error */
4075 	if (inflight)
4076 		blk_mq_end_request(req, BLK_STS_IOERR);
4077 	return BLK_EH_DONE;
4078 }
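/*
 * Illustrative sketch (not taken from any driver): an LLDD ->bsg_timeout()
 * callback is expected to try to abort the timed-out job; returning -EAGAIN
 * asks the transport to restart the timer instead of failing the request.
 * "my_lldd_abort_passthru" is a made-up helper.
 *
 *	static int my_lldd_bsg_timeout(struct bsg_job *job)
 *	{
 *		if (my_lldd_abort_passthru(job) == -EINPROGRESS)
 *			return -EAGAIN;	// abort still pending, wait longer
 *		return 0;		// aborted; transport fails the job
 *	}
 */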
4079 
4080 /**
4081  * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
4082  * @shost:	scsi host the bsg request is for
4083  * @job:	bsg job to be processed
4084  */
4085 static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
4086 {
4087 	struct fc_internal *i = to_fc_internal(shost->transportt);
4088 	struct fc_bsg_request *bsg_request = job->request;
4089 	struct fc_bsg_reply *bsg_reply = job->reply;
4090 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
4091 	int ret;
4092 
4093 	/* check if we really have all the request data needed */
4094 	if (job->request_len < cmdlen) {
4095 		ret = -ENOMSG;
4096 		goto fail_host_msg;
4097 	}
4098 
4099 	/* Validate the host command */
4100 	switch (bsg_request->msgcode) {
4101 	case FC_BSG_HST_ADD_RPORT:
4102 		cmdlen += sizeof(struct fc_bsg_host_add_rport);
4103 		break;
4104 
4105 	case FC_BSG_HST_DEL_RPORT:
4106 		cmdlen += sizeof(struct fc_bsg_host_del_rport);
4107 		break;
4108 
4109 	case FC_BSG_HST_ELS_NOLOGIN:
4110 		cmdlen += sizeof(struct fc_bsg_host_els);
4111 		/* there had better be xmt and rcv payloads */
4112 		if ((!job->request_payload.payload_len) ||
4113 		    (!job->reply_payload.payload_len)) {
4114 			ret = -EINVAL;
4115 			goto fail_host_msg;
4116 		}
4117 		break;
4118 
4119 	case FC_BSG_HST_CT:
4120 		cmdlen += sizeof(struct fc_bsg_host_ct);
4121 		/* there had better be xmt and rcv payloads */
4122 		if ((!job->request_payload.payload_len) ||
4123 		    (!job->reply_payload.payload_len)) {
4124 			ret = -EINVAL;
4125 			goto fail_host_msg;
4126 		}
4127 		break;
4128 
4129 	case FC_BSG_HST_VENDOR:
4130 		cmdlen += sizeof(struct fc_bsg_host_vendor);
4131 		if ((shost->hostt->vendor_id == 0L) ||
4132 		    (bsg_request->rqst_data.h_vendor.vendor_id !=
4133 			shost->hostt->vendor_id)) {
4134 			ret = -ESRCH;
4135 			goto fail_host_msg;
4136 		}
4137 		break;
4138 
4139 	default:
4140 		ret = -EBADR;
4141 		goto fail_host_msg;
4142 	}
4143 
4144 	ret = i->f->bsg_request(job);
4145 	if (!ret)
4146 		return 0;
4147 
4148 fail_host_msg:
4149 	/* return the errno failure code as the only status */
4150 	BUG_ON(job->reply_len < sizeof(uint32_t));
4151 	bsg_reply->reply_payload_rcv_len = 0;
4152 	bsg_reply->result = ret;
4153 	job->reply_len = sizeof(uint32_t);
4154 	bsg_job_done(job, bsg_reply->result,
4155 		       bsg_reply->reply_payload_rcv_len);
4156 	return 0;
4157 }
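/*
 * Illustrative sketch (not taken from any driver): validated jobs are handed
 * to the LLDD's ->bsg_request() callback. A minimal hypothetical handler is
 * shown below; real drivers usually queue the request to hardware and call
 * bsg_job_done() from their completion path. "my_lldd_start_vendor_cmd" is a
 * made-up helper.
 *
 *	static int my_lldd_bsg_request(struct bsg_job *job)
 *	{
 *		struct fc_bsg_request *bsg_request = job->request;
 *		struct fc_bsg_reply *bsg_reply = job->reply;
 *
 *		switch (bsg_request->msgcode) {
 *		case FC_BSG_HST_VENDOR:
 *			// start the vendor-unique command; the completion
 *			// path fills bsg_reply and calls bsg_job_done()
 *			return my_lldd_start_vendor_cmd(job);
 *		default:
 *			bsg_reply->result = -EOPNOTSUPP;
 *			bsg_job_done(job, bsg_reply->result,
 *				     bsg_reply->reply_payload_rcv_len);
 *			return 0;
 *		}
 *	}
 */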
4158 
4159 
4160 /**
4161  * fc_bsg_goose_queue - restart rport queue in case it was stopped
4162  * @rport:	rport to be restarted
4163  */
4164 static void
4165 fc_bsg_goose_queue(struct fc_rport *rport)
4166 {
4167 	struct request_queue *q = rport->rqst_q;
4168 
4169 	if (q)
4170 		blk_mq_run_hw_queues(q, true);
4171 }
4172 
4173 /**
4174  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
4175  * @shost:	scsi host the rport is attached to
4176  * @job:	bsg job to be processed
4177  */
4178 static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
4179 {
4180 	struct fc_internal *i = to_fc_internal(shost->transportt);
4181 	struct fc_bsg_request *bsg_request = job->request;
4182 	struct fc_bsg_reply *bsg_reply = job->reply;
4183 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
4184 	int ret;
4185 
4186 	/* check if we really have all the request data needed */
4187 	if (job->request_len < cmdlen) {
4188 		ret = -ENOMSG;
4189 		goto fail_rport_msg;
4190 	}
4191 
4192 	/* Validate the rport command */
4193 	switch (bsg_request->msgcode) {
4194 	case FC_BSG_RPT_ELS:
4195 		cmdlen += sizeof(struct fc_bsg_rport_els);
4196 		goto check_bidi;
4197 
4198 	case FC_BSG_RPT_CT:
4199 		cmdlen += sizeof(struct fc_bsg_rport_ct);
4200 check_bidi:
4201 		/* there had better be xmt and rcv payloads */
4202 		if ((!job->request_payload.payload_len) ||
4203 		    (!job->reply_payload.payload_len)) {
4204 			ret = -EINVAL;
4205 			goto fail_rport_msg;
4206 		}
4207 		break;
4208 	default:
4209 		ret = -EBADR;
4210 		goto fail_rport_msg;
4211 	}
4212 
4213 	ret = i->f->bsg_request(job);
4214 	if (!ret)
4215 		return 0;
4216 
4217 fail_rport_msg:
4218 	/* return the errno failure code as the only status */
4219 	BUG_ON(job->reply_len < sizeof(uint32_t));
4220 	bsg_reply->reply_payload_rcv_len = 0;
4221 	bsg_reply->result = ret;
4222 	job->reply_len = sizeof(uint32_t);
4223 	bsg_job_done(job, bsg_reply->result,
4224 		       bsg_reply->reply_payload_rcv_len);
4225 	return 0;
4226 }
4227 
4228 static int fc_bsg_dispatch(struct bsg_job *job)
4229 {
4230 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4231 
4232 	if (scsi_is_fc_rport(job->dev))
4233 		return fc_bsg_rport_dispatch(shost, job);
4234 	else
4235 		return fc_bsg_host_dispatch(shost, job);
4236 }
4237 
4238 static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
4239 {
4240 	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
4241 	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
4242 		return BLK_STS_RESOURCE;
4243 
4244 	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
4245 		(rport->port_state != FC_PORTSTATE_MARGINAL))
4246 		return BLK_STS_IOERR;
4247 
4248 	return BLK_STS_OK;
4249 }
4250 
4251 
4252 static int fc_bsg_dispatch_prep(struct bsg_job *job)
4253 {
4254 	struct fc_rport *rport = fc_bsg_to_rport(job);
4255 	blk_status_t ret;
4256 
4257 	ret = fc_bsg_rport_prep(rport);
4258 	switch (ret) {
4259 	case BLK_STS_OK:
4260 		break;
4261 	case BLK_STS_RESOURCE:
4262 		return -EAGAIN;
4263 	default:
4264 		return -EIO;
4265 	}
4266 
4267 	return fc_bsg_dispatch(job);
4268 }
4269 
4270 /**
4271  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
4272  * @shost:	shost for fc_host
4273  * @fc_host:	fc_host to add the bsg structures to
4274  */
4275 static int
4276 fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
4277 {
4278 	struct device *dev = &shost->shost_gendev;
4279 	struct fc_internal *i = to_fc_internal(shost->transportt);
4280 	struct request_queue *q;
4281 	char bsg_name[20];
4282 
4283 	fc_host->rqst_q = NULL;
4284 
4285 	if (!i->f->bsg_request)
4286 		return -ENOTSUPP;
4287 
4288 	snprintf(bsg_name, sizeof(bsg_name),
4289 		 "fc_host%d", shost->host_no);
4290 
4291 	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
4292 				i->f->dd_bsg_size);
4293 	if (IS_ERR(q)) {
4294 		dev_err(dev,
4295 			"fc_host%d: bsg interface failed to initialize - setup queue\n",
4296 			shost->host_no);
4297 		return PTR_ERR(q);
4298 	}
4299 	__scsi_init_queue(shost, q);
4300 	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
4301 	fc_host->rqst_q = q;
4302 	return 0;
4303 }
4304 
4305 /**
4306  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
4307  * @shost:	shost that rport is attached to
4308  * @rport:	rport that the bsg hooks are being attached to
4309  */
4310 static int
4311 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4312 {
4313 	struct device *dev = &rport->dev;
4314 	struct fc_internal *i = to_fc_internal(shost->transportt);
4315 	struct request_queue *q;
4316 
4317 	rport->rqst_q = NULL;
4318 
4319 	if (!i->f->bsg_request)
4320 		return -ENOTSUPP;
4321 
4322 	q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
4323 				fc_bsg_job_timeout, i->f->dd_bsg_size);
4324 	if (IS_ERR(q)) {
4325 		dev_err(dev, "failed to setup bsg queue\n");
4326 		return PTR_ERR(q);
4327 	}
4328 	__scsi_init_queue(shost, q);
4329 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4330 	rport->rqst_q = q;
4331 	return 0;
4332 }
4333 
4334 
4335 /**
4336  * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4337  * @q:	the request_queue that is to be torn down.
4338  *
4339  * Notes:
4340  *   Before unregistering the queue, empty any requests that are blocked.
4343  */
4344 static void
4345 fc_bsg_remove(struct request_queue *q)
4346 {
4347 	bsg_remove_queue(q);
4348 }
4349 
4350 
4351 /* Original Author:  Martin Hicks */
4352 MODULE_AUTHOR("James Smart");
4353 MODULE_DESCRIPTION("FC Transport Attributes");
4354 MODULE_LICENSE("GPL");
4355 
4356 module_init(fc_transport_init);
4357 module_exit(fc_transport_exit);
4358