1 /*
2  *  Fibre Channel transport-specific attributes exported to sysfs.
3  *
4  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  *
20  *  ========
21  *
22  *  Copyright (C) 2004-2007   James Smart, Emulex Corporation
23  *    Rewrite for host, target, device, and remote port attributes,
24  *    statistics, and service functions...
25  *    Add vports, etc
26  *
27  */
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/delay.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport.h>
34 #include <scsi/scsi_transport_fc.h>
35 #include <scsi/scsi_cmnd.h>
36 #include <linux/netlink.h>
37 #include <net/netlink.h>
38 #include <scsi/scsi_netlink_fc.h>
39 #include <scsi/scsi_bsg_fc.h>
40 #include "scsi_priv.h"
41 #include "scsi_transport_fc_internal.h"
42 
43 static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
44 static void fc_vport_sched_delete(struct work_struct *work);
45 static int fc_vport_setup(struct Scsi_Host *shost, int channel,
46 	struct device *pdev, struct fc_vport_identifiers  *ids,
47 	struct fc_vport **vport);
48 static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
49 static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
50 static void fc_bsg_remove(struct request_queue *);
51 static void fc_bsg_goose_queue(struct fc_rport *);
52 
53 /*
54  * Redefine so that we can have identically named attributes in the
55  * sdev/starget/host objects.
56  */
57 #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store)		\
58 struct device_attribute device_attr_##_prefix##_##_name = 	\
59 	__ATTR(_name,_mode,_show,_store)
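/*
 * For example (illustrative only): FC_DEVICE_ATTR(rport, port_id, S_IRUGO,
 * show_fc_rport_port_id, NULL) declares a struct device_attribute named
 * device_attr_rport_port_id, so the rport, starget, and host objects can
 * each expose their own "port_id" attribute without symbol collisions.
 */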
60 
61 #define fc_enum_name_search(title, table_type, table)			\
62 static const char *get_fc_##title##_name(enum table_type table_key)	\
63 {									\
64 	int i;								\
65 	char *name = NULL;						\
66 									\
67 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
68 		if (table[i].value == table_key) {			\
69 			name = table[i].name;				\
70 			break;						\
71 		}							\
72 	}								\
73 	return name;							\
74 }
75 
76 #define fc_enum_name_match(title, table_type, table)			\
77 static int get_fc_##title##_match(const char *table_key,		\
78 		enum table_type *value)					\
79 {									\
80 	int i;								\
81 									\
82 	for (i = 0; i < ARRAY_SIZE(table); i++) {			\
83 		if (strncmp(table_key, table[i].name,			\
84 				table[i].matchlen) == 0) {		\
85 			*value = table[i].value;			\
86 			return 0; /* success */				\
87 		}							\
88 	}								\
89 	return 1; /* failure */						\
90 }
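/*
 * Illustrative note: fc_enum_name_search(port_type, fc_port_type,
 * fc_port_type_names) below expands to get_fc_port_type_name(), which maps
 * an enum value to its string (or NULL if the value is not in the table);
 * fc_enum_name_match() generates the reverse string-to-enum lookup,
 * returning 0 on success and 1 on failure.
 */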
91 
92 
93 /* Convert fc_port_type values to ascii string name */
94 static struct {
95 	enum fc_port_type	value;
96 	char			*name;
97 } fc_port_type_names[] = {
98 	{ FC_PORTTYPE_UNKNOWN,		"Unknown" },
99 	{ FC_PORTTYPE_OTHER,		"Other" },
100 	{ FC_PORTTYPE_NOTPRESENT,	"Not Present" },
101 	{ FC_PORTTYPE_NPORT,	"NPort (fabric via point-to-point)" },
102 	{ FC_PORTTYPE_NLPORT,	"NLPort (fabric via loop)" },
103 	{ FC_PORTTYPE_LPORT,	"LPort (private loop)" },
104 	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
105 	{ FC_PORTTYPE_NPIV,		"NPIV VPORT" },
106 };
107 fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
108 #define FC_PORTTYPE_MAX_NAMELEN		50
109 
110 /* Reuse fc_port_type enum function for vport_type */
111 #define get_fc_vport_type_name get_fc_port_type_name
112 
113 
114 /* Convert fc_host_event_code values to ascii string name */
115 static const struct {
116 	enum fc_host_event_code		value;
117 	char				*name;
118 } fc_host_event_code_names[] = {
119 	{ FCH_EVT_LIP,			"lip" },
120 	{ FCH_EVT_LINKUP,		"link_up" },
121 	{ FCH_EVT_LINKDOWN,		"link_down" },
122 	{ FCH_EVT_LIPRESET,		"lip_reset" },
123 	{ FCH_EVT_RSCN,			"rscn" },
124 	{ FCH_EVT_ADAPTER_CHANGE,	"adapter_chg" },
125 	{ FCH_EVT_PORT_UNKNOWN,		"port_unknown" },
126 	{ FCH_EVT_PORT_ONLINE,		"port_online" },
127 	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
128 	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
129 	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
130 	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
131 };
132 fc_enum_name_search(host_event_code, fc_host_event_code,
133 		fc_host_event_code_names)
134 #define FC_HOST_EVENT_CODE_MAX_NAMELEN	30
135 
136 
137 /* Convert fc_port_state values to ascii string name */
138 static struct {
139 	enum fc_port_state	value;
140 	char			*name;
141 } fc_port_state_names[] = {
142 	{ FC_PORTSTATE_UNKNOWN,		"Unknown" },
143 	{ FC_PORTSTATE_NOTPRESENT,	"Not Present" },
144 	{ FC_PORTSTATE_ONLINE,		"Online" },
145 	{ FC_PORTSTATE_OFFLINE,		"Offline" },
146 	{ FC_PORTSTATE_BLOCKED,		"Blocked" },
147 	{ FC_PORTSTATE_BYPASSED,	"Bypassed" },
148 	{ FC_PORTSTATE_DIAGNOSTICS,	"Diagnostics" },
149 	{ FC_PORTSTATE_LINKDOWN,	"Linkdown" },
150 	{ FC_PORTSTATE_ERROR,		"Error" },
151 	{ FC_PORTSTATE_LOOPBACK,	"Loopback" },
152 	{ FC_PORTSTATE_DELETED,		"Deleted" },
153 };
154 fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
155 #define FC_PORTSTATE_MAX_NAMELEN	20
156 
157 
158 /* Convert fc_vport_state values to ascii string name */
159 static struct {
160 	enum fc_vport_state	value;
161 	char			*name;
162 } fc_vport_state_names[] = {
163 	{ FC_VPORT_UNKNOWN,		"Unknown" },
164 	{ FC_VPORT_ACTIVE,		"Active" },
165 	{ FC_VPORT_DISABLED,		"Disabled" },
166 	{ FC_VPORT_LINKDOWN,		"Linkdown" },
167 	{ FC_VPORT_INITIALIZING,	"Initializing" },
168 	{ FC_VPORT_NO_FABRIC_SUPP,	"No Fabric Support" },
169 	{ FC_VPORT_NO_FABRIC_RSCS,	"No Fabric Resources" },
170 	{ FC_VPORT_FABRIC_LOGOUT,	"Fabric Logout" },
171 	{ FC_VPORT_FABRIC_REJ_WWN,	"Fabric Rejected WWN" },
172 	{ FC_VPORT_FAILED,		"VPort Failed" },
173 };
174 fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
175 #define FC_VPORTSTATE_MAX_NAMELEN	24
176 
177 /* Reuse fc_vport_state enum function for vport_last_state */
178 #define get_fc_vport_last_state_name get_fc_vport_state_name
179 
180 
181 /* Convert fc_tgtid_binding_type values to ascii string name */
182 static const struct {
183 	enum fc_tgtid_binding_type	value;
184 	char				*name;
185 	int				matchlen;
186 } fc_tgtid_binding_type_names[] = {
187 	{ FC_TGTID_BIND_NONE, "none", 4 },
188 	{ FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
189 	{ FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
190 	{ FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
191 };
192 fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
193 		fc_tgtid_binding_type_names)
194 fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
195 		fc_tgtid_binding_type_names)
196 #define FC_BINDTYPE_MAX_NAMELEN	30
197 
198 
199 #define fc_bitfield_name_search(title, table)			\
200 static ssize_t							\
201 get_fc_##title##_names(u32 table_key, char *buf)		\
202 {								\
203 	char *prefix = "";					\
204 	ssize_t len = 0;					\
205 	int i;							\
206 								\
207 	for (i = 0; i < ARRAY_SIZE(table); i++) {		\
208 		if (table[i].value & table_key) {		\
209 			len += sprintf(buf + len, "%s%s",	\
210 				prefix, table[i].name);		\
211 			prefix = ", ";				\
212 		}						\
213 	}							\
214 	len += sprintf(buf + len, "\n");			\
215 	return len;						\
216 }
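/*
 * For example (illustrative only): the get_fc_cos_names() helper generated
 * below renders (FC_COS_CLASS2 | FC_COS_CLASS3) as "Class 2, Class 3\n"
 * in the supplied sysfs buffer.
 */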
217 
218 
219 /* Convert FC_COS bit values to ascii string name */
220 static const struct {
221 	u32 			value;
222 	char			*name;
223 } fc_cos_names[] = {
224 	{ FC_COS_CLASS1,	"Class 1" },
225 	{ FC_COS_CLASS2,	"Class 2" },
226 	{ FC_COS_CLASS3,	"Class 3" },
227 	{ FC_COS_CLASS4,	"Class 4" },
228 	{ FC_COS_CLASS6,	"Class 6" },
229 };
230 fc_bitfield_name_search(cos, fc_cos_names)
231 
232 
233 /* Convert FC_PORTSPEED bit values to ascii string name */
234 static const struct {
235 	u32 			value;
236 	char			*name;
237 } fc_port_speed_names[] = {
238 	{ FC_PORTSPEED_1GBIT,		"1 Gbit" },
239 	{ FC_PORTSPEED_2GBIT,		"2 Gbit" },
240 	{ FC_PORTSPEED_4GBIT,		"4 Gbit" },
241 	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
242 	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
243 	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
244 	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
245 };
246 fc_bitfield_name_search(port_speed, fc_port_speed_names)
247 
248 
249 static int
250 show_fc_fc4s (char *buf, u8 *fc4_list)
251 {
252 	int i, len=0;
253 
254 	for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
255 		len += sprintf(buf + len , "0x%02x ", *fc4_list);
256 	len += sprintf(buf + len, "\n");
257 	return len;
258 }
259 
260 
261 /* Convert FC_PORT_ROLE bit values to ascii string name */
262 static const struct {
263 	u32 			value;
264 	char			*name;
265 } fc_port_role_names[] = {
266 	{ FC_PORT_ROLE_FCP_TARGET,	"FCP Target" },
267 	{ FC_PORT_ROLE_FCP_INITIATOR,	"FCP Initiator" },
268 	{ FC_PORT_ROLE_IP_PORT,		"IP Port" },
269 };
270 fc_bitfield_name_search(port_roles, fc_port_role_names)
271 
272 /*
273  * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
274  */
275 #define FC_WELLKNOWN_PORTID_MASK	0xfffff0
276 #define FC_WELLKNOWN_ROLE_MASK  	0x00000f
277 #define FC_FPORT_PORTID			0x00000e
278 #define FC_FABCTLR_PORTID		0x00000d
279 #define FC_DIRSRVR_PORTID		0x00000c
280 #define FC_TIMESRVR_PORTID		0x00000b
281 #define FC_MGMTSRVR_PORTID		0x00000a
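/*
 * Illustrative decode (see show_fc_rport_roles() below): a well-known
 * address such as 0xfffffe passes the FC_WELLKNOWN_PORTID_MASK test, and
 * its low nibble (0xe == FC_FPORT_PORTID) selects the "Fabric Port" role
 * string rather than the normal FCP role names.
 */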
282 
283 
284 static void fc_timeout_deleted_rport(struct work_struct *work);
285 static void fc_timeout_fail_rport_io(struct work_struct *work);
286 static void fc_scsi_scan_rport(struct work_struct *work);
287 
288 /*
289  * Attribute counts per object type...
290  * Increase these values if you add attributes
291  */
292 #define FC_STARGET_NUM_ATTRS 	3
293 #define FC_RPORT_NUM_ATTRS	10
294 #define FC_VPORT_NUM_ATTRS	9
295 #define FC_HOST_NUM_ATTRS	22
296 
297 struct fc_internal {
298 	struct scsi_transport_template t;
299 	struct fc_function_template *f;
300 
301 	/*
302 	 * For attributes, each object has:
303 	 *   An array of the actual attribute structures
304 	 *   An array of null-terminated pointers to the attribute
305 	 *     structures - used for mid-layer interaction.
306 	 *
307 	 * The attribute containers for the starget and host are
308 	 * part of the midlayer. As the remote port is specific to the
309 	 * FC transport, we must provide the attribute container.
310 	 */
311 	struct device_attribute private_starget_attrs[
312 							FC_STARGET_NUM_ATTRS];
313 	struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
314 
315 	struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
316 	struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
317 
318 	struct transport_container rport_attr_cont;
319 	struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
320 	struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
321 
322 	struct transport_container vport_attr_cont;
323 	struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
324 	struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
325 };
326 
327 #define to_fc_internal(tmpl)	container_of(tmpl, struct fc_internal, t)
328 
329 static int fc_target_setup(struct transport_container *tc, struct device *dev,
330 			   struct device *cdev)
331 {
332 	struct scsi_target *starget = to_scsi_target(dev);
333 	struct fc_rport *rport = starget_to_rport(starget);
334 
335 	/*
336 	 * if parent is remote port, use values from remote port.
337 	 * Otherwise, this host uses the fc_transport, but not the
338 	 * remote port interface. As such, initialize to known non-values.
339 	 */
340 	if (rport) {
341 		fc_starget_node_name(starget) = rport->node_name;
342 		fc_starget_port_name(starget) = rport->port_name;
343 		fc_starget_port_id(starget) = rport->port_id;
344 	} else {
345 		fc_starget_node_name(starget) = -1;
346 		fc_starget_port_name(starget) = -1;
347 		fc_starget_port_id(starget) = -1;
348 	}
349 
350 	return 0;
351 }
352 
353 static DECLARE_TRANSPORT_CLASS(fc_transport_class,
354 			       "fc_transport",
355 			       fc_target_setup,
356 			       NULL,
357 			       NULL);
358 
359 static int fc_host_setup(struct transport_container *tc, struct device *dev,
360 			 struct device *cdev)
361 {
362 	struct Scsi_Host *shost = dev_to_shost(dev);
363 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
364 
365 	/*
366 	 * Set default values easily detected by the midlayer as
367 	 * failure cases.  The scsi lldd is responsible for initializing
368 	 * all transport attributes to valid values per host.
369 	 */
370 	fc_host->node_name = -1;
371 	fc_host->port_name = -1;
372 	fc_host->permanent_port_name = -1;
373 	fc_host->supported_classes = FC_COS_UNSPECIFIED;
374 	memset(fc_host->supported_fc4s, 0,
375 		sizeof(fc_host->supported_fc4s));
376 	fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
377 	fc_host->maxframe_size = -1;
378 	fc_host->max_npiv_vports = 0;
379 	memset(fc_host->serial_number, 0,
380 		sizeof(fc_host->serial_number));
381 
382 	fc_host->port_id = -1;
383 	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
384 	fc_host->port_state = FC_PORTSTATE_UNKNOWN;
385 	memset(fc_host->active_fc4s, 0,
386 		sizeof(fc_host->active_fc4s));
387 	fc_host->speed = FC_PORTSPEED_UNKNOWN;
388 	fc_host->fabric_name = -1;
389 	memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
390 	memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
391 
392 	fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
393 
394 	INIT_LIST_HEAD(&fc_host->rports);
395 	INIT_LIST_HEAD(&fc_host->rport_bindings);
396 	INIT_LIST_HEAD(&fc_host->vports);
397 	fc_host->next_rport_number = 0;
398 	fc_host->next_target_id = 0;
399 	fc_host->next_vport_number = 0;
400 	fc_host->npiv_vports_inuse = 0;
401 
402 	snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
403 		 "fc_wq_%d", shost->host_no);
404 	fc_host->work_q = create_singlethread_workqueue(
405 					fc_host->work_q_name);
406 	if (!fc_host->work_q)
407 		return -ENOMEM;
408 
409 	snprintf(fc_host->devloss_work_q_name,
410 		 sizeof(fc_host->devloss_work_q_name),
411 		 "fc_dl_%d", shost->host_no);
412 	fc_host->devloss_work_q = create_singlethread_workqueue(
413 					fc_host->devloss_work_q_name);
414 	if (!fc_host->devloss_work_q) {
415 		destroy_workqueue(fc_host->work_q);
416 		fc_host->work_q = NULL;
417 		return -ENOMEM;
418 	}
419 
420 	fc_bsg_hostadd(shost, fc_host);
421 	/* ignore any bsg add error - we just can't do sgio */
422 
423 	return 0;
424 }
425 
426 static int fc_host_remove(struct transport_container *tc, struct device *dev,
427 			 struct device *cdev)
428 {
429 	struct Scsi_Host *shost = dev_to_shost(dev);
430 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
431 
432 	fc_bsg_remove(fc_host->rqst_q);
433 	return 0;
434 }
435 
436 static DECLARE_TRANSPORT_CLASS(fc_host_class,
437 			       "fc_host",
438 			       fc_host_setup,
439 			       fc_host_remove,
440 			       NULL);
441 
442 /*
443  * Setup and Remove actions for remote ports are handled
444  * in the service functions below.
445  */
446 static DECLARE_TRANSPORT_CLASS(fc_rport_class,
447 			       "fc_remote_ports",
448 			       NULL,
449 			       NULL,
450 			       NULL);
451 
452 /*
453  * Setup and Remove actions for virtual ports are handled
454  * in the service functions below.
455  */
456 static DECLARE_TRANSPORT_CLASS(fc_vport_class,
457 			       "fc_vports",
458 			       NULL,
459 			       NULL,
460 			       NULL);
461 
462 /*
463  * Module Parameters
464  */
465 
466 /*
467  * dev_loss_tmo: the default number of seconds that the FC transport
468  *   should insulate the loss of a remote port.
469  *   The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
470  */
471 static unsigned int fc_dev_loss_tmo = 60;		/* seconds */
472 
473 module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
474 MODULE_PARM_DESC(dev_loss_tmo,
475 		 "Maximum number of seconds that the FC transport should"
476 		 " insulate the loss of a remote port. Once this value is"
477 		 " exceeded, the scsi target is removed. Value should be"
478 		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
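/*
 * Illustrative use (assuming this builds as the scsi_transport_fc module):
 *   modprobe scsi_transport_fc dev_loss_tmo=30
 * or, at run time,
 *   echo 30 > /sys/module/scsi_transport_fc/parameters/dev_loss_tmo
 * changes the transport's default dev_loss_tmo.
 */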
479 
480 /*
481  * Netlink Infrastructure
482  */
483 
484 static atomic_t fc_event_seq;
485 
486 /**
487  * fc_get_event_number - Obtain the next sequential FC event number
488  *
489  * Notes:
490  *   We could have inlined this, but it would have required fc_event_seq to
491  *   be exposed. For now, live with the subroutine call.
492  *   Atomic used to avoid lock/unlock...
493  */
494 u32
495 fc_get_event_number(void)
496 {
497 	return atomic_add_return(1, &fc_event_seq);
498 }
499 EXPORT_SYMBOL(fc_get_event_number);
500 
501 
502 /**
503  * fc_host_post_event - called to post an event on an fc_host.
504  * @shost:		host the event occurred on
505  * @event_number:	fc event number obtained from get_fc_event_number()
506  * @event_code:		fc_host event being posted
507  * @event_data:		32 bits of data for the event being posted
508  *
509  * Notes:
510  *	This routine assumes no locks are held on entry.
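 *
 *	Illustrative usage (not taken from any particular LLDD):
 *	    u32 ev = fc_get_event_number();
 *	    fc_host_post_event(shost, ev, FCH_EVT_LINKUP, 0);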
511  */
512 void
513 fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
514 		enum fc_host_event_code event_code, u32 event_data)
515 {
516 	struct sk_buff *skb;
517 	struct nlmsghdr	*nlh;
518 	struct fc_nl_event *event;
519 	const char *name;
520 	u32 len, skblen;
521 	int err;
522 
523 	if (!scsi_nl_sock) {
524 		err = -ENOENT;
525 		goto send_fail;
526 	}
527 
528 	len = FC_NL_MSGALIGN(sizeof(*event));
529 	skblen = NLMSG_SPACE(len);
530 
531 	skb = alloc_skb(skblen, GFP_KERNEL);
532 	if (!skb) {
533 		err = -ENOBUFS;
534 		goto send_fail;
535 	}
536 
537 	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
538 				skblen - sizeof(*nlh), 0);
539 	if (!nlh) {
540 		err = -ENOBUFS;
541 		goto send_fail_skb;
542 	}
543 	event = NLMSG_DATA(nlh);
544 
545 	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
546 				FC_NL_ASYNC_EVENT, len);
547 	event->seconds = get_seconds();
548 	event->vendor_id = 0;
549 	event->host_no = shost->host_no;
550 	event->event_datalen = sizeof(u32);	/* bytes */
551 	event->event_num = event_number;
552 	event->event_code = event_code;
553 	event->event_data = event_data;
554 
555 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
556 			GFP_KERNEL);
557 	return;
558 
559 send_fail_skb:
560 	kfree_skb(skb);
561 send_fail:
562 	name = get_fc_host_event_code_name(event_code);
563 	printk(KERN_WARNING
564 		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
565 		__func__, shost->host_no,
566 		(name) ? name : "<unknown>", event_data, err);
567 	return;
568 }
569 EXPORT_SYMBOL(fc_host_post_event);
570 
571 
572 /**
573  * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
574  * @shost:		host the event occurred on
575  * @event_number:	fc event number obtained from get_fc_event_number()
576  * @data_len:		amount, in bytes, of vendor unique data
577  * @data_buf:		pointer to vendor unique data
578  * @vendor_id:          Vendor id
579  *
580  * Notes:
581  *	This routine assumes no locks are held on entry.
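 *
 *	Illustrative usage, where "buf"/"len" are the caller's vendor-unique
 *	payload and "my_vendor_id" is a placeholder identifier:
 *	    fc_host_post_vendor_event(shost, fc_get_event_number(),
 *				      len, buf, my_vendor_id);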
582  */
583 void
584 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
585 		u32 data_len, char * data_buf, u64 vendor_id)
586 {
587 	struct sk_buff *skb;
588 	struct nlmsghdr	*nlh;
589 	struct fc_nl_event *event;
590 	u32 len, skblen;
591 	int err;
592 
593 	if (!scsi_nl_sock) {
594 		err = -ENOENT;
595 		goto send_vendor_fail;
596 	}
597 
598 	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
599 	skblen = NLMSG_SPACE(len);
600 
601 	skb = alloc_skb(skblen, GFP_KERNEL);
602 	if (!skb) {
603 		err = -ENOBUFS;
604 		goto send_vendor_fail;
605 	}
606 
607 	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
608 				skblen - sizeof(*nlh), 0);
609 	if (!nlh) {
610 		err = -ENOBUFS;
611 		goto send_vendor_fail_skb;
612 	}
613 	event = NLMSG_DATA(nlh);
614 
615 	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
616 				FC_NL_ASYNC_EVENT, len);
617 	event->seconds = get_seconds();
618 	event->vendor_id = vendor_id;
619 	event->host_no = shost->host_no;
620 	event->event_datalen = data_len;	/* bytes */
621 	event->event_num = event_number;
622 	event->event_code = FCH_EVT_VENDOR_UNIQUE;
623 	memcpy(&event->event_data, data_buf, data_len);
624 
625 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
626 			GFP_KERNEL);
627 	return;
628 
629 send_vendor_fail_skb:
630 	kfree_skb(skb);
631 send_vendor_fail:
632 	printk(KERN_WARNING
633 		"%s: Dropped Event : host %d vendor_unique - err %d\n",
634 		__func__, shost->host_no, err);
635 	return;
636 }
637 EXPORT_SYMBOL(fc_host_post_vendor_event);
638 
639 
640 
641 static __init int fc_transport_init(void)
642 {
643 	int error;
644 
645 	atomic_set(&fc_event_seq, 0);
646 
647 	error = transport_class_register(&fc_host_class);
648 	if (error)
649 		return error;
650 	error = transport_class_register(&fc_vport_class);
651 	if (error)
652 		goto unreg_host_class;
653 	error = transport_class_register(&fc_rport_class);
654 	if (error)
655 		goto unreg_vport_class;
656 	error = transport_class_register(&fc_transport_class);
657 	if (error)
658 		goto unreg_rport_class;
659 	return 0;
660 
661 unreg_rport_class:
662 	transport_class_unregister(&fc_rport_class);
663 unreg_vport_class:
664 	transport_class_unregister(&fc_vport_class);
665 unreg_host_class:
666 	transport_class_unregister(&fc_host_class);
667 	return error;
668 }
669 
670 static void __exit fc_transport_exit(void)
671 {
672 	transport_class_unregister(&fc_transport_class);
673 	transport_class_unregister(&fc_rport_class);
674 	transport_class_unregister(&fc_host_class);
675 	transport_class_unregister(&fc_vport_class);
676 }
677 
678 /*
679  * FC Remote Port Attribute Management
680  */
681 
682 #define fc_rport_show_function(field, format_string, sz, cast)		\
683 static ssize_t								\
684 show_fc_rport_##field (struct device *dev, 				\
685 		       struct device_attribute *attr, char *buf)	\
686 {									\
687 	struct fc_rport *rport = transport_class_to_rport(dev);		\
688 	struct Scsi_Host *shost = rport_to_shost(rport);		\
689 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
690 	if ((i->f->get_rport_##field) &&				\
691 	    !((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
692 	      (rport->port_state == FC_PORTSTATE_DELETED) ||		\
693 	      (rport->port_state == FC_PORTSTATE_NOTPRESENT)))		\
694 		i->f->get_rport_##field(rport);				\
695 	return snprintf(buf, sz, format_string, cast rport->field); 	\
696 }
697 
698 #define fc_rport_store_function(field)					\
699 static ssize_t								\
700 store_fc_rport_##field(struct device *dev,				\
701 		       struct device_attribute *attr,			\
702 		       const char *buf,	size_t count)			\
703 {									\
704 	int val;							\
705 	struct fc_rport *rport = transport_class_to_rport(dev);		\
706 	struct Scsi_Host *shost = rport_to_shost(rport);		\
707 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
708 	char *cp;							\
709 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||		\
710 	    (rport->port_state == FC_PORTSTATE_DELETED) ||		\
711 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))		\
712 		return -EBUSY;						\
713 	val = simple_strtoul(buf, &cp, 0);				\
714 	if (*cp && (*cp != '\n'))					\
715 		return -EINVAL;						\
716 	i->f->set_rport_##field(rport, val);				\
717 	return count;							\
718 }
719 
720 #define fc_rport_rd_attr(field, format_string, sz)			\
721 	fc_rport_show_function(field, format_string, sz, )		\
722 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
723 			 show_fc_rport_##field, NULL)
724 
725 #define fc_rport_rd_attr_cast(field, format_string, sz, cast)		\
726 	fc_rport_show_function(field, format_string, sz, (cast))	\
727 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
728 			  show_fc_rport_##field, NULL)
729 
730 #define fc_rport_rw_attr(field, format_string, sz)			\
731 	fc_rport_show_function(field, format_string, sz, )		\
732 	fc_rport_store_function(field)					\
733 static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR,		\
734 			show_fc_rport_##field,				\
735 			store_fc_rport_##field)
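/*
 * Illustrative expansion (hypothetical field "foo"): fc_rport_rd_attr(foo,
 * "%d\n", 20) generates show_fc_rport_foo(), which refreshes rport->foo via
 * the LLDD's get_rport_foo() callback while the rport is in a usable state,
 * plus a read-only device_attr_rport_foo; the _rw_ variant also generates
 * store_fc_rport_foo() and marks the attribute S_IRUGO | S_IWUSR.
 */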
736 
737 
738 #define fc_private_rport_show_function(field, format_string, sz, cast)	\
739 static ssize_t								\
740 show_fc_rport_##field (struct device *dev, 				\
741 		       struct device_attribute *attr, char *buf)	\
742 {									\
743 	struct fc_rport *rport = transport_class_to_rport(dev);		\
744 	return snprintf(buf, sz, format_string, cast rport->field); 	\
745 }
746 
747 #define fc_private_rport_rd_attr(field, format_string, sz)		\
748 	fc_private_rport_show_function(field, format_string, sz, )	\
749 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
750 			 show_fc_rport_##field, NULL)
751 
752 #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast)	\
753 	fc_private_rport_show_function(field, format_string, sz, (cast)) \
754 static FC_DEVICE_ATTR(rport, field, S_IRUGO,			\
755 			  show_fc_rport_##field, NULL)
756 
757 
758 #define fc_private_rport_rd_enum_attr(title, maxlen)			\
759 static ssize_t								\
760 show_fc_rport_##title (struct device *dev,				\
761 		       struct device_attribute *attr, char *buf)	\
762 {									\
763 	struct fc_rport *rport = transport_class_to_rport(dev);		\
764 	const char *name;						\
765 	name = get_fc_##title##_name(rport->title);			\
766 	if (!name)							\
767 		return -EINVAL;						\
768 	return snprintf(buf, maxlen, "%s\n", name);			\
769 }									\
770 static FC_DEVICE_ATTR(rport, title, S_IRUGO,			\
771 			show_fc_rport_##title, NULL)
772 
773 
774 #define SETUP_RPORT_ATTRIBUTE_RD(field)					\
775 	i->private_rport_attrs[count] = device_attr_rport_##field; \
776 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
777 	i->private_rport_attrs[count].store = NULL;			\
778 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
779 	if (i->f->show_rport_##field)					\
780 		count++
781 
782 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field)				\
783 	i->private_rport_attrs[count] = device_attr_rport_##field; \
784 	i->private_rport_attrs[count].attr.mode = S_IRUGO;		\
785 	i->private_rport_attrs[count].store = NULL;			\
786 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
787 	count++
788 
789 #define SETUP_RPORT_ATTRIBUTE_RW(field)					\
790 	i->private_rport_attrs[count] = device_attr_rport_##field; \
791 	if (!i->f->set_rport_##field) {					\
792 		i->private_rport_attrs[count].attr.mode = S_IRUGO;	\
793 		i->private_rport_attrs[count].store = NULL;		\
794 	}								\
795 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
796 	if (i->f->show_rport_##field)					\
797 		count++
798 
799 #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field)				\
800 {									\
801 	i->private_rport_attrs[count] = device_attr_rport_##field; \
802 	i->rport_attrs[count] = &i->private_rport_attrs[count];		\
803 	count++;							\
804 }
805 
806 
807 /* The FC Transport Remote Port Attributes: */
808 
809 /* Fixed Remote Port Attributes */
810 
811 fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
812 
813 static ssize_t
814 show_fc_rport_supported_classes (struct device *dev,
815 				 struct device_attribute *attr, char *buf)
816 {
817 	struct fc_rport *rport = transport_class_to_rport(dev);
818 	if (rport->supported_classes == FC_COS_UNSPECIFIED)
819 		return snprintf(buf, 20, "unspecified\n");
820 	return get_fc_cos_names(rport->supported_classes, buf);
821 }
822 static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
823 		show_fc_rport_supported_classes, NULL);
824 
825 /* Dynamic Remote Port Attributes */
826 
827 /*
828  * dev_loss_tmo attribute
829  */
830 fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
831 static ssize_t
832 store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
833 			    const char *buf, size_t count)
834 {
835 	int val;
836 	struct fc_rport *rport = transport_class_to_rport(dev);
837 	struct Scsi_Host *shost = rport_to_shost(rport);
838 	struct fc_internal *i = to_fc_internal(shost->transportt);
839 	char *cp;
840 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
841 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
842 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
843 		return -EBUSY;
844 	val = simple_strtoul(buf, &cp, 0);
845 	if ((*cp && (*cp != '\n')) ||
846 	    (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
847 		return -EINVAL;
848 	i->f->set_rport_dev_loss_tmo(rport, val);
849 	return count;
850 }
851 static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
852 		show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
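/*
 * Illustrative use from user space (the rport name here is only an example):
 *   echo 30 > /sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo
 * The write is rejected with -EINVAL unless the value parses as a number
 * no larger than SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
 */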
853 
854 
855 /* Private Remote Port Attributes */
856 
857 fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
858 fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
859 fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
860 
861 static ssize_t
862 show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
863 		     char *buf)
864 {
865 	struct fc_rport *rport = transport_class_to_rport(dev);
866 
867 	/* identify any roles that are port_id specific */
868 	if ((rport->port_id != -1) &&
869 	    (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
870 					FC_WELLKNOWN_PORTID_MASK) {
871 		switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
872 		case FC_FPORT_PORTID:
873 			return snprintf(buf, 30, "Fabric Port\n");
874 		case FC_FABCTLR_PORTID:
875 			return snprintf(buf, 30, "Fabric Controller\n");
876 		case FC_DIRSRVR_PORTID:
877 			return snprintf(buf, 30, "Directory Server\n");
878 		case FC_TIMESRVR_PORTID:
879 			return snprintf(buf, 30, "Time Server\n");
880 		case FC_MGMTSRVR_PORTID:
881 			return snprintf(buf, 30, "Management Server\n");
882 		default:
883 			return snprintf(buf, 30, "Unknown Fabric Entity\n");
884 		}
885 	} else {
886 		if (rport->roles == FC_PORT_ROLE_UNKNOWN)
887 			return snprintf(buf, 20, "unknown\n");
888 		return get_fc_port_roles_names(rport->roles, buf);
889 	}
890 }
891 static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
892 		show_fc_rport_roles, NULL);
893 
894 fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
895 fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
896 
897 /*
898  * fast_io_fail_tmo attribute
899  */
900 static ssize_t
901 show_fc_rport_fast_io_fail_tmo (struct device *dev,
902 				struct device_attribute *attr, char *buf)
903 {
904 	struct fc_rport *rport = transport_class_to_rport(dev);
905 
906 	if (rport->fast_io_fail_tmo == -1)
907 		return snprintf(buf, 5, "off\n");
908 	return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
909 }
910 
911 static ssize_t
912 store_fc_rport_fast_io_fail_tmo(struct device *dev,
913 				struct device_attribute *attr, const char *buf,
914 				size_t count)
915 {
916 	int val;
917 	char *cp;
918 	struct fc_rport *rport = transport_class_to_rport(dev);
919 
920 	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
921 	    (rport->port_state == FC_PORTSTATE_DELETED) ||
922 	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
923 		return -EBUSY;
924 	if (strncmp(buf, "off", 3) == 0)
925 		rport->fast_io_fail_tmo = -1;
926 	else {
927 		val = simple_strtoul(buf, &cp, 0);
928 		if ((*cp && (*cp != '\n')) ||
929 		    (val < 0) || (val >= rport->dev_loss_tmo))
930 			return -EINVAL;
931 		rport->fast_io_fail_tmo = val;
932 	}
933 	return count;
934 }
935 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
936 	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
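/*
 * Illustrative note: writing "off" disables the fast-fail timer, while a
 * numeric value is accepted only if it is smaller than the rport's current
 * dev_loss_tmo (see the store function above).
 */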
937 
938 
939 /*
940  * FC SCSI Target Attribute Management
941  */
942 
943 /*
944  * Note: in the target show function we recognize when the remote
945  *  port is in the hierarchy and do not allow the driver to get
946  *  involved in sysfs functions. The driver only gets involved if
947  *  it's the "old" style that doesn't use rports.
948  */
949 #define fc_starget_show_function(field, format_string, sz, cast)	\
950 static ssize_t								\
951 show_fc_starget_##field (struct device *dev, 				\
952 			 struct device_attribute *attr, char *buf)	\
953 {									\
954 	struct scsi_target *starget = transport_class_to_starget(dev);	\
955 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
956 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
957 	struct fc_rport *rport = starget_to_rport(starget);		\
958 	if (rport)							\
959 		fc_starget_##field(starget) = rport->field;		\
960 	else if (i->f->get_starget_##field)				\
961 		i->f->get_starget_##field(starget);			\
962 	return snprintf(buf, sz, format_string, 			\
963 		cast fc_starget_##field(starget)); 			\
964 }
965 
966 #define fc_starget_rd_attr(field, format_string, sz)			\
967 	fc_starget_show_function(field, format_string, sz, )		\
968 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
969 			 show_fc_starget_##field, NULL)
970 
971 #define fc_starget_rd_attr_cast(field, format_string, sz, cast)		\
972 	fc_starget_show_function(field, format_string, sz, (cast))	\
973 static FC_DEVICE_ATTR(starget, field, S_IRUGO,			\
974 			  show_fc_starget_##field, NULL)
975 
976 #define SETUP_STARGET_ATTRIBUTE_RD(field)				\
977 	i->private_starget_attrs[count] = device_attr_starget_##field; \
978 	i->private_starget_attrs[count].attr.mode = S_IRUGO;		\
979 	i->private_starget_attrs[count].store = NULL;			\
980 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
981 	if (i->f->show_starget_##field)					\
982 		count++
983 
984 #define SETUP_STARGET_ATTRIBUTE_RW(field)				\
985 	i->private_starget_attrs[count] = device_attr_starget_##field; \
986 	if (!i->f->set_starget_##field) {				\
987 		i->private_starget_attrs[count].attr.mode = S_IRUGO;	\
988 		i->private_starget_attrs[count].store = NULL;		\
989 	}								\
990 	i->starget_attrs[count] = &i->private_starget_attrs[count];	\
991 	if (i->f->show_starget_##field)					\
992 		count++
993 
994 /* The FC Transport SCSI Target Attributes: */
995 fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
996 fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
997 fc_starget_rd_attr(port_id, "0x%06x\n", 20);
998 
999 
1000 /*
1001  * FC Virtual Port Attribute Management
1002  */
1003 
1004 #define fc_vport_show_function(field, format_string, sz, cast)		\
1005 static ssize_t								\
1006 show_fc_vport_##field (struct device *dev, 				\
1007 		       struct device_attribute *attr, char *buf)	\
1008 {									\
1009 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1010 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1011 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1012 	if ((i->f->get_vport_##field) &&				\
1013 	    !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)))	\
1014 		i->f->get_vport_##field(vport);				\
1015 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1016 }
1017 
1018 #define fc_vport_store_function(field)					\
1019 static ssize_t								\
1020 store_fc_vport_##field(struct device *dev,				\
1021 		       struct device_attribute *attr,			\
1022 		       const char *buf,	size_t count)			\
1023 {									\
1024 	int val;							\
1025 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1026 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1027 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1028 	char *cp;							\
1029 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))	\
1030 		return -EBUSY;						\
1031 	val = simple_strtoul(buf, &cp, 0);				\
1032 	if (*cp && (*cp != '\n'))					\
1033 		return -EINVAL;						\
1034 	i->f->set_vport_##field(vport, val);				\
1035 	return count;							\
1036 }
1037 
1038 #define fc_vport_store_str_function(field, slen)			\
1039 static ssize_t								\
1040 store_fc_vport_##field(struct device *dev,				\
1041 		       struct device_attribute *attr, 			\
1042 		       const char *buf,	size_t count)			\
1043 {									\
1044 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1045 	struct Scsi_Host *shost = vport_to_shost(vport);		\
1046 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1047 	unsigned int cnt=count;						\
1048 									\
1049 	/* count may include a LF at end of string */			\
1050 	if (buf[cnt-1] == '\n')						\
1051 		cnt--;							\
1052 	if (cnt > ((slen) - 1))						\
1053 		return -EINVAL;						\
1054 	memcpy(vport->field, buf, cnt);					\
1055 	i->f->set_vport_##field(vport);					\
1056 	return count;							\
1057 }
1058 
1059 #define fc_vport_rd_attr(field, format_string, sz)			\
1060 	fc_vport_show_function(field, format_string, sz, )		\
1061 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1062 			 show_fc_vport_##field, NULL)
1063 
1064 #define fc_vport_rd_attr_cast(field, format_string, sz, cast)		\
1065 	fc_vport_show_function(field, format_string, sz, (cast))	\
1066 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1067 			  show_fc_vport_##field, NULL)
1068 
1069 #define fc_vport_rw_attr(field, format_string, sz)			\
1070 	fc_vport_show_function(field, format_string, sz, )		\
1071 	fc_vport_store_function(field)					\
1072 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1073 			show_fc_vport_##field,				\
1074 			store_fc_vport_##field)
1075 
1076 #define fc_private_vport_show_function(field, format_string, sz, cast)	\
1077 static ssize_t								\
1078 show_fc_vport_##field (struct device *dev,				\
1079 		       struct device_attribute *attr, char *buf)	\
1080 {									\
1081 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1082 	return snprintf(buf, sz, format_string, cast vport->field); 	\
1083 }
1084 
1085 #define fc_private_vport_store_u32_function(field)			\
1086 static ssize_t								\
1087 store_fc_vport_##field(struct device *dev,				\
1088 		       struct device_attribute *attr,			\
1089 		       const char *buf,	size_t count)			\
1090 {									\
1091 	u32 val;							\
1092 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1093 	char *cp;							\
1094 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))		\
1095 		return -EBUSY;						\
1096 	val = simple_strtoul(buf, &cp, 0);				\
1097 	if (*cp && (*cp != '\n'))					\
1098 		return -EINVAL;						\
1099 	vport->field = val;						\
1100 	return count;							\
1101 }
1102 
1103 
1104 #define fc_private_vport_rd_attr(field, format_string, sz)		\
1105 	fc_private_vport_show_function(field, format_string, sz, )	\
1106 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1107 			 show_fc_vport_##field, NULL)
1108 
1109 #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast)	\
1110 	fc_private_vport_show_function(field, format_string, sz, (cast)) \
1111 static FC_DEVICE_ATTR(vport, field, S_IRUGO,			\
1112 			  show_fc_vport_##field, NULL)
1113 
1114 #define fc_private_vport_rw_u32_attr(field, format_string, sz)		\
1115 	fc_private_vport_show_function(field, format_string, sz, )	\
1116 	fc_private_vport_store_u32_function(field)			\
1117 static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR,		\
1118 			show_fc_vport_##field,				\
1119 			store_fc_vport_##field)
1120 
1121 
1122 #define fc_private_vport_rd_enum_attr(title, maxlen)			\
1123 static ssize_t								\
1124 show_fc_vport_##title (struct device *dev,				\
1125 		       struct device_attribute *attr,			\
1126 		       char *buf)					\
1127 {									\
1128 	struct fc_vport *vport = transport_class_to_vport(dev);		\
1129 	const char *name;						\
1130 	name = get_fc_##title##_name(vport->title);			\
1131 	if (!name)							\
1132 		return -EINVAL;						\
1133 	return snprintf(buf, maxlen, "%s\n", name);			\
1134 }									\
1135 static FC_DEVICE_ATTR(vport, title, S_IRUGO,			\
1136 			show_fc_vport_##title, NULL)
1137 
1138 
1139 #define SETUP_VPORT_ATTRIBUTE_RD(field)					\
1140 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1141 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1142 	i->private_vport_attrs[count].store = NULL;			\
1143 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1144 	if (i->f->get_##field)						\
1145 		count++
1146 	/* NOTE: Above MACRO differs: checks function not show bit */
1147 
1148 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field)				\
1149 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1150 	i->private_vport_attrs[count].attr.mode = S_IRUGO;		\
1151 	i->private_vport_attrs[count].store = NULL;			\
1152 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1153 	count++
1154 
1155 #define SETUP_VPORT_ATTRIBUTE_WR(field)					\
1156 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1157 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1158 	if (i->f->field)						\
1159 		count++
1160 	/* NOTE: Above MACRO differs: checks function */
1161 
1162 #define SETUP_VPORT_ATTRIBUTE_RW(field)					\
1163 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1164 	if (!i->f->set_vport_##field) {					\
1165 		i->private_vport_attrs[count].attr.mode = S_IRUGO;	\
1166 		i->private_vport_attrs[count].store = NULL;		\
1167 	}								\
1168 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1169 	count++
1170 	/* NOTE: Above MACRO differs: does not check show bit */
1171 
1172 #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field)				\
1173 {									\
1174 	i->private_vport_attrs[count] = device_attr_vport_##field; \
1175 	i->vport_attrs[count] = &i->private_vport_attrs[count];		\
1176 	count++;							\
1177 }
1178 
1179 
1180 /* The FC Transport Virtual Port Attributes: */
1181 
1182 /* Fixed Virtual Port Attributes */
1183 
1184 /* Dynamic Virtual Port Attributes */
1185 
1186 /* Private Virtual Port Attributes */
1187 
1188 fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1189 fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1190 fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1191 fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1192 
1193 static ssize_t
1194 show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1195 		     char *buf)
1196 {
1197 	struct fc_vport *vport = transport_class_to_vport(dev);
1198 
1199 	if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1200 		return snprintf(buf, 20, "unknown\n");
1201 	return get_fc_port_roles_names(vport->roles, buf);
1202 }
1203 static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1204 
1205 fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1206 
1207 fc_private_vport_show_function(symbolic_name, "%s\n",
1208 		FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1209 fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1210 static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1211 		show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1212 
1213 static ssize_t
1214 store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1215 		      const char *buf, size_t count)
1216 {
1217 	struct fc_vport *vport = transport_class_to_vport(dev);
1218 	struct Scsi_Host *shost = vport_to_shost(vport);
1219 
1220 	fc_queue_work(shost, &vport->vport_delete_work);
1221 	return count;
1222 }
1223 static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1224 			NULL, store_fc_vport_delete);
1225 
1226 
1227 /*
1228  * Enable/Disable vport
1229  *  Write "1" to disable, write "0" to enable
1230  */
1231 static ssize_t
1232 store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1233 		       const char *buf,
1234 			   size_t count)
1235 {
1236 	struct fc_vport *vport = transport_class_to_vport(dev);
1237 	struct Scsi_Host *shost = vport_to_shost(vport);
1238 	struct fc_internal *i = to_fc_internal(shost->transportt);
1239 	int stat;
1240 
1241 	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1242 		return -EBUSY;
1243 
1244 	if (*buf == '0') {
1245 		if (vport->vport_state != FC_VPORT_DISABLED)
1246 			return -EALREADY;
1247 	} else if (*buf == '1') {
1248 		if (vport->vport_state == FC_VPORT_DISABLED)
1249 			return -EALREADY;
1250 	} else
1251 		return -EINVAL;
1252 
1253 	stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1254 	return stat ? stat : count;
1255 }
1256 static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1257 			NULL, store_fc_vport_disable);
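/*
 * Illustrative use from user space (the vport device name varies):
 *   echo 1 > /sys/class/fc_vports/<vport>/vport_disable    # disable
 *   echo 0 > /sys/class/fc_vports/<vport>/vport_disable    # re-enable
 */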
1258 
1259 
1260 /*
1261  * Host Attribute Management
1262  */
1263 
1264 #define fc_host_show_function(field, format_string, sz, cast)		\
1265 static ssize_t								\
1266 show_fc_host_##field (struct device *dev,				\
1267 		      struct device_attribute *attr, char *buf)		\
1268 {									\
1269 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1270 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1271 	if (i->f->get_host_##field)					\
1272 		i->f->get_host_##field(shost);				\
1273 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1274 }
1275 
1276 #define fc_host_store_function(field)					\
1277 static ssize_t								\
1278 store_fc_host_##field(struct device *dev, 				\
1279 		      struct device_attribute *attr,			\
1280 		      const char *buf,	size_t count)			\
1281 {									\
1282 	int val;							\
1283 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1284 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1285 	char *cp;							\
1286 									\
1287 	val = simple_strtoul(buf, &cp, 0);				\
1288 	if (*cp && (*cp != '\n'))					\
1289 		return -EINVAL;						\
1290 	i->f->set_host_##field(shost, val);				\
1291 	return count;							\
1292 }
1293 
1294 #define fc_host_store_str_function(field, slen)				\
1295 static ssize_t								\
1296 store_fc_host_##field(struct device *dev,				\
1297 		      struct device_attribute *attr,			\
1298 		      const char *buf, size_t count)			\
1299 {									\
1300 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1301 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1302 	unsigned int cnt=count;						\
1303 									\
1304 	/* count may include a LF at end of string */			\
1305 	if (buf[cnt-1] == '\n')						\
1306 		cnt--;							\
1307 	if (cnt > ((slen) - 1))						\
1308 		return -EINVAL;						\
1309 	memcpy(fc_host_##field(shost), buf, cnt);			\
1310 	i->f->set_host_##field(shost);					\
1311 	return count;							\
1312 }
1313 
1314 #define fc_host_rd_attr(field, format_string, sz)			\
1315 	fc_host_show_function(field, format_string, sz, )		\
1316 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1317 			 show_fc_host_##field, NULL)
1318 
1319 #define fc_host_rd_attr_cast(field, format_string, sz, cast)		\
1320 	fc_host_show_function(field, format_string, sz, (cast))		\
1321 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1322 			  show_fc_host_##field, NULL)
1323 
1324 #define fc_host_rw_attr(field, format_string, sz)			\
1325 	fc_host_show_function(field, format_string, sz, )		\
1326 	fc_host_store_function(field)					\
1327 static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR,		\
1328 			show_fc_host_##field,				\
1329 			store_fc_host_##field)
1330 
1331 #define fc_host_rd_enum_attr(title, maxlen)				\
1332 static ssize_t								\
1333 show_fc_host_##title (struct device *dev,				\
1334 		      struct device_attribute *attr, char *buf)		\
1335 {									\
1336 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1337 	struct fc_internal *i = to_fc_internal(shost->transportt);	\
1338 	const char *name;						\
1339 	if (i->f->get_host_##title)					\
1340 		i->f->get_host_##title(shost);				\
1341 	name = get_fc_##title##_name(fc_host_##title(shost));		\
1342 	if (!name)							\
1343 		return -EINVAL;						\
1344 	return snprintf(buf, maxlen, "%s\n", name);			\
1345 }									\
1346 static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1347 
1348 #define SETUP_HOST_ATTRIBUTE_RD(field)					\
1349 	i->private_host_attrs[count] = device_attr_host_##field;	\
1350 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1351 	i->private_host_attrs[count].store = NULL;			\
1352 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1353 	if (i->f->show_host_##field)					\
1354 		count++
1355 
1356 #define SETUP_HOST_ATTRIBUTE_RD_NS(field)				\
1357 	i->private_host_attrs[count] = device_attr_host_##field;	\
1358 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1359 	i->private_host_attrs[count].store = NULL;			\
1360 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1361 	count++
1362 
1363 #define SETUP_HOST_ATTRIBUTE_RW(field)					\
1364 	i->private_host_attrs[count] = device_attr_host_##field;	\
1365 	if (!i->f->set_host_##field) {					\
1366 		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
1367 		i->private_host_attrs[count].store = NULL;		\
1368 	}								\
1369 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1370 	if (i->f->show_host_##field)					\
1371 		count++
1372 
1373 
1374 #define fc_private_host_show_function(field, format_string, sz, cast)	\
1375 static ssize_t								\
1376 show_fc_host_##field (struct device *dev,				\
1377 		      struct device_attribute *attr, char *buf)		\
1378 {									\
1379 	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
1380 	return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1381 }
1382 
1383 #define fc_private_host_rd_attr(field, format_string, sz)		\
1384 	fc_private_host_show_function(field, format_string, sz, )	\
1385 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1386 			 show_fc_host_##field, NULL)
1387 
1388 #define fc_private_host_rd_attr_cast(field, format_string, sz, cast)	\
1389 	fc_private_host_show_function(field, format_string, sz, (cast)) \
1390 static FC_DEVICE_ATTR(host, field, S_IRUGO,			\
1391 			  show_fc_host_##field, NULL)
1392 
1393 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field)			\
1394 	i->private_host_attrs[count] = device_attr_host_##field;	\
1395 	i->private_host_attrs[count].attr.mode = S_IRUGO;		\
1396 	i->private_host_attrs[count].store = NULL;			\
1397 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1398 	count++
1399 
1400 #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field)			\
1401 {									\
1402 	i->private_host_attrs[count] = device_attr_host_##field;	\
1403 	i->host_attrs[count] = &i->private_host_attrs[count];		\
1404 	count++;							\
1405 }
1406 
1407 
1408 /* Fixed Host Attributes */
1409 
1410 static ssize_t
1411 show_fc_host_supported_classes (struct device *dev,
1412 			        struct device_attribute *attr, char *buf)
1413 {
1414 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1415 
1416 	if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
1417 		return snprintf(buf, 20, "unspecified\n");
1418 
1419 	return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1420 }
1421 static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1422 		show_fc_host_supported_classes, NULL);
1423 
1424 static ssize_t
1425 show_fc_host_supported_fc4s (struct device *dev,
1426 			     struct device_attribute *attr, char *buf)
1427 {
1428 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1429 	return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1430 }
1431 static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1432 		show_fc_host_supported_fc4s, NULL);
1433 
1434 static ssize_t
1435 show_fc_host_supported_speeds (struct device *dev,
1436 			       struct device_attribute *attr, char *buf)
1437 {
1438 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1439 
1440 	if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
1441 		return snprintf(buf, 20, "unknown\n");
1442 
1443 	return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1444 }
1445 static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1446 		show_fc_host_supported_speeds, NULL);
1447 
1448 
1449 fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1450 fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1451 fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1452 			     unsigned long long);
1453 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1454 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1455 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1456 
1457 
1458 /* Dynamic Host Attributes */
1459 
1460 static ssize_t
1461 show_fc_host_active_fc4s (struct device *dev,
1462 			  struct device_attribute *attr, char *buf)
1463 {
1464 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1465 	struct fc_internal *i = to_fc_internal(shost->transportt);
1466 
1467 	if (i->f->get_host_active_fc4s)
1468 		i->f->get_host_active_fc4s(shost);
1469 
1470 	return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1471 }
1472 static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1473 		show_fc_host_active_fc4s, NULL);
1474 
1475 static ssize_t
1476 show_fc_host_speed (struct device *dev,
1477 		    struct device_attribute *attr, char *buf)
1478 {
1479 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1480 	struct fc_internal *i = to_fc_internal(shost->transportt);
1481 
1482 	if (i->f->get_host_speed)
1483 		i->f->get_host_speed(shost);
1484 
1485 	if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1486 		return snprintf(buf, 20, "unknown\n");
1487 
1488 	return get_fc_port_speed_names(fc_host_speed(shost), buf);
1489 }
1490 static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1491 		show_fc_host_speed, NULL);
1492 
1493 
1494 fc_host_rd_attr(port_id, "0x%06x\n", 20);
1495 fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1496 fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1497 fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1498 fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1499 
1500 fc_private_host_show_function(system_hostname, "%s\n",
1501 		FC_SYMBOLIC_NAME_SIZE + 1, )
1502 fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1503 static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1504 		show_fc_host_system_hostname, store_fc_host_system_hostname);
1505 
1506 
1507 /* Private Host Attributes */
1508 
1509 static ssize_t
1510 show_fc_private_host_tgtid_bind_type(struct device *dev,
1511 				     struct device_attribute *attr, char *buf)
1512 {
1513 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1514 	const char *name;
1515 
1516 	name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
1517 	if (!name)
1518 		return -EINVAL;
1519 	return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
1520 }
1521 
1522 #define get_list_head_entry(pos, head, member) 		\
1523 	pos = list_entry((head)->next, typeof(*pos), member)
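/*
 * Illustrative note: this simply peeks at the first entry of a list; the
 * store function below uses it under shost->host_lock while draining the
 * rport_bindings list.
 */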
1524 
1525 static ssize_t
1526 store_fc_private_host_tgtid_bind_type(struct device *dev,
1527 	struct device_attribute *attr, const char *buf, size_t count)
1528 {
1529 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1530 	struct fc_rport *rport;
1531  	enum fc_tgtid_binding_type val;
1532 	unsigned long flags;
1533 
1534 	if (get_fc_tgtid_bind_type_match(buf, &val))
1535 		return -EINVAL;
1536 
1537 	/* if changing bind type, purge all unused consistent bindings */
1538 	if (val != fc_host_tgtid_bind_type(shost)) {
1539 		spin_lock_irqsave(shost->host_lock, flags);
1540 		while (!list_empty(&fc_host_rport_bindings(shost))) {
1541 			get_list_head_entry(rport,
1542 				&fc_host_rport_bindings(shost), peers);
1543 			list_del(&rport->peers);
1544 			rport->port_state = FC_PORTSTATE_DELETED;
1545 			fc_queue_work(shost, &rport->rport_delete_work);
1546 		}
1547 		spin_unlock_irqrestore(shost->host_lock, flags);
1548 	}
1549 
1550 	fc_host_tgtid_bind_type(shost) = val;
1551 	return count;
1552 }
1553 
1554 static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
1555 			show_fc_private_host_tgtid_bind_type,
1556 			store_fc_private_host_tgtid_bind_type);
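/*
 * Illustrative use from user space: echo "wwpn", "wwnn", "port_id" or
 * "none" into /sys/class/fc_host/host<N>/tgtid_bind_type; as the store
 * function above shows, switching types also drops any unused consistent
 * bindings.
 */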
1557 
1558 static ssize_t
1559 store_fc_private_host_issue_lip(struct device *dev,
1560 	struct device_attribute *attr, const char *buf, size_t count)
1561 {
1562 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1563 	struct fc_internal *i = to_fc_internal(shost->transportt);
1564 	int ret;
1565 
1566 	/* ignore any data value written to the attribute */
1567 	if (i->f->issue_fc_host_lip) {
1568 		ret = i->f->issue_fc_host_lip(shost);
1569 		return ret ? ret: count;
1570 	}
1571 
1572 	return -ENOENT;
1573 }
1574 
1575 static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1576 			store_fc_private_host_issue_lip);
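
/*
 * Example usage (illustrative only; hostN is an assumption). Any value
 * written is ignored; the write simply invokes the LLDD's
 * issue_fc_host_lip() callback:
 *
 *   echo 1 > /sys/class/fc_host/hostN/issue_lip
 */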
1577 
1578 fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1579 
1580 
1581 /*
1582  * Host Statistics Management
1583  */
1584 
/* Show a given attribute in the statistics group */
1586 static ssize_t
1587 fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
1588 {
1589 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1590 	struct fc_internal *i = to_fc_internal(shost->transportt);
1591 	struct fc_host_statistics *stats;
1592 	ssize_t ret = -ENOENT;
1593 
1594 	if (offset > sizeof(struct fc_host_statistics) ||
1595 	    offset % sizeof(u64) != 0)
1596 		WARN_ON(1);
1597 
1598 	if (i->f->get_fc_host_stats) {
1599 		stats = (i->f->get_fc_host_stats)(shost);
1600 		if (stats)
1601 			ret = snprintf(buf, 20, "0x%llx\n",
1602 			      (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
1603 	}
1604 	return ret;
1605 }
1606 
1607 
1608 /* generate a read-only statistics attribute */
1609 #define fc_host_statistic(name)						\
1610 static ssize_t show_fcstat_##name(struct device *cd,			\
1611 				  struct device_attribute *attr,	\
1612 				  char *buf)				\
1613 {									\
1614 	return fc_stat_show(cd, buf, 					\
1615 			    offsetof(struct fc_host_statistics, name));	\
1616 }									\
1617 static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
1618 
1619 fc_host_statistic(seconds_since_last_reset);
1620 fc_host_statistic(tx_frames);
1621 fc_host_statistic(tx_words);
1622 fc_host_statistic(rx_frames);
1623 fc_host_statistic(rx_words);
1624 fc_host_statistic(lip_count);
1625 fc_host_statistic(nos_count);
1626 fc_host_statistic(error_frames);
1627 fc_host_statistic(dumped_frames);
1628 fc_host_statistic(link_failure_count);
1629 fc_host_statistic(loss_of_sync_count);
1630 fc_host_statistic(loss_of_signal_count);
1631 fc_host_statistic(prim_seq_protocol_err_count);
1632 fc_host_statistic(invalid_tx_word_count);
1633 fc_host_statistic(invalid_crc_count);
1634 fc_host_statistic(fcp_input_requests);
1635 fc_host_statistic(fcp_output_requests);
1636 fc_host_statistic(fcp_control_requests);
1637 fc_host_statistic(fcp_input_megabytes);
1638 fc_host_statistic(fcp_output_megabytes);
1639 
1640 static ssize_t
1641 fc_reset_statistics(struct device *dev, struct device_attribute *attr,
1642 		    const char *buf, size_t count)
1643 {
1644 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1645 	struct fc_internal *i = to_fc_internal(shost->transportt);
1646 
1647 	/* ignore any data value written to the attribute */
1648 	if (i->f->reset_fc_host_stats) {
1649 		i->f->reset_fc_host_stats(shost);
1650 		return count;
1651 	}
1652 
1653 	return -ENOENT;
1654 }
1655 static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1656 				fc_reset_statistics);
1657 
1658 static struct attribute *fc_statistics_attrs[] = {
1659 	&device_attr_host_seconds_since_last_reset.attr,
1660 	&device_attr_host_tx_frames.attr,
1661 	&device_attr_host_tx_words.attr,
1662 	&device_attr_host_rx_frames.attr,
1663 	&device_attr_host_rx_words.attr,
1664 	&device_attr_host_lip_count.attr,
1665 	&device_attr_host_nos_count.attr,
1666 	&device_attr_host_error_frames.attr,
1667 	&device_attr_host_dumped_frames.attr,
1668 	&device_attr_host_link_failure_count.attr,
1669 	&device_attr_host_loss_of_sync_count.attr,
1670 	&device_attr_host_loss_of_signal_count.attr,
1671 	&device_attr_host_prim_seq_protocol_err_count.attr,
1672 	&device_attr_host_invalid_tx_word_count.attr,
1673 	&device_attr_host_invalid_crc_count.attr,
1674 	&device_attr_host_fcp_input_requests.attr,
1675 	&device_attr_host_fcp_output_requests.attr,
1676 	&device_attr_host_fcp_control_requests.attr,
1677 	&device_attr_host_fcp_input_megabytes.attr,
1678 	&device_attr_host_fcp_output_megabytes.attr,
1679 	&device_attr_host_reset_statistics.attr,
1680 	NULL
1681 };
1682 
1683 static struct attribute_group fc_statistics_group = {
1684 	.name = "statistics",
1685 	.attrs = fc_statistics_attrs,
1686 };
1687 
1688 
1689 /* Host Vport Attributes */
1690 
1691 static int
1692 fc_parse_wwn(const char *ns, u64 *nm)
1693 {
1694 	unsigned int i, j;
1695 	u8 wwn[8];
1696 
1697 	memset(wwn, 0, sizeof(wwn));
1698 
1699 	/* Validate and store the new name */
1700 	for (i=0, j=0; i < 16; i++) {
1701 		if ((*ns >= 'a') && (*ns <= 'f'))
1702 			j = ((j << 4) | ((*ns++ -'a') + 10));
1703 		else if ((*ns >= 'A') && (*ns <= 'F'))
1704 			j = ((j << 4) | ((*ns++ -'A') + 10));
1705 		else if ((*ns >= '0') && (*ns <= '9'))
1706 			j = ((j << 4) | (*ns++ -'0'));
1707 		else
1708 			return -EINVAL;
1709 		if (i % 2) {
1710 			wwn[i/2] = j & 0xff;
1711 			j = 0;
1712 		}
1713 	}
1714 
1715 	*nm = wwn_to_u64(wwn);
1716 
1717 	return 0;
1718 }
1719 
1720 
1721 /*
1722  * "Short-cut" sysfs variable to create a new vport on a FC Host.
1723  * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
1724  * will default to a NPIV-based FCP_Initiator; The WWNs are specified
1725  * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
1726  */
1727 static ssize_t
1728 store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1729 			   const char *buf, size_t count)
1730 {
1731 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1732 	struct fc_vport_identifiers vid;
1733 	struct fc_vport *vport;
	unsigned int cnt = count;
1735 	int stat;
1736 
1737 	memset(&vid, 0, sizeof(vid));
1738 
1739 	/* count may include a LF at end of string */
1740 	if (buf[cnt-1] == '\n')
1741 		cnt--;
1742 
	/* validate the "<wwpn>:<wwnn>" length and separator position */
1744 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
1745 		return -EINVAL;
1746 
1747 	stat = fc_parse_wwn(&buf[0], &vid.port_name);
1748 	if (stat)
1749 		return stat;
1750 
1751 	stat = fc_parse_wwn(&buf[17], &vid.node_name);
1752 	if (stat)
1753 		return stat;
1754 
1755 	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1756 	vid.vport_type = FC_PORTTYPE_NPIV;
	/* vid.symbolic_name is already zeroed */
1758 	vid.disable = false;		/* always enabled */
1759 
1760 	/* we only allow support on Channel 0 !!! */
1761 	stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1762 	return stat ? stat : count;
1763 }
1764 static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1765 			store_fc_host_vport_create);
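
/*
 * Example usage (illustrative only; hostN and the WWNs are assumptions).
 * The value must be exactly 16 hex characters, a ':', and 16 more hex
 * characters, with no "0x" prefixes:
 *
 *   echo '2101001b32a9d5e0:2001001b32a9d5e0' > \
 *       /sys/class/fc_host/hostN/vport_create
 */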
1766 
1767 
1768 /*
1769  * "Short-cut" sysfs variable to delete a vport on a FC Host.
1770  * Vport is identified by a string containing "<WWPN>:<WWNN>".
1771  * The WWNs are specified as hex characters, and may *not* contain
1772  * any prefixes (e.g. 0x, x, etc)
1773  */
1774 static ssize_t
1775 store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1776 			   const char *buf, size_t count)
1777 {
1778 	struct Scsi_Host *shost = transport_class_to_shost(dev);
1779 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1780 	struct fc_vport *vport;
1781 	u64 wwpn, wwnn;
1782 	unsigned long flags;
	unsigned int cnt = count;
1784 	int stat, match;
1785 
1786 	/* count may include a LF at end of string */
1787 	if (buf[cnt-1] == '\n')
1788 		cnt--;
1789 
	/* validate the "<wwpn>:<wwnn>" length and separator position */
1791 	if ((cnt != (16+1+16)) || (buf[16] != ':'))
1792 		return -EINVAL;
1793 
1794 	stat = fc_parse_wwn(&buf[0], &wwpn);
1795 	if (stat)
1796 		return stat;
1797 
1798 	stat = fc_parse_wwn(&buf[17], &wwnn);
1799 	if (stat)
1800 		return stat;
1801 
1802 	spin_lock_irqsave(shost->host_lock, flags);
1803 	match = 0;
1804 	/* we only allow support on Channel 0 !!! */
1805 	list_for_each_entry(vport, &fc_host->vports, peers) {
1806 		if ((vport->channel == 0) &&
1807 		    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1808 			match = 1;
1809 			break;
1810 		}
1811 	}
1812 	spin_unlock_irqrestore(shost->host_lock, flags);
1813 
1814 	if (!match)
1815 		return -ENODEV;
1816 
1817 	stat = fc_vport_terminate(vport);
1818 	return stat ? stat : count;
1819 }
1820 static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1821 			store_fc_host_vport_delete);
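
/*
 * Example usage (illustrative only; same "<WWPN>:<WWNN>" format as
 * vport_create, and the WWNs must match an existing vport on channel 0):
 *
 *   echo '2101001b32a9d5e0:2001001b32a9d5e0' > \
 *       /sys/class/fc_host/hostN/vport_delete
 */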
1822 
1823 
1824 static int fc_host_match(struct attribute_container *cont,
1825 			  struct device *dev)
1826 {
1827 	struct Scsi_Host *shost;
1828 	struct fc_internal *i;
1829 
1830 	if (!scsi_is_host_device(dev))
1831 		return 0;
1832 
1833 	shost = dev_to_shost(dev);
1834 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1835 	    != &fc_host_class.class)
1836 		return 0;
1837 
1838 	i = to_fc_internal(shost->transportt);
1839 
1840 	return &i->t.host_attrs.ac == cont;
1841 }
1842 
1843 static int fc_target_match(struct attribute_container *cont,
1844 			    struct device *dev)
1845 {
1846 	struct Scsi_Host *shost;
1847 	struct fc_internal *i;
1848 
1849 	if (!scsi_is_target_device(dev))
1850 		return 0;
1851 
1852 	shost = dev_to_shost(dev->parent);
1853 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1854 	    != &fc_host_class.class)
1855 		return 0;
1856 
1857 	i = to_fc_internal(shost->transportt);
1858 
1859 	return &i->t.target_attrs.ac == cont;
1860 }
1861 
1862 static void fc_rport_dev_release(struct device *dev)
1863 {
1864 	struct fc_rport *rport = dev_to_rport(dev);
1865 	put_device(dev->parent);
1866 	kfree(rport);
1867 }
1868 
1869 int scsi_is_fc_rport(const struct device *dev)
1870 {
1871 	return dev->release == fc_rport_dev_release;
1872 }
1873 EXPORT_SYMBOL(scsi_is_fc_rport);
1874 
1875 static int fc_rport_match(struct attribute_container *cont,
1876 			    struct device *dev)
1877 {
1878 	struct Scsi_Host *shost;
1879 	struct fc_internal *i;
1880 
1881 	if (!scsi_is_fc_rport(dev))
1882 		return 0;
1883 
1884 	shost = dev_to_shost(dev->parent);
1885 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1886 	    != &fc_host_class.class)
1887 		return 0;
1888 
1889 	i = to_fc_internal(shost->transportt);
1890 
1891 	return &i->rport_attr_cont.ac == cont;
1892 }
1893 
1894 
1895 static void fc_vport_dev_release(struct device *dev)
1896 {
1897 	struct fc_vport *vport = dev_to_vport(dev);
1898 	put_device(dev->parent);		/* release kobj parent */
1899 	kfree(vport);
1900 }
1901 
1902 int scsi_is_fc_vport(const struct device *dev)
1903 {
1904 	return dev->release == fc_vport_dev_release;
1905 }
1906 EXPORT_SYMBOL(scsi_is_fc_vport);
1907 
1908 static int fc_vport_match(struct attribute_container *cont,
1909 			    struct device *dev)
1910 {
1911 	struct fc_vport *vport;
1912 	struct Scsi_Host *shost;
1913 	struct fc_internal *i;
1914 
1915 	if (!scsi_is_fc_vport(dev))
1916 		return 0;
1917 	vport = dev_to_vport(dev);
1918 
1919 	shost = vport_to_shost(vport);
1920 	if (!shost->transportt  || shost->transportt->host_attrs.ac.class
1921 	    != &fc_host_class.class)
1922 		return 0;
1923 
1924 	i = to_fc_internal(shost->transportt);
1925 	return &i->vport_attr_cont.ac == cont;
1926 }
1927 
1928 
1929 /**
1930  * fc_timed_out - FC Transport I/O timeout intercept handler
1931  * @scmd:	The SCSI command which timed out
1932  *
1933  * This routine protects against error handlers getting invoked while a
 * rport is in a blocked state, typically due to a temporary loss of
1935  * connectivity. If the error handlers are allowed to proceed, requests
1936  * to abort i/o, reset the target, etc will likely fail as there is no way
1937  * to communicate with the device to perform the requested function. These
1938  * failures may result in the midlayer taking the device offline, requiring
1939  * manual intervention to restore operation.
1940  *
1941  * This routine, called whenever an i/o times out, validates the state of
1942  * the underlying rport. If the rport is blocked, it returns
1943  * EH_RESET_TIMER, which will continue to reschedule the timeout.
1944  * Eventually, either the device will return, or devloss_tmo will fire,
1945  * and when the timeout then fires, it will be handled normally.
1946  * If the rport is not blocked, normal error handling continues.
1947  *
1948  * Notes:
1949  *	This routine assumes no locks are held on entry.
1950  */
1951 static enum blk_eh_timer_return
1952 fc_timed_out(struct scsi_cmnd *scmd)
1953 {
1954 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1955 
1956 	if (rport->port_state == FC_PORTSTATE_BLOCKED)
1957 		return BLK_EH_RESET_TIMER;
1958 
1959 	return BLK_EH_NOT_HANDLED;
1960 }
1961 
1962 /*
1963  * Called by fc_user_scan to locate an rport on the shost that
1964  * matches the channel and target id, and invoke scsi_scan_target()
1965  * on the rport.
1966  */
1967 static void
1968 fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
1969 {
1970 	struct fc_rport *rport;
1971 	unsigned long flags;
1972 
1973 	spin_lock_irqsave(shost->host_lock, flags);
1974 
1975 	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
1976 		if (rport->scsi_target_id == -1)
1977 			continue;
1978 
1979 		if (rport->port_state != FC_PORTSTATE_ONLINE)
1980 			continue;
1981 
1982 		if ((channel == rport->channel) &&
1983 		    (id == rport->scsi_target_id)) {
1984 			spin_unlock_irqrestore(shost->host_lock, flags);
1985 			scsi_scan_target(&rport->dev, channel, id, lun, 1);
1986 			return;
1987 		}
1988 	}
1989 
1990 	spin_unlock_irqrestore(shost->host_lock, flags);
1991 }
1992 
1993 /*
1994  * Called via sysfs scan routines. Necessary, as the FC transport
1995  * wants to place all target objects below the rport object. So this
1996  * routine must invoke the scsi_scan_target() routine with the rport
1997  * object as the parent.
1998  */
1999 static int
2000 fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2001 {
2002 	uint chlo, chhi;
2003 	uint tgtlo, tgthi;
2004 
2005 	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2006 	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2007 	    ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2008 		return -EINVAL;
2009 
2010 	if (channel == SCAN_WILD_CARD) {
2011 		chlo = 0;
2012 		chhi = shost->max_channel + 1;
2013 	} else {
2014 		chlo = channel;
2015 		chhi = channel + 1;
2016 	}
2017 
2018 	if (id == SCAN_WILD_CARD) {
2019 		tgtlo = 0;
2020 		tgthi = shost->max_id;
2021 	} else {
2022 		tgtlo = id;
2023 		tgthi = id + 1;
2024 	}
2025 
	for ( ; chlo < chhi; chlo++) {
		uint tgt;

		for (tgt = tgtlo; tgt < tgthi; tgt++)
			fc_user_scan_tgt(shost, chlo, tgt, lun);
	}
2029 
2030 	return 0;
2031 }
2032 
2033 static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
2034 				int result)
2035 {
2036 	struct fc_internal *i = to_fc_internal(shost->transportt);
2037 	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
2038 }
2039 
2040 static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
2041 {
2042 	struct fc_internal *i = to_fc_internal(shost->transportt);
2043 	return i->f->it_nexus_response(shost, nexus, result);
2044 }
2045 
2046 struct scsi_transport_template *
2047 fc_attach_transport(struct fc_function_template *ft)
2048 {
2049 	int count;
2050 	struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2051 					GFP_KERNEL);
2052 
2053 	if (unlikely(!i))
2054 		return NULL;
2055 
2056 	i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2057 	i->t.target_attrs.ac.class = &fc_transport_class.class;
2058 	i->t.target_attrs.ac.match = fc_target_match;
2059 	i->t.target_size = sizeof(struct fc_starget_attrs);
2060 	transport_container_register(&i->t.target_attrs);
2061 
2062 	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2063 	i->t.host_attrs.ac.class = &fc_host_class.class;
2064 	i->t.host_attrs.ac.match = fc_host_match;
2065 	i->t.host_size = sizeof(struct fc_host_attrs);
2066 	if (ft->get_fc_host_stats)
2067 		i->t.host_attrs.statistics = &fc_statistics_group;
2068 	transport_container_register(&i->t.host_attrs);
2069 
2070 	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2071 	i->rport_attr_cont.ac.class = &fc_rport_class.class;
2072 	i->rport_attr_cont.ac.match = fc_rport_match;
2073 	transport_container_register(&i->rport_attr_cont);
2074 
2075 	i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2076 	i->vport_attr_cont.ac.class = &fc_vport_class.class;
2077 	i->vport_attr_cont.ac.match = fc_vport_match;
2078 	transport_container_register(&i->vport_attr_cont);
2079 
2080 	i->f = ft;
2081 
2082 	/* Transport uses the shost workq for scsi scanning */
2083 	i->t.create_work_queue = 1;
2084 
2085 	i->t.eh_timed_out = fc_timed_out;
2086 
2087 	i->t.user_scan = fc_user_scan;
2088 
2089 	/* target-mode drivers' functions */
2090 	i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
2091 	i->t.it_nexus_response = fc_it_nexus_response;
2092 
2093 	/*
2094 	 * Setup SCSI Target Attributes.
2095 	 */
2096 	count = 0;
2097 	SETUP_STARGET_ATTRIBUTE_RD(node_name);
2098 	SETUP_STARGET_ATTRIBUTE_RD(port_name);
2099 	SETUP_STARGET_ATTRIBUTE_RD(port_id);
2100 
2101 	BUG_ON(count > FC_STARGET_NUM_ATTRS);
2102 
2103 	i->starget_attrs[count] = NULL;
2104 
2105 
2106 	/*
2107 	 * Setup SCSI Host Attributes.
2108 	 */
	count = 0;
2110 	SETUP_HOST_ATTRIBUTE_RD(node_name);
2111 	SETUP_HOST_ATTRIBUTE_RD(port_name);
2112 	SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
2113 	SETUP_HOST_ATTRIBUTE_RD(supported_classes);
2114 	SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
2115 	SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
2116 	SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2117 	if (ft->vport_create) {
2118 		SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2119 		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2120 	}
2121 	SETUP_HOST_ATTRIBUTE_RD(serial_number);
2122 
2123 	SETUP_HOST_ATTRIBUTE_RD(port_id);
2124 	SETUP_HOST_ATTRIBUTE_RD(port_type);
2125 	SETUP_HOST_ATTRIBUTE_RD(port_state);
2126 	SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
2127 	SETUP_HOST_ATTRIBUTE_RD(speed);
2128 	SETUP_HOST_ATTRIBUTE_RD(fabric_name);
2129 	SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2130 	SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2131 
2132 	/* Transport-managed attributes */
2133 	SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2134 	if (ft->issue_fc_host_lip)
2135 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2136 	if (ft->vport_create)
2137 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2138 	if (ft->vport_delete)
2139 		SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2140 
2141 	BUG_ON(count > FC_HOST_NUM_ATTRS);
2142 
2143 	i->host_attrs[count] = NULL;
2144 
2145 	/*
2146 	 * Setup Remote Port Attributes.
2147 	 */
	count = 0;
2149 	SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
2150 	SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
2151 	SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2152 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
2153 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
2154 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
2155 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2156 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
2157 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2158 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2159 
2160 	BUG_ON(count > FC_RPORT_NUM_ATTRS);
2161 
2162 	i->rport_attrs[count] = NULL;
2163 
2164 	/*
2165 	 * Setup Virtual Port Attributes.
2166 	 */
	count = 0;
2168 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2169 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2170 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2171 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2172 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2173 	SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2174 	SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2175 	SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2176 	SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2177 
2178 	BUG_ON(count > FC_VPORT_NUM_ATTRS);
2179 
2180 	i->vport_attrs[count] = NULL;
2181 
2182 	return &i->t;
2183 }
2184 EXPORT_SYMBOL(fc_attach_transport);
2185 
2186 void fc_release_transport(struct scsi_transport_template *t)
2187 {
2188 	struct fc_internal *i = to_fc_internal(t);
2189 
2190 	transport_container_unregister(&i->t.target_attrs);
2191 	transport_container_unregister(&i->t.host_attrs);
2192 	transport_container_unregister(&i->rport_attr_cont);
2193 	transport_container_unregister(&i->vport_attr_cont);
2194 
2195 	kfree(i);
2196 }
2197 EXPORT_SYMBOL(fc_release_transport);
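
/*
 * Typical LLDD usage of the attach/release pair (a minimal sketch;
 * my_fc_functions is a hypothetical driver-defined fc_function_template):
 *
 *   static struct scsi_transport_template *my_fc_transport;
 *
 *   my_fc_transport = fc_attach_transport(&my_fc_functions);
 *   if (!my_fc_transport)
 *           return -ENODEV;
 *   ...
 *   shost->transportt = my_fc_transport;    (set before scsi_add_host())
 *   ...
 *   fc_release_transport(my_fc_transport);  (at module unload)
 */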
2198 
2199 /**
2200  * fc_queue_work - Queue work to the fc_host workqueue.
2201  * @shost:	Pointer to Scsi_Host bound to fc_host.
2202  * @work:	Work to queue for execution.
2203  *
2204  * Return value:
2205  * 	1 - work queued for execution
2206  *	0 - work is already queued
2207  *	-EINVAL - work queue doesn't exist
2208  */
2209 static int
2210 fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2211 {
2212 	if (unlikely(!fc_host_work_q(shost))) {
2213 		printk(KERN_ERR
2214 			"ERROR: FC host '%s' attempted to queue work, "
2215 			"when no workqueue created.\n", shost->hostt->name);
2216 		dump_stack();
2217 
2218 		return -EINVAL;
2219 	}
2220 
2221 	return queue_work(fc_host_work_q(shost), work);
2222 }
2223 
2224 /**
2225  * fc_flush_work - Flush a fc_host's workqueue.
2226  * @shost:	Pointer to Scsi_Host bound to fc_host.
2227  */
2228 static void
2229 fc_flush_work(struct Scsi_Host *shost)
2230 {
2231 	if (!fc_host_work_q(shost)) {
2232 		printk(KERN_ERR
2233 			"ERROR: FC host '%s' attempted to flush work, "
2234 			"when no workqueue created.\n", shost->hostt->name);
2235 		dump_stack();
2236 		return;
2237 	}
2238 
2239 	flush_workqueue(fc_host_work_q(shost));
2240 }
2241 
2242 /**
2243  * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
2244  * @shost:	Pointer to Scsi_Host bound to fc_host.
2245  * @work:	Work to queue for execution.
2246  * @delay:	jiffies to delay the work queuing
2247  *
2248  * Return value:
2249  * 	1 on success / 0 already queued / < 0 for error
2250  */
2251 static int
2252 fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2253 				unsigned long delay)
2254 {
2255 	if (unlikely(!fc_host_devloss_work_q(shost))) {
2256 		printk(KERN_ERR
2257 			"ERROR: FC host '%s' attempted to queue work, "
2258 			"when no workqueue created.\n", shost->hostt->name);
2259 		dump_stack();
2260 
2261 		return -EINVAL;
2262 	}
2263 
2264 	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2265 }
2266 
2267 /**
2268  * fc_flush_devloss - Flush a fc_host's devloss workqueue.
2269  * @shost:	Pointer to Scsi_Host bound to fc_host.
2270  */
2271 static void
2272 fc_flush_devloss(struct Scsi_Host *shost)
2273 {
2274 	if (!fc_host_devloss_work_q(shost)) {
2275 		printk(KERN_ERR
2276 			"ERROR: FC host '%s' attempted to flush work, "
2277 			"when no workqueue created.\n", shost->hostt->name);
2278 		dump_stack();
2279 		return;
2280 	}
2281 
2282 	flush_workqueue(fc_host_devloss_work_q(shost));
2283 }
2284 
2285 
2286 /**
2287  * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
2288  * @shost:	Which &Scsi_Host
2289  *
 * This routine is expected to be called immediately preceding a
 * driver's call to scsi_remove_host().
2292  *
 * WARNING: A driver utilizing the fc_transport that fails to call
 *   this routine prior to scsi_remove_host() will leave dangling
 *   objects in /sys/class/fc_remote_ports. Access to any of these
2296  *   objects can result in a system crash !!!
2297  *
2298  * Notes:
2299  *	This routine assumes no locks are held on entry.
2300  */
2301 void
2302 fc_remove_host(struct Scsi_Host *shost)
2303 {
2304 	struct fc_vport *vport = NULL, *next_vport = NULL;
2305 	struct fc_rport *rport = NULL, *next_rport = NULL;
2306 	struct workqueue_struct *work_q;
2307 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2308 	unsigned long flags;
2309 
2310 	spin_lock_irqsave(shost->host_lock, flags);
2311 
2312 	/* Remove any vports */
2313 	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2314 		fc_queue_work(shost, &vport->vport_delete_work);
2315 
2316 	/* Remove any remote ports */
2317 	list_for_each_entry_safe(rport, next_rport,
2318 			&fc_host->rports, peers) {
2319 		list_del(&rport->peers);
2320 		rport->port_state = FC_PORTSTATE_DELETED;
2321 		fc_queue_work(shost, &rport->rport_delete_work);
2322 	}
2323 
2324 	list_for_each_entry_safe(rport, next_rport,
2325 			&fc_host->rport_bindings, peers) {
2326 		list_del(&rport->peers);
2327 		rport->port_state = FC_PORTSTATE_DELETED;
2328 		fc_queue_work(shost, &rport->rport_delete_work);
2329 	}
2330 
2331 	spin_unlock_irqrestore(shost->host_lock, flags);
2332 
2333 	/* flush all scan work items */
2334 	scsi_flush_work(shost);
2335 
2336 	/* flush all stgt delete, and rport delete work items, then kill it  */
2337 	if (fc_host->work_q) {
2338 		work_q = fc_host->work_q;
2339 		fc_host->work_q = NULL;
2340 		destroy_workqueue(work_q);
2341 	}
2342 
2343 	/* flush all devloss work items, then kill it  */
2344 	if (fc_host->devloss_work_q) {
2345 		work_q = fc_host->devloss_work_q;
2346 		fc_host->devloss_work_q = NULL;
2347 		destroy_workqueue(work_q);
2348 	}
2349 }
2350 EXPORT_SYMBOL(fc_remove_host);
2351 
2352 static void fc_terminate_rport_io(struct fc_rport *rport)
2353 {
2354 	struct Scsi_Host *shost = rport_to_shost(rport);
2355 	struct fc_internal *i = to_fc_internal(shost->transportt);
2356 
2357 	/* Involve the LLDD if possible to terminate all io on the rport. */
2358 	if (i->f->terminate_rport_io)
2359 		i->f->terminate_rport_io(rport);
2360 
2361 	/*
2362 	 * must unblock to flush queued IO. The caller will have set
2363 	 * the port_state or flags, so that fc_remote_port_chkready will
2364 	 * fail IO.
2365 	 */
2366 	scsi_target_unblock(&rport->dev);
2367 }
2368 
2369 /**
 * fc_starget_delete - called to delete the scsi descendants of an rport
2371  * @work:	remote port to be operated on.
2372  *
2373  * Deletes target and all sdevs.
2374  */
2375 static void
2376 fc_starget_delete(struct work_struct *work)
2377 {
2378 	struct fc_rport *rport =
2379 		container_of(work, struct fc_rport, stgt_delete_work);
2380 
2381 	fc_terminate_rport_io(rport);
2382 	scsi_remove_target(&rport->dev);
2383 }
2384 
2385 
2386 /**
2387  * fc_rport_final_delete - finish rport termination and delete it.
2388  * @work:	remote port to be deleted.
2389  */
2390 static void
2391 fc_rport_final_delete(struct work_struct *work)
2392 {
2393 	struct fc_rport *rport =
2394 		container_of(work, struct fc_rport, rport_delete_work);
2395 	struct device *dev = &rport->dev;
2396 	struct Scsi_Host *shost = rport_to_shost(rport);
2397 	struct fc_internal *i = to_fc_internal(shost->transportt);
2398 	unsigned long flags;
2399 	int do_callback = 0;
2400 
2401 	/*
2402 	 * if a scan is pending, flush the SCSI Host work_q so that
	 * we can reclaim the rport scan work element.
2404 	 */
2405 	if (rport->flags & FC_RPORT_SCAN_PENDING)
2406 		scsi_flush_work(shost);
2407 
2408 	fc_terminate_rport_io(rport);
2409 
2410 	/*
2411 	 * Cancel any outstanding timers. These should really exist
2412 	 * only when rmmod'ing the LLDD and we're asking for
2413 	 * immediate termination of the rports
2414 	 */
2415 	spin_lock_irqsave(shost->host_lock, flags);
2416 	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2417 		spin_unlock_irqrestore(shost->host_lock, flags);
2418 		if (!cancel_delayed_work(&rport->fail_io_work))
2419 			fc_flush_devloss(shost);
2420 		if (!cancel_delayed_work(&rport->dev_loss_work))
2421 			fc_flush_devloss(shost);
2422 		spin_lock_irqsave(shost->host_lock, flags);
2423 		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2424 	}
2425 	spin_unlock_irqrestore(shost->host_lock, flags);
2426 
2427 	/* Delete SCSI target and sdevs */
2428 	if (rport->scsi_target_id != -1)
2429 		fc_starget_delete(&rport->stgt_delete_work);
2430 
2431 	/*
2432 	 * Notify the driver that the rport is now dead. The LLDD will
2433 	 * also guarantee that any communication to the rport is terminated
2434 	 *
2435 	 * Avoid this call if we already called it when we preserved the
2436 	 * rport for the binding.
2437 	 */
2438 	spin_lock_irqsave(shost->host_lock, flags);
2439 	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2440 	    (i->f->dev_loss_tmo_callbk)) {
2441 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
2442 		do_callback = 1;
2443 	}
2444 	spin_unlock_irqrestore(shost->host_lock, flags);
2445 
2446 	if (do_callback)
2447 		i->f->dev_loss_tmo_callbk(rport);
2448 
2449 	fc_bsg_remove(rport->rqst_q);
2450 
2451 	transport_remove_device(dev);
2452 	device_del(dev);
2453 	transport_destroy_device(dev);
2454 	put_device(&shost->shost_gendev);	/* for fc_host->rport list */
2455 	put_device(dev);			/* for self-reference */
2456 }
2457 
2458 
2459 /**
2460  * fc_rport_create - allocates and creates a remote FC port.
2461  * @shost:	scsi host the remote port is connected to.
2462  * @channel:	Channel on shost port connected to.
2463  * @ids:	The world wide names, fc address, and FC4 port
2464  *		roles for the remote port.
2465  *
 * Allocates and creates the remote port structure, including the
2467  * class and sysfs creation.
2468  *
2469  * Notes:
2470  *	This routine assumes no locks are held on entry.
2471  */
2472 static struct fc_rport *
2473 fc_rport_create(struct Scsi_Host *shost, int channel,
2474 	struct fc_rport_identifiers  *ids)
2475 {
2476 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2477 	struct fc_internal *fci = to_fc_internal(shost->transportt);
2478 	struct fc_rport *rport;
2479 	struct device *dev;
2480 	unsigned long flags;
2481 	int error;
2482 	size_t size;
2483 
2484 	size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2485 	rport = kzalloc(size, GFP_KERNEL);
2486 	if (unlikely(!rport)) {
2487 		printk(KERN_ERR "%s: allocation failure\n", __func__);
2488 		return NULL;
2489 	}
2490 
2491 	rport->maxframe_size = -1;
2492 	rport->supported_classes = FC_COS_UNSPECIFIED;
2493 	rport->dev_loss_tmo = fc_dev_loss_tmo;
2494 	memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2495 	memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2496 	rport->port_id = ids->port_id;
2497 	rport->roles = ids->roles;
2498 	rport->port_state = FC_PORTSTATE_ONLINE;
2499 	if (fci->f->dd_fcrport_size)
2500 		rport->dd_data = &rport[1];
2501 	rport->channel = channel;
2502 	rport->fast_io_fail_tmo = -1;
2503 
2504 	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
2505 	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
2506 	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
2507 	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
2508 	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
2509 
2510 	spin_lock_irqsave(shost->host_lock, flags);
2511 
2512 	rport->number = fc_host->next_rport_number++;
2513 	if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
2514 		rport->scsi_target_id = fc_host->next_target_id++;
2515 	else
2516 		rport->scsi_target_id = -1;
2517 	list_add_tail(&rport->peers, &fc_host->rports);
2518 	get_device(&shost->shost_gendev);	/* for fc_host->rport list */
2519 
2520 	spin_unlock_irqrestore(shost->host_lock, flags);
2521 
2522 	dev = &rport->dev;
2523 	device_initialize(dev);			/* takes self reference */
2524 	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
2525 	dev->release = fc_rport_dev_release;
2526 	dev_set_name(dev, "rport-%d:%d-%d",
2527 		     shost->host_no, channel, rport->number);
2528 	transport_setup_device(dev);
2529 
2530 	error = device_add(dev);
2531 	if (error) {
2532 		printk(KERN_ERR "FC Remote Port device_add failed\n");
2533 		goto delete_rport;
2534 	}
2535 	transport_add_device(dev);
2536 	transport_configure_device(dev);
2537 
2538 	fc_bsg_rportadd(shost, rport);
2539 	/* ignore any bsg add error - we just can't do sgio */
2540 
2541 	if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2542 		/* initiate a scan of the target */
2543 		rport->flags |= FC_RPORT_SCAN_PENDING;
2544 		scsi_queue_work(shost, &rport->scan_work);
2545 	}
2546 
2547 	return rport;
2548 
2549 delete_rport:
2550 	transport_destroy_device(dev);
2551 	spin_lock_irqsave(shost->host_lock, flags);
2552 	list_del(&rport->peers);
2553 	put_device(&shost->shost_gendev);	/* for fc_host->rport list */
2554 	spin_unlock_irqrestore(shost->host_lock, flags);
2555 	put_device(dev->parent);
2556 	kfree(rport);
2557 	return NULL;
2558 }
2559 
2560 /**
2561  * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
2562  * @shost:	scsi host the remote port is connected to.
2563  * @channel:	Channel on shost port connected to.
2564  * @ids:	The world wide names, fc address, and FC4 port
2565  *		roles for the remote port.
2566  *
2567  * The LLDD calls this routine to notify the transport of the existence
 * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn)
 * of the port, its FC address (port_id), and the FC4 roles that are
2570  * active for the port.
2571  *
2572  * For ports that are FCP targets (aka scsi targets), the FC transport
2573  * maintains consistent target id bindings on behalf of the LLDD.
2574  * A consistent target id binding is an assignment of a target id to
2575  * a remote port identifier, which persists while the scsi host is
2576  * attached. The remote port can disappear, then later reappear, and
 * its target id assignment remains the same. This allows for shifts
2578  * in FC addressing (if binding by wwpn or wwnn) with no apparent
2579  * changes to the scsi subsystem which is based on scsi host number and
2580  * target id values.  Bindings are only valid during the attachment of
2581  * the scsi host. If the host detaches, then later re-attaches, target
2582  * id bindings may change.
2583  *
2584  * This routine is responsible for returning a remote port structure.
2585  * The routine will search the list of remote ports it maintains
2586  * internally on behalf of consistent target id mappings. If found, the
2587  * remote port structure will be reused. Otherwise, a new remote port
2588  * structure will be allocated.
2589  *
2590  * Whenever a remote port is allocated, a new fc_remote_port class
2591  * device is created.
2592  *
2593  * Should not be called from interrupt context.
2594  *
2595  * Notes:
2596  *	This routine assumes no locks are held on entry.
2597  */
2598 struct fc_rport *
2599 fc_remote_port_add(struct Scsi_Host *shost, int channel,
2600 	struct fc_rport_identifiers  *ids)
2601 {
2602 	struct fc_internal *fci = to_fc_internal(shost->transportt);
2603 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2604 	struct fc_rport *rport;
2605 	unsigned long flags;
2606 	int match = 0;
2607 
2608 	/* ensure any stgt delete functions are done */
2609 	fc_flush_work(shost);
2610 
2611 	/*
2612 	 * Search the list of "active" rports, for an rport that has been
2613 	 * deleted, but we've held off the real delete while the target
2614 	 * is in a "blocked" state.
2615 	 */
2616 	spin_lock_irqsave(shost->host_lock, flags);
2617 
2618 	list_for_each_entry(rport, &fc_host->rports, peers) {
2619 
2620 		if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
2621 			(rport->channel == channel)) {
2622 
2623 			switch (fc_host->tgtid_bind_type) {
2624 			case FC_TGTID_BIND_BY_WWPN:
2625 			case FC_TGTID_BIND_NONE:
2626 				if (rport->port_name == ids->port_name)
2627 					match = 1;
2628 				break;
2629 			case FC_TGTID_BIND_BY_WWNN:
2630 				if (rport->node_name == ids->node_name)
2631 					match = 1;
2632 				break;
2633 			case FC_TGTID_BIND_BY_ID:
2634 				if (rport->port_id == ids->port_id)
2635 					match = 1;
2636 				break;
2637 			}
2638 
2639 			if (match) {
2640 
2641 				memcpy(&rport->node_name, &ids->node_name,
2642 					sizeof(rport->node_name));
2643 				memcpy(&rport->port_name, &ids->port_name,
2644 					sizeof(rport->port_name));
2645 				rport->port_id = ids->port_id;
2646 
2647 				rport->port_state = FC_PORTSTATE_ONLINE;
2648 				rport->roles = ids->roles;
2649 
2650 				spin_unlock_irqrestore(shost->host_lock, flags);
2651 
2652 				if (fci->f->dd_fcrport_size)
2653 					memset(rport->dd_data, 0,
2654 						fci->f->dd_fcrport_size);
2655 
2656 				/*
2657 				 * If we were not a target, cancel the
2658 				 * io terminate and rport timers, and
2659 				 * we're done.
2660 				 *
2661 				 * If we were a target, but our new role
2662 				 * doesn't indicate a target, leave the
2663 				 * timers running expecting the role to
2664 				 * change as the target fully logs in. If
2665 				 * it doesn't, the target will be torn down.
2666 				 *
2667 				 * If we were a target, and our role shows
2668 				 * we're still a target, cancel the timers
2669 				 * and kick off a scan.
2670 				 */
2671 
2672 				/* was a target, not in roles */
2673 				if ((rport->scsi_target_id != -1) &&
2674 				    (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2675 					return rport;
2676 
2677 				/*
2678 				 * Stop the fail io and dev_loss timers.
2679 				 * If they flush, the port_state will
2680 				 * be checked and will NOOP the function.
2681 				 */
2682 				if (!cancel_delayed_work(&rport->fail_io_work))
2683 					fc_flush_devloss(shost);
2684 				if (!cancel_delayed_work(&rport->dev_loss_work))
2685 					fc_flush_devloss(shost);
2686 
2687 				spin_lock_irqsave(shost->host_lock, flags);
2688 
2689 				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2690 						  FC_RPORT_DEVLOSS_PENDING |
2691 						  FC_RPORT_DEVLOSS_CALLBK_DONE);
2692 
2693 				/* if target, initiate a scan */
2694 				if (rport->scsi_target_id != -1) {
2695 					rport->flags |= FC_RPORT_SCAN_PENDING;
2696 					scsi_queue_work(shost,
2697 							&rport->scan_work);
2698 					spin_unlock_irqrestore(shost->host_lock,
2699 							flags);
2700 					scsi_target_unblock(&rport->dev);
2701 				} else
2702 					spin_unlock_irqrestore(shost->host_lock,
2703 							flags);
2704 
2705 				fc_bsg_goose_queue(rport);
2706 
2707 				return rport;
2708 			}
2709 		}
2710 	}
2711 
2712 	/*
2713 	 * Search the bindings array
2714 	 * Note: if never a FCP target, you won't be on this list
2715 	 */
2716 	if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
2717 
2718 		/* search for a matching consistent binding */
2719 
2720 		list_for_each_entry(rport, &fc_host->rport_bindings,
2721 					peers) {
2722 			if (rport->channel != channel)
2723 				continue;
2724 
2725 			switch (fc_host->tgtid_bind_type) {
2726 			case FC_TGTID_BIND_BY_WWPN:
2727 				if (rport->port_name == ids->port_name)
2728 					match = 1;
2729 				break;
2730 			case FC_TGTID_BIND_BY_WWNN:
2731 				if (rport->node_name == ids->node_name)
2732 					match = 1;
2733 				break;
2734 			case FC_TGTID_BIND_BY_ID:
2735 				if (rport->port_id == ids->port_id)
2736 					match = 1;
2737 				break;
2738 			case FC_TGTID_BIND_NONE: /* to keep compiler happy */
2739 				break;
2740 			}
2741 
2742 			if (match) {
2743 				list_move_tail(&rport->peers, &fc_host->rports);
2744 				break;
2745 			}
2746 		}
2747 
2748 		if (match) {
2749 			memcpy(&rport->node_name, &ids->node_name,
2750 				sizeof(rport->node_name));
2751 			memcpy(&rport->port_name, &ids->port_name,
2752 				sizeof(rport->port_name));
2753 			rport->port_id = ids->port_id;
2754 			rport->roles = ids->roles;
2755 			rport->port_state = FC_PORTSTATE_ONLINE;
2756 			rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2757 
2758 			if (fci->f->dd_fcrport_size)
2759 				memset(rport->dd_data, 0,
2760 						fci->f->dd_fcrport_size);
2761 
2762 			if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2763 				/* initiate a scan of the target */
2764 				rport->flags |= FC_RPORT_SCAN_PENDING;
2765 				scsi_queue_work(shost, &rport->scan_work);
2766 				spin_unlock_irqrestore(shost->host_lock, flags);
2767 				scsi_target_unblock(&rport->dev);
2768 			} else
2769 				spin_unlock_irqrestore(shost->host_lock, flags);
2770 
2771 			return rport;
2772 		}
2773 	}
2774 
2775 	spin_unlock_irqrestore(shost->host_lock, flags);
2776 
2777 	/* No consistent binding found - create new remote port entry */
2778 	rport = fc_rport_create(shost, channel, ids);
2779 
2780 	return rport;
2781 }
2782 EXPORT_SYMBOL(fc_remote_port_add);
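
/*
 * Minimal sketch of how an LLDD might report a newly discovered FCP target
 * (illustrative only; wwnn, wwpn and did are hypothetical values obtained
 * from the driver's fabric discovery):
 *
 *   struct fc_rport_identifiers ids = {
 *           .node_name = wwnn,
 *           .port_name = wwpn,
 *           .port_id   = did,
 *           .roles     = FC_PORT_ROLE_FCP_TARGET,
 *   };
 *   struct fc_rport *rport = fc_remote_port_add(shost, 0, &ids);
 *   if (!rport)
 *           (handle the allocation failure)
 */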
2783 
2784 
2785 /**
2786  * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
2787  * @rport:	The remote port that no longer exists
2788  *
2789  * The LLDD calls this routine to notify the transport that a remote
2790  * port is no longer part of the topology. Note: Although a port
2791  * may no longer be part of the topology, it may persist in the remote
2792  * ports displayed by the fc_host. We do this under 2 conditions:
2793  * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
2794  *   This allows the port to temporarily disappear, then reappear without
2795  *   disrupting the SCSI device tree attached to it. During the "blocked"
2796  *   period the port will still exist.
2797  * 2) If the port was a scsi target and disappears for longer than we
 *   expect, we'll delete the port and tear down the SCSI device tree
 *   attached to it. However, we want to semi-persist the target id assigned
 *   to that port in case it eventually reappears. The port structure will
 *   remain (although with minimal information) so that the target id
 *   bindings remain.
2803  *
2804  * If the remote port is not an FCP Target, it will be fully torn down
2805  * and deallocated, including the fc_remote_port class device.
2806  *
2807  * If the remote port is an FCP Target, the port will be placed in a
2808  * temporary blocked state. From the LLDD's perspective, the rport no
2809  * longer exists. From the SCSI midlayer's perspective, the SCSI target
2810  * exists, but all sdevs on it are blocked from further I/O. The following
2811  * is then expected.
2812  *
2813  *   If the remote port does not return (signaled by a LLDD call to
2814  *   fc_remote_port_add()) within the dev_loss_tmo timeout, then the
2815  *   scsi target is removed - killing all outstanding i/o and removing the
 *   scsi devices attached to it. The port structure will be marked Not
2817  *   Present and be partially cleared, leaving only enough information to
2818  *   recognize the remote port relative to the scsi target id binding if
2819  *   it later appears.  The port will remain as long as there is a valid
2820  *   binding (e.g. until the user changes the binding type or unloads the
2821  *   scsi host with the binding).
2822  *
2823  *   If the remote port returns within the dev_loss_tmo value (and matches
2824  *   according to the target id binding type), the port structure will be
2825  *   reused. If it is no longer a SCSI target, the target will be torn
2826  *   down. If it continues to be a SCSI target, then the target will be
2827  *   unblocked (allowing i/o to be resumed), and a scan will be activated
2828  *   to ensure that all luns are detected.
2829  *
2830  * Called from normal process context only - cannot be called from interrupt.
2831  *
2832  * Notes:
2833  *	This routine assumes no locks are held on entry.
2834  */
2835 void
2836 fc_remote_port_delete(struct fc_rport  *rport)
2837 {
2838 	struct Scsi_Host *shost = rport_to_shost(rport);
2839 	int timeout = rport->dev_loss_tmo;
2840 	unsigned long flags;
2841 
2842 	/*
2843 	 * No need to flush the fc_host work_q's, as all adds are synchronous.
2844 	 *
2845 	 * We do need to reclaim the rport scan work element, so eventually
2846 	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
2847 	 * there's still a scan pending.
2848 	 */
2849 
2850 	spin_lock_irqsave(shost->host_lock, flags);
2851 
2852 	if (rport->port_state != FC_PORTSTATE_ONLINE) {
2853 		spin_unlock_irqrestore(shost->host_lock, flags);
2854 		return;
2855 	}
2856 
2857 	/*
	 * In the past, if this was not an FCP Target, we would
	 * unconditionally just jump to deleting the rport.
	 * However, rports can be used as node containers by the LLDD,
	 * and it's not appropriate to just terminate the rport at the
	 * first sign of a loss in connectivity. The LLDD may want to
	 * send ELS traffic to re-validate the login. Deleting the rport
	 * immediately would make it unusable as a node container.
2866 	 * So... we now unconditionally wait dev_loss_tmo before
2867 	 * destroying an rport.
2868 	 */
2869 
2870 	rport->port_state = FC_PORTSTATE_BLOCKED;
2871 
2872 	rport->flags |= FC_RPORT_DEVLOSS_PENDING;
2873 
2874 	spin_unlock_irqrestore(shost->host_lock, flags);
2875 
2876 	if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR &&
2877 	    shost->active_mode & MODE_TARGET)
2878 		fc_tgt_it_nexus_destroy(shost, (unsigned long)rport);
2879 
2880 	scsi_target_block(&rport->dev);
2881 
2882 	/* see if we need to kill io faster than waiting for device loss */
2883 	if ((rport->fast_io_fail_tmo != -1) &&
2884 	    (rport->fast_io_fail_tmo < timeout))
2885 		fc_queue_devloss_work(shost, &rport->fail_io_work,
2886 					rport->fast_io_fail_tmo * HZ);
2887 
2888 	/* cap the length the devices can be blocked until they are deleted */
2889 	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
2890 }
2891 EXPORT_SYMBOL(fc_remote_port_delete);
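
/*
 * Sketch of the expected add/delete pairing (illustrative only): when
 * connectivity to the port is lost the LLDD calls
 * fc_remote_port_delete(rport); if the port returns before dev_loss_tmo
 * expires, the LLDD simply calls fc_remote_port_add() again with the same
 * identifiers and the blocked rport is reused.
 */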
2892 
2893 /**
2894  * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
2895  * @rport:	The remote port that changed.
2896  * @roles:      New roles for this port.
2897  *
2898  * Description: The LLDD calls this routine to notify the transport that the
2899  * roles on a remote port may have changed. The largest effect of this is
 * that if a port now becomes an FCP Target, it must be allocated a
 * scsi target id.  If the port is no longer an FCP target, any
 * scsi target id value assigned to it will persist in case the
 * role changes back to include FCP Target. No changes in the scsi
 * midlayer will be invoked if the role changes, in the expectation
 * that the role will be resumed. If it isn't, normal error processing
 * will take place.
2907  *
2908  * Should not be called from interrupt context.
2909  *
2910  * Notes:
2911  *	This routine assumes no locks are held on entry.
2912  */
2913 void
2914 fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
2915 {
2916 	struct Scsi_Host *shost = rport_to_shost(rport);
2917 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2918 	unsigned long flags;
2919 	int create = 0;
2920 	int ret;
2921 
2922 	spin_lock_irqsave(shost->host_lock, flags);
2923 	if (roles & FC_PORT_ROLE_FCP_TARGET) {
2924 		if (rport->scsi_target_id == -1) {
2925 			rport->scsi_target_id = fc_host->next_target_id++;
2926 			create = 1;
2927 		} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
2928 			create = 1;
2929 	} else if (shost->active_mode & MODE_TARGET) {
2930 		ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport,
2931 					     (char *)&rport->node_name);
2932 		if (ret)
2933 			printk(KERN_ERR "FC Remore Port tgt nexus failed %d\n",
2934 			       ret);
2935 	}
2936 
2937 	rport->roles = roles;
2938 
2939 	spin_unlock_irqrestore(shost->host_lock, flags);
2940 
2941 	if (create) {
2942 		/*
2943 		 * There may have been a delete timer running on the
2944 		 * port. Ensure that it is cancelled as we now know
2945 		 * the port is an FCP Target.
		 * Note: we know the rport exists and is in an online
		 *  state, as the LLDD would not have had an rport
2948 		 *  reference to pass us.
2949 		 *
2950 		 * Take no action on the del_timer failure as the state
2951 		 * machine state change will validate the
2952 		 * transaction.
2953 		 */
2954 		if (!cancel_delayed_work(&rport->fail_io_work))
2955 			fc_flush_devloss(shost);
2956 		if (!cancel_delayed_work(&rport->dev_loss_work))
2957 			fc_flush_devloss(shost);
2958 
2959 		spin_lock_irqsave(shost->host_lock, flags);
2960 		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2961 				  FC_RPORT_DEVLOSS_PENDING);
2962 		spin_unlock_irqrestore(shost->host_lock, flags);
2963 
2964 		/* ensure any stgt delete functions are done */
2965 		fc_flush_work(shost);
2966 
2967 		/* initiate a scan of the target */
2968 		spin_lock_irqsave(shost->host_lock, flags);
2969 		rport->flags |= FC_RPORT_SCAN_PENDING;
2970 		scsi_queue_work(shost, &rport->scan_work);
2971 		spin_unlock_irqrestore(shost->host_lock, flags);
2972 		scsi_target_unblock(&rport->dev);
2973 	}
2974 }
2975 EXPORT_SYMBOL(fc_remote_port_rolechg);
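
/*
 * Example (illustrative only): if a previously reported port completes a
 * login and is now usable as an FCP target, the LLDD would call:
 *
 *   fc_remote_port_rolechg(rport, rport->roles | FC_PORT_ROLE_FCP_TARGET);
 */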
2976 
2977 /**
2978  * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
2979  * @work:	rport target that failed to reappear in the allotted time.
2980  *
 * Description: An attempt to delete a remote port blocks the port, and if
 *              the port fails to return in the allotted time this gets called.
2983  */
2984 static void
2985 fc_timeout_deleted_rport(struct work_struct *work)
2986 {
2987 	struct fc_rport *rport =
2988 		container_of(work, struct fc_rport, dev_loss_work.work);
2989 	struct Scsi_Host *shost = rport_to_shost(rport);
2990 	struct fc_internal *i = to_fc_internal(shost->transportt);
2991 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2992 	unsigned long flags;
2993 	int do_callback = 0;
2994 
2995 	spin_lock_irqsave(shost->host_lock, flags);
2996 
2997 	rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2998 
2999 	/*
3000 	 * If the port is ONLINE, then it came back. If it was a SCSI
3001 	 * target, validate it still is. If not, tear down the
3002 	 * scsi_target on it.
3003 	 */
3004 	if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3005 	    (rport->scsi_target_id != -1) &&
3006 	    !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3007 		dev_printk(KERN_ERR, &rport->dev,
3008 			"blocked FC remote port time out: no longer"
3009 			" a FCP target, removing starget\n");
3010 		spin_unlock_irqrestore(shost->host_lock, flags);
3011 		scsi_target_unblock(&rport->dev);
3012 		fc_queue_work(shost, &rport->stgt_delete_work);
3013 		return;
3014 	}
3015 
3016 	/* NOOP state - we're flushing workq's */
3017 	if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3018 		spin_unlock_irqrestore(shost->host_lock, flags);
3019 		dev_printk(KERN_ERR, &rport->dev,
3020 			"blocked FC remote port time out: leaving"
3021 			" rport%s alone\n",
3022 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3023 		return;
3024 	}
3025 
3026 	if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3027 	    (rport->scsi_target_id == -1)) {
3028 		list_del(&rport->peers);
3029 		rport->port_state = FC_PORTSTATE_DELETED;
3030 		dev_printk(KERN_ERR, &rport->dev,
3031 			"blocked FC remote port time out: removing"
3032 			" rport%s\n",
3033 			(rport->scsi_target_id != -1) ?  " and starget" : "");
3034 		fc_queue_work(shost, &rport->rport_delete_work);
3035 		spin_unlock_irqrestore(shost->host_lock, flags);
3036 		return;
3037 	}
3038 
3039 	dev_printk(KERN_ERR, &rport->dev,
3040 		"blocked FC remote port time out: removing target and "
3041 		"saving binding\n");
3042 
3043 	list_move_tail(&rport->peers, &fc_host->rport_bindings);
3044 
3045 	/*
3046 	 * Note: We do not remove or clear the hostdata area. This allows
3047 	 *   host-specific target data to persist along with the
	 *   scsi_target_id. It's up to the host to manage its hostdata area.
3049 	 */
3050 
3051 	/*
3052 	 * Reinitialize port attributes that may change if the port comes back.
3053 	 */
3054 	rport->maxframe_size = -1;
3055 	rport->supported_classes = FC_COS_UNSPECIFIED;
3056 	rport->roles = FC_PORT_ROLE_UNKNOWN;
3057 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
3058 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3059 
3060 	/*
3061 	 * Pre-emptively kill I/O rather than waiting for the work queue
3062 	 * item to teardown the starget. (FCOE libFC folks prefer this
3063 	 * and to have the rport_port_id still set when it's done).
3064 	 */
3065 	spin_unlock_irqrestore(shost->host_lock, flags);
3066 	fc_terminate_rport_io(rport);
3067 
3068 	spin_lock_irqsave(shost->host_lock, flags);
3069 
3070 	if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {	/* still missing */
3071 
		/* remove the identifiers that aren't used in the consistent binding */
3073 		switch (fc_host->tgtid_bind_type) {
3074 		case FC_TGTID_BIND_BY_WWPN:
3075 			rport->node_name = -1;
3076 			rport->port_id = -1;
3077 			break;
3078 		case FC_TGTID_BIND_BY_WWNN:
3079 			rport->port_name = -1;
3080 			rport->port_id = -1;
3081 			break;
3082 		case FC_TGTID_BIND_BY_ID:
3083 			rport->node_name = -1;
3084 			rport->port_name = -1;
3085 			break;
3086 		case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
3087 			break;
3088 		}
3089 
3090 		/*
3091 		 * As this only occurs if the remote port (scsi target)
3092 		 * went away and didn't come back - we'll remove
3093 		 * all attached scsi devices.
3094 		 */
3095 		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3096 		fc_queue_work(shost, &rport->stgt_delete_work);
3097 
3098 		do_callback = 1;
3099 	}
3100 
3101 	spin_unlock_irqrestore(shost->host_lock, flags);
3102 
3103 	/*
3104 	 * Notify the driver that the rport is now dead. The LLDD will
3105 	 * also guarantee that any communication to the rport is terminated
3106 	 *
3107 	 * Note: we set the CALLBK_DONE flag above to correspond
3108 	 */
3109 	if (do_callback && i->f->dev_loss_tmo_callbk)
3110 		i->f->dev_loss_tmo_callbk(rport);
3111 }
3112 
3113 
3114 /**
3115  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3116  * @work:	rport to terminate io on.
3117  *
 * Notes: Only requests failure of the io; it does not guarantee that all
 *    io is flushed prior to returning.
3120  */
3121 static void
3122 fc_timeout_fail_rport_io(struct work_struct *work)
3123 {
3124 	struct fc_rport *rport =
3125 		container_of(work, struct fc_rport, fail_io_work.work);
3126 
3127 	if (rport->port_state != FC_PORTSTATE_BLOCKED)
3128 		return;
3129 
3130 	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3131 	fc_terminate_rport_io(rport);
3132 }
3133 
3134 /**
3135  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
3136  * @work:	remote port to be scanned.
3137  */
3138 static void
3139 fc_scsi_scan_rport(struct work_struct *work)
3140 {
3141 	struct fc_rport *rport =
3142 		container_of(work, struct fc_rport, scan_work);
3143 	struct Scsi_Host *shost = rport_to_shost(rport);
3144 	struct fc_internal *i = to_fc_internal(shost->transportt);
3145 	unsigned long flags;
3146 
3147 	if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3148 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3149 	    !(i->f->disable_target_scan)) {
3150 		scsi_scan_target(&rport->dev, rport->channel,
3151 			rport->scsi_target_id, SCAN_WILD_CARD, 1);
3152 	}
3153 
3154 	spin_lock_irqsave(shost->host_lock, flags);
3155 	rport->flags &= ~FC_RPORT_SCAN_PENDING;
3156 	spin_unlock_irqrestore(shost->host_lock, flags);
3157 }
3158 
3159 /**
3160  * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
3161  * @cmnd: SCSI command that scsi_eh is trying to recover
3162  *
3163  * This routine can be called from a FC LLD scsi_eh callback. It
3164  * blocks the scsi_eh thread until the fc_rport leaves the
3165  * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh
3166  * failing recovery actions for blocked rports which would lead to
3167  * offlined SCSI devices.
3168  */
3169 void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3170 {
3171 	struct Scsi_Host *shost = cmnd->device->host;
3172 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3173 	unsigned long flags;
3174 
3175 	spin_lock_irqsave(shost->host_lock, flags);
3176 	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
3177 		spin_unlock_irqrestore(shost->host_lock, flags);
3178 		msleep(1000);
3179 		spin_lock_irqsave(shost->host_lock, flags);
3180 	}
3181 	spin_unlock_irqrestore(shost->host_lock, flags);
3182 }
3183 EXPORT_SYMBOL(fc_block_scsi_eh);
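
/*
 * Minimal sketch of usage from an LLDD error handler (illustrative only;
 * my_eh_device_reset_handler is a hypothetical scsi_host_template callback):
 *
 *   static int my_eh_device_reset_handler(struct scsi_cmnd *cmnd)
 *   {
 *           fc_block_scsi_eh(cmnd);
 *           ... issue the reset to the (now unblocked) rport ...
 *   }
 */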
3184 
3185 /**
3186  * fc_vport_setup - allocates and creates a FC virtual port.
3187  * @shost:	scsi host the virtual port is connected to.
3188  * @channel:	Channel on shost port connected to.
3189  * @pdev:	parent device for vport
3190  * @ids:	The world wide names, FC4 port roles, etc for
3191  *              the virtual port.
3192  * @ret_vport:	The pointer to the created vport.
3193  *
3194  * Allocates and creates the vport structure, calls the parent host
 * to instantiate the vport, then completes with class and sysfs creation.
3196  *
3197  * Notes:
3198  *	This routine assumes no locks are held on entry.
3199  */
3200 static int
3201 fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3202 	struct fc_vport_identifiers  *ids, struct fc_vport **ret_vport)
3203 {
3204 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3205 	struct fc_internal *fci = to_fc_internal(shost->transportt);
3206 	struct fc_vport *vport;
3207 	struct device *dev;
3208 	unsigned long flags;
3209 	size_t size;
3210 	int error;
3211 
3212 	*ret_vport = NULL;
3213 
	if (!fci->f->vport_create)
3215 		return -ENOENT;
3216 
3217 	size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3218 	vport = kzalloc(size, GFP_KERNEL);
3219 	if (unlikely(!vport)) {
3220 		printk(KERN_ERR "%s: allocation failure\n", __func__);
3221 		return -ENOMEM;
3222 	}
3223 
3224 	vport->vport_state = FC_VPORT_UNKNOWN;
3225 	vport->vport_last_state = FC_VPORT_UNKNOWN;
3226 	vport->node_name = ids->node_name;
3227 	vport->port_name = ids->port_name;
3228 	vport->roles = ids->roles;
3229 	vport->vport_type = ids->vport_type;
3230 	if (fci->f->dd_fcvport_size)
3231 		vport->dd_data = &vport[1];
3232 	vport->shost = shost;
3233 	vport->channel = channel;
3234 	vport->flags = FC_VPORT_CREATING;
3235 	INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3236 
3237 	spin_lock_irqsave(shost->host_lock, flags);
3238 
3239 	if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3240 		spin_unlock_irqrestore(shost->host_lock, flags);
3241 		kfree(vport);
3242 		return -ENOSPC;
3243 	}
3244 	fc_host->npiv_vports_inuse++;
3245 	vport->number = fc_host->next_vport_number++;
3246 	list_add_tail(&vport->peers, &fc_host->vports);
3247 	get_device(&shost->shost_gendev);	/* for fc_host->vport list */
3248 
3249 	spin_unlock_irqrestore(shost->host_lock, flags);
3250 
3251 	dev = &vport->dev;
3252 	device_initialize(dev);			/* takes self reference */
3253 	dev->parent = get_device(pdev);		/* takes parent reference */
3254 	dev->release = fc_vport_dev_release;
3255 	dev_set_name(dev, "vport-%d:%d-%d",
3256 		     shost->host_no, channel, vport->number);
3257 	transport_setup_device(dev);
3258 
3259 	error = device_add(dev);
3260 	if (error) {
3261 		printk(KERN_ERR "FC Virtual Port device_add failed\n");
3262 		goto delete_vport;
3263 	}
3264 	transport_add_device(dev);
3265 	transport_configure_device(dev);
3266 
3267 	error = fci->f->vport_create(vport, ids->disable);
3268 	if (error) {
3269 		printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3270 		goto delete_vport_all;
3271 	}
3272 
	/*
	 * if the parent isn't the physical adapter's Scsi_Host, ensure
	 * the Scsi_Host at least contains a symlink to the vport.
	 */
3277 	if (pdev != &shost->shost_gendev) {
3278 		error = sysfs_create_link(&shost->shost_gendev.kobj,
3279 				 &dev->kobj, dev_name(dev));
3280 		if (error)
3281 			printk(KERN_ERR
3282 				"%s: Cannot create vport symlinks for "
3283 				"%s, err=%d\n",
3284 				__func__, dev_name(dev), error);
3285 	}
3286 	spin_lock_irqsave(shost->host_lock, flags);
3287 	vport->flags &= ~FC_VPORT_CREATING;
3288 	spin_unlock_irqrestore(shost->host_lock, flags);
3289 
3290 	dev_printk(KERN_NOTICE, pdev,
3291 			"%s created via shost%d channel %d\n", dev_name(dev),
3292 			shost->host_no, channel);
3293 
3294 	*ret_vport = vport;
3295 
3296 	return 0;
3297 
3298 delete_vport_all:
3299 	transport_remove_device(dev);
3300 	device_del(dev);
3301 delete_vport:
3302 	transport_destroy_device(dev);
3303 	spin_lock_irqsave(shost->host_lock, flags);
3304 	list_del(&vport->peers);
3305 	put_device(&shost->shost_gendev);	/* for fc_host->vport list */
3306 	fc_host->npiv_vports_inuse--;
3307 	spin_unlock_irqrestore(shost->host_lock, flags);
3308 	put_device(dev->parent);
3309 	kfree(vport);
3310 
3311 	return error;
3312 }
3313 
3314 /**
3315  * fc_vport_create - Admin App or LLDD requests creation of a vport
3316  * @shost:	scsi host the virtual port is connected to.
3317  * @channel:	channel on shost port connected to.
3318  * @ids:	The world wide names, FC4 port roles, etc for
3319  *              the virtual port.
3320  *
3321  * Notes:
3322  *	This routine assumes no locks are held on entry.
3323  */
3324 struct fc_vport *
3325 fc_vport_create(struct Scsi_Host *shost, int channel,
3326 	struct fc_vport_identifiers *ids)
3327 {
3328 	int stat;
3329 	struct fc_vport *vport;
3330 
3331 	stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3332 		 ids, &vport);
3333 	return stat ? NULL : vport;
3334 }
3335 EXPORT_SYMBOL(fc_vport_create);
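
/*
 * Illustrative sketch (hypothetical values) of an NPIV vport creation as a
 * management application or LLDD might request it through fc_vport_create():
 *
 *	struct fc_vport_identifiers ids = {
 *		.node_name  = 0x200000e08b000001ULL,
 *		.port_name  = 0x210000e08b000001ULL,
 *		.roles      = FC_PORT_ROLE_FCP_INITIATOR,
 *		.vport_type = FC_PORTTYPE_NPIV,
 *		.disable    = false,
 *	};
 *	struct fc_vport *vport = fc_vport_create(shost, 0, &ids);
 *
 * A NULL return means fc_vport_setup() failed (no vport_create handler,
 * allocation failure, or max_npiv_vports already in use).
 */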
3336 
3337 /**
3338  * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3339  * @vport:	fc_vport to be terminated
3340  *
3341  * Calls the LLDD vport_delete() function, then deallocates and removes
3342  * the vport from the shost and object tree.
3343  *
3344  * Notes:
3345  *	This routine assumes no locks are held on entry.
3346  */
3347 int
3348 fc_vport_terminate(struct fc_vport *vport)
3349 {
3350 	struct Scsi_Host *shost = vport_to_shost(vport);
3351 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3352 	struct fc_internal *i = to_fc_internal(shost->transportt);
3353 	struct device *dev = &vport->dev;
3354 	unsigned long flags;
3355 	int stat;
3356 
3357 	spin_lock_irqsave(shost->host_lock, flags);
3358 	if (vport->flags & FC_VPORT_CREATING) {
3359 		spin_unlock_irqrestore(shost->host_lock, flags);
3360 		return -EBUSY;
3361 	}
3362 	if (vport->flags & (FC_VPORT_DEL)) {
3363 		spin_unlock_irqrestore(shost->host_lock, flags);
3364 		return -EALREADY;
3365 	}
3366 	vport->flags |= FC_VPORT_DELETING;
3367 	spin_unlock_irqrestore(shost->host_lock, flags);
3368 
3369 	if (i->f->vport_delete)
3370 		stat = i->f->vport_delete(vport);
3371 	else
3372 		stat = -ENOENT;
3373 
3374 	spin_lock_irqsave(shost->host_lock, flags);
3375 	vport->flags &= ~FC_VPORT_DELETING;
3376 	if (!stat) {
3377 		vport->flags |= FC_VPORT_DELETED;
3378 		list_del(&vport->peers);
3379 		fc_host->npiv_vports_inuse--;
3380 		put_device(&shost->shost_gendev);  /* for fc_host->vport list */
3381 	}
3382 	spin_unlock_irqrestore(shost->host_lock, flags);
3383 
3384 	if (stat)
3385 		return stat;
3386 
3387 	if (dev->parent != &shost->shost_gendev)
3388 		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3389 	transport_remove_device(dev);
3390 	device_del(dev);
3391 	transport_destroy_device(dev);
3392 
3393 	/*
3394 	 * Removing our self-reference should mean our
3395 	 * release function gets called, which will drop the remaining
3396 	 * parent reference and free the data structure.
3397 	 */
3398 	put_device(dev);			/* for self-reference */
3399 
3400 	return 0; /* SUCCESS */
3401 }
3402 EXPORT_SYMBOL(fc_vport_terminate);
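
/*
 * Illustrative counterpart (hypothetical) to the creation sketch above: a
 * vport obtained from fc_vport_create() is torn down with
 * fc_vport_terminate(), which returns 0 on success or a -errno value
 * (e.g. -EBUSY while FC_VPORT_CREATING, -EALREADY if a delete is already
 * in progress):
 *
 *	if (vport && fc_vport_terminate(vport))
 *		printk(KERN_WARNING "example: vport delete failed\n");
 */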
3403 
3404 /**
3405  * fc_vport_sched_delete - workq-based delete request for a vport
3406  * @work:	vport to be deleted.
3407  */
3408 static void
3409 fc_vport_sched_delete(struct work_struct *work)
3410 {
3411 	struct fc_vport *vport =
3412 		container_of(work, struct fc_vport, vport_delete_work);
3413 	int stat;
3414 
3415 	stat = fc_vport_terminate(vport);
3416 	if (stat)
		dev_printk(KERN_ERR, vport->dev.parent,
			"%s: %s (created via shost%d channel %d) could not "
			"be deleted - error %d\n", __func__,
			dev_name(&vport->dev), vport->shost->host_no,
			vport->channel, stat);
3422 }
3423 
3424 
3425 /*
3426  * BSG support
3427  */
3428 
3429 
3430 /**
 * fc_destroy_bsgjob - routine to tear down/delete a fc bsg job
3432  * @job:	fc_bsg_job that is to be torn down
3433  */
3434 static void
3435 fc_destroy_bsgjob(struct fc_bsg_job *job)
3436 {
3437 	unsigned long flags;
3438 
3439 	spin_lock_irqsave(&job->job_lock, flags);
3440 	if (job->ref_cnt) {
3441 		spin_unlock_irqrestore(&job->job_lock, flags);
3442 		return;
3443 	}
3444 	spin_unlock_irqrestore(&job->job_lock, flags);
3445 
3446 	put_device(job->dev);	/* release reference for the request */
3447 
3448 	kfree(job->request_payload.sg_list);
3449 	kfree(job->reply_payload.sg_list);
3450 	kfree(job);
3451 }
3452 
3453 /**
 * fc_bsg_jobdone - completion routine for bsg requests completed by the LLD
3456  * @job:	fc_bsg_job that is complete
3457  */
3458 static void
3459 fc_bsg_jobdone(struct fc_bsg_job *job)
3460 {
3461 	struct request *req = job->req;
3462 	struct request *rsp = req->next_rq;
3463 	int err;
3464 
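	/*
	 * Propagate the LLD's result as the block request's error status;
	 * the sense_len set below controls how much of the fc_bsg_reply
	 * (which lives in the request's sense buffer) bsg copies back to
	 * the SG_IO v4 user.
	 */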
3465 	err = job->req->errors = job->reply->result;
3466 
3467 	if (err < 0)
3468 		/* we're only returning the result field in the reply */
3469 		job->req->sense_len = sizeof(uint32_t);
3470 	else
3471 		job->req->sense_len = job->reply_len;
3472 
3473 	/* we assume all request payload was transferred, residual == 0 */
3474 	req->resid_len = 0;
3475 
3476 	if (rsp) {
3477 		WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3478 
3479 		/* set reply (bidi) residual */
3480 		rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3481 				      rsp->resid_len);
3482 	}
3483 	blk_complete_request(req);
3484 }
3485 
3486 /**
3487  * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
3488  * @rq:        BSG request that holds the job to be destroyed
3489  */
3490 static void fc_bsg_softirq_done(struct request *rq)
3491 {
3492 	struct fc_bsg_job *job = rq->special;
3493 	unsigned long flags;
3494 
3495 	spin_lock_irqsave(&job->job_lock, flags);
3496 	job->state_flags |= FC_RQST_STATE_DONE;
3497 	job->ref_cnt--;
3498 	spin_unlock_irqrestore(&job->job_lock, flags);
3499 
3500 	blk_end_request_all(rq, rq->errors);
3501 	fc_destroy_bsgjob(job);
3502 }
3503 
3504 /**
 * fc_bsg_job_timeout - handler for when a bsg request times out
3506  * @req:	request that timed out
3507  */
3508 static enum blk_eh_timer_return
3509 fc_bsg_job_timeout(struct request *req)
3510 {
3511 	struct fc_bsg_job *job = (void *) req->special;
3512 	struct Scsi_Host *shost = job->shost;
3513 	struct fc_internal *i = to_fc_internal(shost->transportt);
3514 	unsigned long flags;
3515 	int err = 0, done = 0;
3516 
3517 	if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3518 		return BLK_EH_RESET_TIMER;
3519 
3520 	spin_lock_irqsave(&job->job_lock, flags);
3521 	if (job->state_flags & FC_RQST_STATE_DONE)
3522 		done = 1;
3523 	else
3524 		job->ref_cnt++;
3525 	spin_unlock_irqrestore(&job->job_lock, flags);
3526 
3527 	if (!done && i->f->bsg_timeout) {
3528 		/* call LLDD to abort the i/o as it has timed out */
3529 		err = i->f->bsg_timeout(job);
3530 		if (err)
3531 			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3532 				"abort failed with status %d\n", err);
3533 	}
3534 
3535 	/* the blk_end_sync_io() doesn't check the error */
3536 	if (done)
3537 		return BLK_EH_NOT_HANDLED;
3538 	else
3539 		return BLK_EH_HANDLED;
3540 }
3541 
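/**
 * fc_bsg_map_buffer - map a bsg request's data into a driver scatterlist
 * @buf:	fc_bsg_buffer to receive the scatterlist and payload length
 * @req:	block request whose segments are to be mapped
 */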
3542 static int
3543 fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3544 {
3545 	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3546 
3547 	BUG_ON(!req->nr_phys_segments);
3548 
3549 	buf->sg_list = kzalloc(sz, GFP_KERNEL);
3550 	if (!buf->sg_list)
3551 		return -ENOMEM;
3552 	sg_init_table(buf->sg_list, req->nr_phys_segments);
3553 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3554 	buf->payload_len = blk_rq_bytes(req);
3555 	return 0;
3556 }
3557 
3558 
3559 /**
 * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the bsg request
3562  * @shost:	SCSI Host corresponding to the bsg object
3563  * @rport:	(optional) FC Remote Port corresponding to the bsg object
3564  * @req:	BSG request that needs a job structure
3565  */
3566 static int
3567 fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3568 	struct request *req)
3569 {
3570 	struct fc_internal *i = to_fc_internal(shost->transportt);
3571 	struct request *rsp = req->next_rq;
3572 	struct fc_bsg_job *job;
3573 	int ret;
3574 
3575 	BUG_ON(req->special);
3576 
3577 	job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3578 			GFP_KERNEL);
3579 	if (!job)
3580 		return -ENOMEM;
3581 
3582 	/*
3583 	 * Note: this is a bit silly.
3584 	 * The request gets formatted as a SGIO v4 ioctl request, which
3585 	 * then gets reformatted as a blk request, which then gets
3586 	 * reformatted as a fc bsg request. And on completion, we have
3587 	 * to wrap return results such that SGIO v4 thinks it was a scsi
3588 	 * status.  I hope this was all worth it.
3589 	 */
3590 
3591 	req->special = job;
3592 	job->shost = shost;
3593 	job->rport = rport;
3594 	job->req = req;
3595 	if (i->f->dd_bsg_size)
3596 		job->dd_data = (void *)&job[1];
3597 	spin_lock_init(&job->job_lock);
3598 	job->request = (struct fc_bsg_request *)req->cmd;
3599 	job->request_len = req->cmd_len;
3600 	job->reply = req->sense;
3601 	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
3602 						 * allocated */
3603 	if (req->bio) {
3604 		ret = fc_bsg_map_buffer(&job->request_payload, req);
3605 		if (ret)
3606 			goto failjob_rls_job;
3607 	}
3608 	if (rsp && rsp->bio) {
3609 		ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3610 		if (ret)
3611 			goto failjob_rls_rqst_payload;
3612 	}
3613 	job->job_done = fc_bsg_jobdone;
3614 	if (rport)
3615 		job->dev = &rport->dev;
3616 	else
3617 		job->dev = &shost->shost_gendev;
3618 	get_device(job->dev);		/* take a reference for the request */
3619 
3620 	job->ref_cnt = 1;
3621 
3622 	return 0;
3623 
3624 
3625 failjob_rls_rqst_payload:
3626 	kfree(job->request_payload.sg_list);
3627 failjob_rls_job:
3628 	kfree(job);
3629 	return -ENOMEM;
3630 }
3631 
3632 
3633 enum fc_dispatch_result {
3634 	FC_DISPATCH_BREAK,	/* on return, q is locked, break from q loop */
3635 	FC_DISPATCH_LOCKED,	/* on return, q is locked, continue on */
3636 	FC_DISPATCH_UNLOCKED,	/* on return, q is unlocked, continue on */
3637 };
3638 
3639 
3640 /**
3641  * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3642  * @q:		fc host request queue
3643  * @shost:	scsi host rport attached to
3644  * @job:	bsg job to be processed
3645  */
3646 static enum fc_dispatch_result
3647 fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3648 			 struct fc_bsg_job *job)
3649 {
3650 	struct fc_internal *i = to_fc_internal(shost->transportt);
3651 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
3652 	int ret;
3653 
3654 	/* Validate the host command */
3655 	switch (job->request->msgcode) {
3656 	case FC_BSG_HST_ADD_RPORT:
3657 		cmdlen += sizeof(struct fc_bsg_host_add_rport);
3658 		break;
3659 
3660 	case FC_BSG_HST_DEL_RPORT:
3661 		cmdlen += sizeof(struct fc_bsg_host_del_rport);
3662 		break;
3663 
3664 	case FC_BSG_HST_ELS_NOLOGIN:
3665 		cmdlen += sizeof(struct fc_bsg_host_els);
		/* there had better be xmt and rcv payloads */
3667 		if ((!job->request_payload.payload_len) ||
3668 		    (!job->reply_payload.payload_len)) {
3669 			ret = -EINVAL;
3670 			goto fail_host_msg;
3671 		}
3672 		break;
3673 
3674 	case FC_BSG_HST_CT:
3675 		cmdlen += sizeof(struct fc_bsg_host_ct);
3676 		/* there better be xmt and rcv payloads */
3677 		if ((!job->request_payload.payload_len) ||
3678 		    (!job->reply_payload.payload_len)) {
3679 			ret = -EINVAL;
3680 			goto fail_host_msg;
3681 		}
3682 		break;
3683 
3684 	case FC_BSG_HST_VENDOR:
3685 		cmdlen += sizeof(struct fc_bsg_host_vendor);
3686 		if ((shost->hostt->vendor_id == 0L) ||
3687 		    (job->request->rqst_data.h_vendor.vendor_id !=
3688 			shost->hostt->vendor_id)) {
3689 			ret = -ESRCH;
3690 			goto fail_host_msg;
3691 		}
3692 		break;
3693 
3694 	default:
3695 		ret = -EBADR;
3696 		goto fail_host_msg;
3697 	}
3698 
3699 	/* check if we really have all the request data needed */
3700 	if (job->request_len < cmdlen) {
3701 		ret = -ENOMSG;
3702 		goto fail_host_msg;
3703 	}
3704 
3705 	ret = i->f->bsg_request(job);
3706 	if (!ret)
3707 		return FC_DISPATCH_UNLOCKED;
3708 
3709 fail_host_msg:
3710 	/* return the errno failure code as the only status */
3711 	BUG_ON(job->reply_len < sizeof(uint32_t));
3712 	job->reply->reply_payload_rcv_len = 0;
3713 	job->reply->result = ret;
3714 	job->reply_len = sizeof(uint32_t);
3715 	fc_bsg_jobdone(job);
3716 	return FC_DISPATCH_UNLOCKED;
3717 }
3718 
3719 
/**
3721  * fc_bsg_goose_queue - restart rport queue in case it was stopped
3722  * @rport:	rport to be restarted
3723  */
3724 static void
3725 fc_bsg_goose_queue(struct fc_rport *rport)
3726 {
	unsigned long flags;
3729 
3730 	if (!rport->rqst_q)
3731 		return;
3732 
3733 	get_device(&rport->dev);
3734 
	spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
	/*
	 * __blk_run_queue() guards against re-entering the request_fn
	 * itself (via QUEUE_FLAG_REENTER), so just kick the queue.
	 */
	__blk_run_queue(rport->rqst_q);
	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3744 
3745 	put_device(&rport->dev);
3746 }
3747 
3748 
3749 /**
3750  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3751  * @q:		rport request queue
3752  * @shost:	scsi host rport attached to
3753  * @rport:	rport request destined to
3754  * @job:	bsg job to be processed
3755  */
3756 static enum fc_dispatch_result
3757 fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3758 			 struct fc_rport *rport, struct fc_bsg_job *job)
3759 {
3760 	struct fc_internal *i = to_fc_internal(shost->transportt);
3761 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
3762 	int ret;
3763 
3764 	/* Validate the rport command */
3765 	switch (job->request->msgcode) {
3766 	case FC_BSG_RPT_ELS:
3767 		cmdlen += sizeof(struct fc_bsg_rport_els);
3768 		goto check_bidi;
3769 
3770 	case FC_BSG_RPT_CT:
3771 		cmdlen += sizeof(struct fc_bsg_rport_ct);
3772 check_bidi:
3773 		/* there better be xmt and rcv payloads */
3774 		if ((!job->request_payload.payload_len) ||
3775 		    (!job->reply_payload.payload_len)) {
3776 			ret = -EINVAL;
3777 			goto fail_rport_msg;
3778 		}
3779 		break;
3780 	default:
3781 		ret = -EBADR;
3782 		goto fail_rport_msg;
3783 	}
3784 
3785 	/* check if we really have all the request data needed */
3786 	if (job->request_len < cmdlen) {
3787 		ret = -ENOMSG;
3788 		goto fail_rport_msg;
3789 	}
3790 
3791 	ret = i->f->bsg_request(job);
3792 	if (!ret)
3793 		return FC_DISPATCH_UNLOCKED;
3794 
3795 fail_rport_msg:
3796 	/* return the errno failure code as the only status */
3797 	BUG_ON(job->reply_len < sizeof(uint32_t));
3798 	job->reply->reply_payload_rcv_len = 0;
3799 	job->reply->result = ret;
3800 	job->reply_len = sizeof(uint32_t);
3801 	fc_bsg_jobdone(job);
3802 	return FC_DISPATCH_UNLOCKED;
3803 }
3804 
3805 
3806 /**
3807  * fc_bsg_request_handler - generic handler for bsg requests
3808  * @q:		request queue to manage
3809  * @shost:	Scsi_Host related to the bsg object
3810  * @rport:	FC remote port related to the bsg object (optional)
3811  * @dev:	device structure for bsg object
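 *
 * Notes:
 *	Called from the bsg queues' request_fn handlers, so it is entered
 *	with @q's queue_lock held; the lock is dropped and re-acquired
 *	around any work that may sleep (job setup and LLDD dispatch).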
3812  */
3813 static void
3814 fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3815 		       struct fc_rport *rport, struct device *dev)
3816 {
3817 	struct request *req;
3818 	struct fc_bsg_job *job;
3819 	enum fc_dispatch_result ret;
3820 
3821 	if (!get_device(dev))
3822 		return;
3823 
3824 	while (!blk_queue_plugged(q)) {
3825 		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3826 		    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3827 			break;
3828 
3829 		req = blk_fetch_request(q);
3830 		if (!req)
3831 			break;
3832 
3833 		if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3834 			req->errors = -ENXIO;
3835 			spin_unlock_irq(q->queue_lock);
3836 			blk_end_request(req, -ENXIO, blk_rq_bytes(req));
3837 			spin_lock_irq(q->queue_lock);
3838 			continue;
3839 		}
3840 
3841 		spin_unlock_irq(q->queue_lock);
3842 
3843 		ret = fc_req_to_bsgjob(shost, rport, req);
3844 		if (ret) {
3845 			req->errors = ret;
3846 			blk_end_request(req, ret, blk_rq_bytes(req));
3847 			spin_lock_irq(q->queue_lock);
3848 			continue;
3849 		}
3850 
3851 		job = req->special;
3852 
3853 		/* check if we have the msgcode value at least */
3854 		if (job->request_len < sizeof(uint32_t)) {
3855 			BUG_ON(job->reply_len < sizeof(uint32_t));
3856 			job->reply->reply_payload_rcv_len = 0;
3857 			job->reply->result = -ENOMSG;
3858 			job->reply_len = sizeof(uint32_t);
3859 			fc_bsg_jobdone(job);
3860 			spin_lock_irq(q->queue_lock);
3861 			continue;
3862 		}
3863 
3864 		/* the dispatch routines will unlock the queue_lock */
3865 		if (rport)
3866 			ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3867 		else
3868 			ret = fc_bsg_host_dispatch(q, shost, job);
3869 
		/* did the dispatcher hit a state where it cannot continue? */
3871 		if (ret == FC_DISPATCH_BREAK)
3872 			break;
3873 
		/* did the dispatcher release the lock? */
3875 		if (ret == FC_DISPATCH_UNLOCKED)
3876 			spin_lock_irq(q->queue_lock);
3877 	}
3878 
3879 	spin_unlock_irq(q->queue_lock);
3880 	put_device(dev);
3881 	spin_lock_irq(q->queue_lock);
3882 }
3883 
3884 
3885 /**
3886  * fc_bsg_host_handler - handler for bsg requests for a fc host
3887  * @q:		fc host request queue
3888  */
3889 static void
3890 fc_bsg_host_handler(struct request_queue *q)
3891 {
3892 	struct Scsi_Host *shost = q->queuedata;
3893 
3894 	fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
3895 }
3896 
3897 
3898 /**
3899  * fc_bsg_rport_handler - handler for bsg requests for a fc rport
3900  * @q:		rport request queue
3901  */
3902 static void
3903 fc_bsg_rport_handler(struct request_queue *q)
3904 {
3905 	struct fc_rport *rport = q->queuedata;
3906 	struct Scsi_Host *shost = rport_to_shost(rport);
3907 
3908 	fc_bsg_request_handler(q, shost, rport, &rport->dev);
3909 }
3910 
3911 
3912 /**
3913  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
3914  * @shost:	shost for fc_host
 * @fc_host:	fc_host that the bsg structures are being added to
3916  */
3917 static int
3918 fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3919 {
3920 	struct device *dev = &shost->shost_gendev;
3921 	struct fc_internal *i = to_fc_internal(shost->transportt);
3922 	struct request_queue *q;
3923 	int err;
3924 	char bsg_name[20];
3925 
3926 	fc_host->rqst_q = NULL;
3927 
3928 	if (!i->f->bsg_request)
3929 		return -ENOTSUPP;
3930 
3931 	snprintf(bsg_name, sizeof(bsg_name),
3932 		 "fc_host%d", shost->host_no);
3933 
3934 	q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
3935 	if (!q) {
3936 		printk(KERN_ERR "fc_host%d: bsg interface failed to "
3937 				"initialize - no request queue\n",
3938 				 shost->host_no);
3939 		return -ENOMEM;
3940 	}
3941 
3942 	q->queuedata = shost;
3943 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3944 	blk_queue_softirq_done(q, fc_bsg_softirq_done);
3945 	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3946 	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3947 
3948 	err = bsg_register_queue(q, dev, bsg_name, NULL);
3949 	if (err) {
3950 		printk(KERN_ERR "fc_host%d: bsg interface failed to "
3951 				"initialize - register queue\n",
3952 				shost->host_no);
3953 		blk_cleanup_queue(q);
3954 		return err;
3955 	}
3956 
3957 	fc_host->rqst_q = q;
3958 	return 0;
3959 }
3960 
3961 
3962 /**
3963  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
3964  * @shost:	shost that rport is attached to
3965  * @rport:	rport that the bsg hooks are being attached to
3966  */
3967 static int
3968 fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3969 {
3970 	struct device *dev = &rport->dev;
3971 	struct fc_internal *i = to_fc_internal(shost->transportt);
3972 	struct request_queue *q;
3973 	int err;
3974 
3975 	rport->rqst_q = NULL;
3976 
3977 	if (!i->f->bsg_request)
3978 		return -ENOTSUPP;
3979 
3980 	q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
3981 	if (!q) {
3982 		printk(KERN_ERR "%s: bsg interface failed to "
3983 				"initialize - no request queue\n",
3984 				 dev->kobj.name);
3985 		return -ENOMEM;
3986 	}
3987 
3988 	q->queuedata = rport;
3989 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3990 	blk_queue_softirq_done(q, fc_bsg_softirq_done);
3991 	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3992 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3993 
3994 	err = bsg_register_queue(q, dev, NULL, NULL);
3995 	if (err) {
3996 		printk(KERN_ERR "%s: bsg interface failed to "
3997 				"initialize - register queue\n",
3998 				 dev->kobj.name);
3999 		blk_cleanup_queue(q);
4000 		return err;
4001 	}
4002 
4003 	rport->rqst_q = q;
4004 	return 0;
4005 }
4006 
4007 
4008 /**
4009  * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
4010  * @q:	the request_queue that is to be torn down.
4011  */
4012 static void
4013 fc_bsg_remove(struct request_queue *q)
4014 {
4015 	if (q) {
4016 		bsg_unregister_queue(q);
4017 		blk_cleanup_queue(q);
4018 	}
4019 }
4020 
4021 
4022 /* Original Author:  Martin Hicks */
4023 MODULE_AUTHOR("James Smart");
4024 MODULE_DESCRIPTION("FC Transport Attributes");
4025 MODULE_LICENSE("GPL");
4026 
4027 module_init(fc_transport_init);
4028 module_exit(fc_transport_exit);
4029