xref: /openbmc/linux/drivers/target/sbp/sbp_target.c (revision 20055477)
1 /*
2  * SBP2 target driver (SCSI over IEEE1394 in target mode)
3  *
4  * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20 
21 #define KMSG_COMPONENT "sbp_target"
22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/configfs.h>
30 #include <linux/ctype.h>
31 #include <linux/firewire.h>
32 #include <linux/firewire-constants.h>
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_tcq.h>
35 #include <target/target_core_base.h>
36 #include <target/target_core_backend.h>
37 #include <target/target_core_fabric.h>
38 #include <target/target_core_fabric_configfs.h>
39 #include <target/target_core_configfs.h>
40 #include <target/configfs_macros.h>
41 #include <asm/unaligned.h>
42 
43 #include "sbp_target.h"
44 
45 /* Local pointer to allocated TCM configfs fabric module */
46 static struct target_fabric_configfs *sbp_fabric_configfs;
47 
48 /* FireWire address region for management and command block address handlers */
49 static const struct fw_address_region sbp_register_region = {
50 	.start	= CSR_REGISTER_BASE + 0x10000,
51 	.end	= 0x1000000000000ULL,
52 };
53 
54 static const u32 sbp_unit_directory_template[] = {
55 	0x1200609e, /* unit_specifier_id: NCITS/T10 */
56 	0x13010483, /* unit_sw_version: 1155D Rev 4 */
57 	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
58 	0x390104d8, /* command_set: SPC-2 */
59 	0x3b000000, /* command_set_revision: 0 */
60 	0x3c000001, /* firmware_revision: 1 */
61 };
62 
63 #define SESSION_MAINTENANCE_INTERVAL HZ
64 
65 static atomic_t login_id = ATOMIC_INIT(0);
66 
67 static void session_maintenance_work(struct work_struct *);
68 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
69 		unsigned long long, void *, size_t);
70 
71 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
72 {
73 	int ret;
74 	__be32 high, low;
75 
76 	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
77 			req->node_addr, req->generation, req->speed,
78 			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
79 			&high, sizeof(high));
80 	if (ret != RCODE_COMPLETE)
81 		return ret;
82 
83 	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
84 			req->node_addr, req->generation, req->speed,
85 			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
86 			&low, sizeof(low));
87 	if (ret != RCODE_COMPLETE)
88 		return ret;
89 
90 	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
91 
92 	return RCODE_COMPLETE;
93 }
94 
95 static struct sbp_session *sbp_session_find_by_guid(
96 	struct sbp_tpg *tpg, u64 guid)
97 {
98 	struct se_session *se_sess;
99 	struct sbp_session *sess, *found = NULL;
100 
101 	spin_lock_bh(&tpg->se_tpg.session_lock);
102 	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
103 		sess = se_sess->fabric_sess_ptr;
104 		if (sess->guid == guid)
105 			found = sess;
106 	}
107 	spin_unlock_bh(&tpg->se_tpg.session_lock);
108 
109 	return found;
110 }
111 
112 static struct sbp_login_descriptor *sbp_login_find_by_lun(
113 		struct sbp_session *session, struct se_lun *lun)
114 {
115 	struct sbp_login_descriptor *login, *found = NULL;
116 
117 	spin_lock_bh(&session->lock);
118 	list_for_each_entry(login, &session->login_list, link) {
119 		if (login->lun == lun)
120 			found = login;
121 	}
122 	spin_unlock_bh(&session->lock);
123 
124 	return found;
125 }
126 
127 static int sbp_login_count_all_by_lun(
128 		struct sbp_tpg *tpg,
129 		struct se_lun *lun,
130 		int exclusive)
131 {
132 	struct se_session *se_sess;
133 	struct sbp_session *sess;
134 	struct sbp_login_descriptor *login;
135 	int count = 0;
136 
137 	spin_lock_bh(&tpg->se_tpg.session_lock);
138 	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
139 		sess = se_sess->fabric_sess_ptr;
140 
141 		spin_lock_bh(&sess->lock);
142 		list_for_each_entry(login, &sess->login_list, link) {
143 			if (login->lun != lun)
144 				continue;
145 
146 			if (!exclusive || login->exclusive)
147 				count++;
148 		}
149 		spin_unlock_bh(&sess->lock);
150 	}
151 	spin_unlock_bh(&tpg->se_tpg.session_lock);
152 
153 	return count;
154 }
155 
156 static struct sbp_login_descriptor *sbp_login_find_by_id(
157 	struct sbp_tpg *tpg, int login_id)
158 {
159 	struct se_session *se_sess;
160 	struct sbp_session *sess;
161 	struct sbp_login_descriptor *login, *found = NULL;
162 
163 	spin_lock_bh(&tpg->se_tpg.session_lock);
164 	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
165 		sess = se_sess->fabric_sess_ptr;
166 
167 		spin_lock_bh(&sess->lock);
168 		list_for_each_entry(login, &sess->login_list, link) {
169 			if (login->login_id == login_id)
170 				found = login;
171 		}
172 		spin_unlock_bh(&sess->lock);
173 	}
174 	spin_unlock_bh(&tpg->se_tpg.session_lock);
175 
176 	return found;
177 }
178 
179 static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
180 {
181 	struct se_portal_group *se_tpg = &tpg->se_tpg;
182 	struct se_lun *se_lun;
183 
184 	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
185 		return ERR_PTR(-EINVAL);
186 
187 	spin_lock(&se_tpg->tpg_lun_lock);
188 	se_lun = se_tpg->tpg_lun_list[lun];
189 
190 	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
191 		se_lun = ERR_PTR(-ENODEV);
192 
193 	spin_unlock(&se_tpg->tpg_lun_lock);
194 
195 	return se_lun;
196 }
197 
198 static struct sbp_session *sbp_session_create(
199 		struct sbp_tpg *tpg,
200 		u64 guid)
201 {
202 	struct sbp_session *sess;
203 	int ret;
204 	char guid_str[17];
205 	struct se_node_acl *se_nacl;
206 
207 	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
208 	if (!sess) {
209 		pr_err("failed to allocate session descriptor\n");
210 		return ERR_PTR(-ENOMEM);
211 	}
212 
213 	sess->se_sess = transport_init_session();
214 	if (IS_ERR(sess->se_sess)) {
215 		pr_err("failed to init se_session\n");
216 
217 		ret = PTR_ERR(sess->se_sess);
218 		kfree(sess);
219 		return ERR_PTR(ret);
220 	}
221 
222 	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
223 
224 	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
225 	if (!se_nacl) {
226 		pr_warn("Node ACL not found for %s\n", guid_str);
227 
228 		transport_free_session(sess->se_sess);
229 		kfree(sess);
230 
231 		return ERR_PTR(-EPERM);
232 	}
233 
234 	sess->se_sess->se_node_acl = se_nacl;
235 
236 	spin_lock_init(&sess->lock);
237 	INIT_LIST_HEAD(&sess->login_list);
238 	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
239 
240 	sess->guid = guid;
241 
242 	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
243 
244 	return sess;
245 }
246 
247 static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
248 {
249 	spin_lock_bh(&sess->lock);
250 	if (!list_empty(&sess->login_list)) {
251 		spin_unlock_bh(&sess->lock);
252 		return;
253 	}
254 	spin_unlock_bh(&sess->lock);
255 
256 	if (cancel_work)
257 		cancel_delayed_work_sync(&sess->maint_work);
258 
259 	transport_deregister_session_configfs(sess->se_sess);
260 	transport_deregister_session(sess->se_sess);
261 
262 	if (sess->card)
263 		fw_card_put(sess->card);
264 
265 	kfree(sess);
266 }
267 
268 static void sbp_target_agent_unregister(struct sbp_target_agent *);
269 
270 static void sbp_login_release(struct sbp_login_descriptor *login,
271 	bool cancel_work)
272 {
273 	struct sbp_session *sess = login->sess;
274 
275 	/* FIXME: abort/wait on tasks */
276 
277 	sbp_target_agent_unregister(login->tgt_agt);
278 
279 	if (sess) {
280 		spin_lock_bh(&sess->lock);
281 		list_del(&login->link);
282 		spin_unlock_bh(&sess->lock);
283 
284 		sbp_session_release(sess, cancel_work);
285 	}
286 
287 	kfree(login);
288 }
289 
290 static struct sbp_target_agent *sbp_target_agent_register(
291 	struct sbp_login_descriptor *);
292 
293 static void sbp_management_request_login(
294 	struct sbp_management_agent *agent, struct sbp_management_request *req,
295 	int *status_data_size)
296 {
297 	struct sbp_tport *tport = agent->tport;
298 	struct sbp_tpg *tpg = tport->tpg;
299 	struct se_lun *se_lun;
300 	int ret;
301 	u64 guid;
302 	struct sbp_session *sess;
303 	struct sbp_login_descriptor *login;
304 	struct sbp_login_response_block *response;
305 	int login_response_len;
306 
307 	se_lun = sbp_get_lun_from_tpg(tpg,
308 			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
309 	if (IS_ERR(se_lun)) {
310 		pr_notice("login to unknown LUN: %d\n",
311 			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
312 
313 		req->status.status = cpu_to_be32(
314 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
315 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
316 		return;
317 	}
318 
319 	ret = read_peer_guid(&guid, req);
320 	if (ret != RCODE_COMPLETE) {
321 		pr_warn("failed to read peer GUID: %d\n", ret);
322 
323 		req->status.status = cpu_to_be32(
324 			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
325 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
326 		return;
327 	}
328 
329 	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
330 		se_lun->unpacked_lun, guid);
331 
332 	sess = sbp_session_find_by_guid(tpg, guid);
333 	if (sess) {
334 		login = sbp_login_find_by_lun(sess, se_lun);
335 		if (login) {
336 			pr_notice("initiator already logged-in\n");
337 
338 			/*
339 			 * SBP-2 R4 says we should return access denied, but
340 			 * that can confuse initiators. Instead we need to
341 			 * treat this like a reconnect, but send the login
342 			 * response block like a fresh login.
343 			 *
344 			 * This is required particularly in the case of Apple
345 			 * devices booting off the FireWire target, where
346 			 * the firmware has an active login to the target. When
347 			 * the OS takes control of the session it issues its own
348 			 * LOGIN rather than a RECONNECT. To avoid the machine
349 			 * waiting until the reconnect_hold expires, we can skip
350 			 * the ACCESS_DENIED errors to speed things up.
351 			 */
352 
353 			goto already_logged_in;
354 		}
355 	}
356 
357 	/*
358 	 * check exclusive bit in login request
359 	 * reject with access_denied if any logins present
360 	 */
361 	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
362 			sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
363 		pr_warn("refusing exclusive login with other active logins\n");
364 
365 		req->status.status = cpu_to_be32(
366 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
367 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
368 		return;
369 	}
370 
371 	/*
372 	 * check exclusive bit in any existing login descriptor
373 	 * reject with access_denied if any exclusive logins present
374 	 */
375 	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
376 		pr_warn("refusing login while another exclusive login present\n");
377 
378 		req->status.status = cpu_to_be32(
379 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
380 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
381 		return;
382 	}
383 
384 	/*
385 	 * check we haven't exceeded the number of allowed logins
386 	 * reject with resources_unavailable if we have
387 	 */
388 	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
389 			tport->max_logins_per_lun) {
390 		pr_warn("max number of logins reached\n");
391 
392 		req->status.status = cpu_to_be32(
393 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
394 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
395 		return;
396 	}
397 
398 	if (!sess) {
399 		sess = sbp_session_create(tpg, guid);
400 		if (IS_ERR(sess)) {
401 			switch (PTR_ERR(sess)) {
402 			case -EPERM:
403 				ret = SBP_STATUS_ACCESS_DENIED;
404 				break;
405 			default:
406 				ret = SBP_STATUS_RESOURCES_UNAVAIL;
407 				break;
408 			}
409 
410 			req->status.status = cpu_to_be32(
411 				STATUS_BLOCK_RESP(
412 					STATUS_RESP_REQUEST_COMPLETE) |
413 				STATUS_BLOCK_SBP_STATUS(ret));
414 			return;
415 		}
416 
417 		sess->node_id = req->node_addr;
418 		sess->card = fw_card_get(req->card);
419 		sess->generation = req->generation;
420 		sess->speed = req->speed;
421 
422 		schedule_delayed_work(&sess->maint_work,
423 				SESSION_MAINTENANCE_INTERVAL);
424 	}
425 
426 	/* only take the latest reconnect_hold into account */
427 	sess->reconnect_hold = min(
428 		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
429 		tport->max_reconnect_timeout) - 1;
430 
431 	login = kmalloc(sizeof(*login), GFP_KERNEL);
432 	if (!login) {
433 		pr_err("failed to allocate login descriptor\n");
434 
435 		sbp_session_release(sess, true);
436 
437 		req->status.status = cpu_to_be32(
438 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
439 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
440 		return;
441 	}
442 
443 	login->sess = sess;
444 	login->lun = se_lun;
445 	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
446 	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
447 	login->login_id = atomic_inc_return(&login_id);
448 
449 	login->tgt_agt = sbp_target_agent_register(login);
450 	if (IS_ERR(login->tgt_agt)) {
451 		ret = PTR_ERR(login->tgt_agt);
452 		pr_err("failed to map command block handler: %d\n", ret);
453 
454 		sbp_session_release(sess, true);
455 		kfree(login);
456 
457 		req->status.status = cpu_to_be32(
458 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
459 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
460 		return;
461 	}
462 
463 	spin_lock_bh(&sess->lock);
464 	list_add_tail(&login->link, &sess->login_list);
465 	spin_unlock_bh(&sess->lock);
466 
467 already_logged_in:
468 	response = kzalloc(sizeof(*response), GFP_KERNEL);
469 	if (!response) {
470 		pr_err("failed to allocate login response block\n");
471 
472 		sbp_login_release(login, true);
473 
474 		req->status.status = cpu_to_be32(
475 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
476 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
477 		return;
478 	}
479 
480 	login_response_len = clamp_val(
481 			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
482 			12, sizeof(*response));
483 	response->misc = cpu_to_be32(
484 		((login_response_len & 0xffff) << 16) |
485 		(login->login_id & 0xffff));
486 	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
487 	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
488 		&response->command_block_agent);
489 
490 	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
491 		sess->node_id, sess->generation, sess->speed,
492 		sbp2_pointer_to_addr(&req->orb.ptr2), response,
493 		login_response_len);
494 	if (ret != RCODE_COMPLETE) {
495 		pr_debug("failed to write login response block: %x\n", ret);
496 
497 		kfree(response);
498 		sbp_login_release(login, true);
499 
500 		req->status.status = cpu_to_be32(
501 			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
502 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
503 		return;
504 	}
505 
506 	kfree(response);
507 
508 	req->status.status = cpu_to_be32(
509 		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
510 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
511 }
512 
513 static void sbp_management_request_query_logins(
514 	struct sbp_management_agent *agent, struct sbp_management_request *req,
515 	int *status_data_size)
516 {
517 	pr_notice("QUERY LOGINS not implemented\n");
518 	/* FIXME: implement */
519 
520 	req->status.status = cpu_to_be32(
521 		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
522 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
523 }
524 
525 static void sbp_management_request_reconnect(
526 	struct sbp_management_agent *agent, struct sbp_management_request *req,
527 	int *status_data_size)
528 {
529 	struct sbp_tport *tport = agent->tport;
530 	struct sbp_tpg *tpg = tport->tpg;
531 	int ret;
532 	u64 guid;
533 	struct sbp_login_descriptor *login;
534 
535 	ret = read_peer_guid(&guid, req);
536 	if (ret != RCODE_COMPLETE) {
537 		pr_warn("failed to read peer GUID: %d\n", ret);
538 
539 		req->status.status = cpu_to_be32(
540 			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
541 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
542 		return;
543 	}
544 
545 	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
546 
547 	login = sbp_login_find_by_id(tpg,
548 		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
549 
550 	if (!login) {
551 		pr_err("mgt_agent RECONNECT unknown login ID\n");
552 
553 		req->status.status = cpu_to_be32(
554 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
555 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
556 		return;
557 	}
558 
559 	if (login->sess->guid != guid) {
560 		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
561 
562 		req->status.status = cpu_to_be32(
563 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
564 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
565 		return;
566 	}
567 
568 	spin_lock_bh(&login->sess->lock);
569 	if (login->sess->card)
570 		fw_card_put(login->sess->card);
571 
572 	/* update the node details */
573 	login->sess->generation = req->generation;
574 	login->sess->node_id = req->node_addr;
575 	login->sess->card = fw_card_get(req->card);
576 	login->sess->speed = req->speed;
577 	spin_unlock_bh(&login->sess->lock);
578 
579 	req->status.status = cpu_to_be32(
580 		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
581 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
582 }
583 
584 static void sbp_management_request_logout(
585 	struct sbp_management_agent *agent, struct sbp_management_request *req,
586 	int *status_data_size)
587 {
588 	struct sbp_tport *tport = agent->tport;
589 	struct sbp_tpg *tpg = tport->tpg;
590 	int id;
591 	struct sbp_login_descriptor *login;
592 
593 	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
594 
595 	login = sbp_login_find_by_id(tpg, id);
596 	if (!login) {
597 		pr_warn("cannot find login: %d\n", id);
598 
599 		req->status.status = cpu_to_be32(
600 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
601 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
602 		return;
603 	}
604 
605 	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
606 		login->lun->unpacked_lun, login->login_id);
607 
608 	if (req->node_addr != login->sess->node_id) {
609 		pr_warn("logout from different node ID\n");
610 
611 		req->status.status = cpu_to_be32(
612 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
613 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
614 		return;
615 	}
616 
617 	sbp_login_release(login, true);
618 
619 	req->status.status = cpu_to_be32(
620 		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
621 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
622 }
623 
624 static void session_check_for_reset(struct sbp_session *sess)
625 {
626 	bool card_valid = false;
627 
628 	spin_lock_bh(&sess->lock);
629 
630 	if (sess->card) {
631 		spin_lock_irq(&sess->card->lock);
632 		card_valid = (sess->card->local_node != NULL);
633 		spin_unlock_irq(&sess->card->lock);
634 
635 		if (!card_valid) {
636 			fw_card_put(sess->card);
637 			sess->card = NULL;
638 		}
639 	}
640 
641 	if (!card_valid || (sess->generation != sess->card->generation)) {
642 		pr_info("Waiting for reconnect from node: %016llx\n",
643 				sess->guid);
644 
645 		sess->node_id = -1;
646 		sess->reconnect_expires = get_jiffies_64() +
647 			((sess->reconnect_hold + 1) * HZ);
648 	}
649 
650 	spin_unlock_bh(&sess->lock);
651 }
652 
653 static void session_reconnect_expired(struct sbp_session *sess)
654 {
655 	struct sbp_login_descriptor *login, *temp;
656 	LIST_HEAD(login_list);
657 
658 	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
659 
660 	spin_lock_bh(&sess->lock);
661 	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
662 		login->sess = NULL;
663 		list_move_tail(&login->link, &login_list);
664 	}
665 	spin_unlock_bh(&sess->lock);
666 
667 	list_for_each_entry_safe(login, temp, &login_list, link) {
668 		list_del(&login->link);
669 		sbp_login_release(login, false);
670 	}
671 
672 	sbp_session_release(sess, false);
673 }
674 
675 static void session_maintenance_work(struct work_struct *work)
676 {
677 	struct sbp_session *sess = container_of(work, struct sbp_session,
678 			maint_work.work);
679 
680 	/* could be called while tearing down the session */
681 	spin_lock_bh(&sess->lock);
682 	if (list_empty(&sess->login_list)) {
683 		spin_unlock_bh(&sess->lock);
684 		return;
685 	}
686 	spin_unlock_bh(&sess->lock);
687 
688 	if (sess->node_id != -1) {
689 		/* check for bus reset and make node_id invalid */
690 		session_check_for_reset(sess);
691 
692 		schedule_delayed_work(&sess->maint_work,
693 				SESSION_MAINTENANCE_INTERVAL);
694 	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
695 		/* still waiting for reconnect */
696 		schedule_delayed_work(&sess->maint_work,
697 				SESSION_MAINTENANCE_INTERVAL);
698 	} else {
699 		/* reconnect timeout has expired */
700 		session_reconnect_expired(sess);
701 	}
702 }
703 
704 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
705 		struct sbp_target_agent *agent)
706 {
707 	int state;
708 
709 	switch (tcode) {
710 	case TCODE_READ_QUADLET_REQUEST:
711 		pr_debug("tgt_agent AGENT_STATE READ\n");
712 
713 		spin_lock_bh(&agent->lock);
714 		state = agent->state;
715 		spin_unlock_bh(&agent->lock);
716 
717 		*(__be32 *)data = cpu_to_be32(state);
718 
719 		return RCODE_COMPLETE;
720 
721 	case TCODE_WRITE_QUADLET_REQUEST:
722 		/* ignored */
723 		return RCODE_COMPLETE;
724 
725 	default:
726 		return RCODE_TYPE_ERROR;
727 	}
728 }
729 
730 static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
731 		struct sbp_target_agent *agent)
732 {
733 	switch (tcode) {
734 	case TCODE_WRITE_QUADLET_REQUEST:
735 		pr_debug("tgt_agent AGENT_RESET\n");
736 		spin_lock_bh(&agent->lock);
737 		agent->state = AGENT_STATE_RESET;
738 		spin_unlock_bh(&agent->lock);
739 		return RCODE_COMPLETE;
740 
741 	default:
742 		return RCODE_TYPE_ERROR;
743 	}
744 }
745 
746 static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
747 		struct sbp_target_agent *agent)
748 {
749 	struct sbp2_pointer *ptr = data;
750 
751 	switch (tcode) {
752 	case TCODE_WRITE_BLOCK_REQUEST:
753 		spin_lock_bh(&agent->lock);
754 		if (agent->state != AGENT_STATE_SUSPENDED &&
755 				agent->state != AGENT_STATE_RESET) {
756 			spin_unlock_bh(&agent->lock);
757 			pr_notice("Ignoring ORB_POINTER write while active.\n");
758 			return RCODE_CONFLICT_ERROR;
759 		}
760 		agent->state = AGENT_STATE_ACTIVE;
761 		spin_unlock_bh(&agent->lock);
762 
763 		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
764 		agent->doorbell = false;
765 
766 		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
767 				agent->orb_pointer);
768 
769 		queue_work(system_unbound_wq, &agent->work);
770 
771 		return RCODE_COMPLETE;
772 
773 	case TCODE_READ_BLOCK_REQUEST:
774 		pr_debug("tgt_agent ORB_POINTER READ\n");
775 		spin_lock_bh(&agent->lock);
776 		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
777 		spin_unlock_bh(&agent->lock);
778 		return RCODE_COMPLETE;
779 
780 	default:
781 		return RCODE_TYPE_ERROR;
782 	}
783 }
784 
785 static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
786 		struct sbp_target_agent *agent)
787 {
788 	switch (tcode) {
789 	case TCODE_WRITE_QUADLET_REQUEST:
790 		spin_lock_bh(&agent->lock);
791 		if (agent->state != AGENT_STATE_SUSPENDED) {
792 			spin_unlock_bh(&agent->lock);
793 			pr_debug("Ignoring DOORBELL while active.\n");
794 			return RCODE_CONFLICT_ERROR;
795 		}
796 		agent->state = AGENT_STATE_ACTIVE;
797 		spin_unlock_bh(&agent->lock);
798 
799 		agent->doorbell = true;
800 
801 		pr_debug("tgt_agent DOORBELL\n");
802 
803 		queue_work(system_unbound_wq, &agent->work);
804 
805 		return RCODE_COMPLETE;
806 
807 	case TCODE_READ_QUADLET_REQUEST:
808 		return RCODE_COMPLETE;
809 
810 	default:
811 		return RCODE_TYPE_ERROR;
812 	}
813 }
814 
815 static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
816 		int tcode, void *data, struct sbp_target_agent *agent)
817 {
818 	switch (tcode) {
819 	case TCODE_WRITE_QUADLET_REQUEST:
820 		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
821 		/* ignored as we don't send unsolicited status */
822 		return RCODE_COMPLETE;
823 
824 	case TCODE_READ_QUADLET_REQUEST:
825 		return RCODE_COMPLETE;
826 
827 	default:
828 		return RCODE_TYPE_ERROR;
829 	}
830 }
831 
832 static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
833 		int tcode, int destination, int source, int generation,
834 		unsigned long long offset, void *data, size_t length,
835 		void *callback_data)
836 {
837 	struct sbp_target_agent *agent = callback_data;
838 	struct sbp_session *sess = agent->login->sess;
839 	int sess_gen, sess_node, rcode;
840 
841 	spin_lock_bh(&sess->lock);
842 	sess_gen = sess->generation;
843 	sess_node = sess->node_id;
844 	spin_unlock_bh(&sess->lock);
845 
846 	if (generation != sess_gen) {
847 		pr_notice("ignoring request with wrong generation\n");
848 		rcode = RCODE_TYPE_ERROR;
849 		goto out;
850 	}
851 
852 	if (source != sess_node) {
853 		pr_notice("ignoring request from foreign node (%x != %x)\n",
854 				source, sess_node);
855 		rcode = RCODE_TYPE_ERROR;
856 		goto out;
857 	}
858 
859 	/* turn offset into the offset from the start of the block */
860 	offset -= agent->handler.offset;
861 
862 	if (offset == 0x00 && length == 4) {
863 		/* AGENT_STATE */
864 		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
865 	} else if (offset == 0x04 && length == 4) {
866 		/* AGENT_RESET */
867 		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
868 	} else if (offset == 0x08 && length == 8) {
869 		/* ORB_POINTER */
870 		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
871 	} else if (offset == 0x10 && length == 4) {
872 		/* DOORBELL */
873 		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
874 	} else if (offset == 0x14 && length == 4) {
875 		/* UNSOLICITED_STATUS_ENABLE */
876 		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
877 				data, agent);
878 	} else {
879 		rcode = RCODE_ADDRESS_ERROR;
880 	}
881 
882 out:
883 	fw_send_response(card, request, rcode);
884 }
885 
886 static void sbp_handle_command(struct sbp_target_request *);
887 static int sbp_send_status(struct sbp_target_request *);
888 static void sbp_free_request(struct sbp_target_request *);
889 
890 static void tgt_agent_process_work(struct work_struct *work)
891 {
892 	struct sbp_target_request *req =
893 		container_of(work, struct sbp_target_request, work);
894 
895 	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
896 			req->orb_pointer,
897 			sbp2_pointer_to_addr(&req->orb.next_orb),
898 			sbp2_pointer_to_addr(&req->orb.data_descriptor),
899 			be32_to_cpu(req->orb.misc));
900 
901 	if (req->orb_pointer >> 32)
902 		pr_debug("ORB with high bits set\n");
903 
904 	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
905 		case 0:/* Format specified by this standard */
906 			sbp_handle_command(req);
907 			return;
908 		case 1: /* Reserved for future standardization */
909 		case 2: /* Vendor-dependent */
910 			req->status.status |= cpu_to_be32(
911 					STATUS_BLOCK_RESP(
912 						STATUS_RESP_REQUEST_COMPLETE) |
913 					STATUS_BLOCK_DEAD(0) |
914 					STATUS_BLOCK_LEN(1) |
915 					STATUS_BLOCK_SBP_STATUS(
916 						SBP_STATUS_REQ_TYPE_NOTSUPP));
917 			sbp_send_status(req);
918 			sbp_free_request(req);
919 			return;
920 		case 3: /* Dummy ORB */
921 			req->status.status |= cpu_to_be32(
922 					STATUS_BLOCK_RESP(
923 						STATUS_RESP_REQUEST_COMPLETE) |
924 					STATUS_BLOCK_DEAD(0) |
925 					STATUS_BLOCK_LEN(1) |
926 					STATUS_BLOCK_SBP_STATUS(
927 						SBP_STATUS_DUMMY_ORB_COMPLETE));
928 			sbp_send_status(req);
929 			sbp_free_request(req);
930 			return;
931 		default:
932 			BUG();
933 	}
934 }
935 
936 /* used to double-check we haven't been issued an AGENT_RESET */
937 static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
938 {
939 	bool active;
940 
941 	spin_lock_bh(&agent->lock);
942 	active = (agent->state == AGENT_STATE_ACTIVE);
943 	spin_unlock_bh(&agent->lock);
944 
945 	return active;
946 }
947 
948 static void tgt_agent_fetch_work(struct work_struct *work)
949 {
950 	struct sbp_target_agent *agent =
951 		container_of(work, struct sbp_target_agent, work);
952 	struct sbp_session *sess = agent->login->sess;
953 	struct sbp_target_request *req;
954 	int ret;
955 	bool doorbell = agent->doorbell;
956 	u64 next_orb = agent->orb_pointer;
957 
958 	while (next_orb && tgt_agent_check_active(agent)) {
959 		req = kzalloc(sizeof(*req), GFP_KERNEL);
960 		if (!req) {
961 			spin_lock_bh(&agent->lock);
962 			agent->state = AGENT_STATE_DEAD;
963 			spin_unlock_bh(&agent->lock);
964 			return;
965 		}
966 
967 		req->login = agent->login;
968 		req->orb_pointer = next_orb;
969 
970 		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
971 					req->orb_pointer >> 32));
972 		req->status.orb_low = cpu_to_be32(
973 				req->orb_pointer & 0xfffffffc);
974 
975 		/* read in the ORB */
976 		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
977 				sess->node_id, sess->generation, sess->speed,
978 				req->orb_pointer, &req->orb, sizeof(req->orb));
979 		if (ret != RCODE_COMPLETE) {
980 			pr_debug("tgt_orb fetch failed: %x\n", ret);
981 			req->status.status |= cpu_to_be32(
982 					STATUS_BLOCK_SRC(
983 						STATUS_SRC_ORB_FINISHED) |
984 					STATUS_BLOCK_RESP(
985 						STATUS_RESP_TRANSPORT_FAILURE) |
986 					STATUS_BLOCK_DEAD(1) |
987 					STATUS_BLOCK_LEN(1) |
988 					STATUS_BLOCK_SBP_STATUS(
989 						SBP_STATUS_UNSPECIFIED_ERROR));
990 			spin_lock_bh(&agent->lock);
991 			agent->state = AGENT_STATE_DEAD;
992 			spin_unlock_bh(&agent->lock);
993 
994 			sbp_send_status(req);
995 			sbp_free_request(req);
996 			return;
997 		}
998 
999 		/* check the next_ORB field */
1000 		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
1001 			next_orb = 0;
1002 			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1003 						STATUS_SRC_ORB_FINISHED));
1004 		} else {
1005 			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
1006 			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
1007 						STATUS_SRC_ORB_CONTINUING));
1008 		}
1009 
1010 		if (tgt_agent_check_active(agent) && !doorbell) {
1011 			INIT_WORK(&req->work, tgt_agent_process_work);
1012 			queue_work(system_unbound_wq, &req->work);
1013 		} else {
1014 			/* don't process this request, just check next_ORB */
1015 			sbp_free_request(req);
1016 		}
1017 
1018 		spin_lock_bh(&agent->lock);
1019 		doorbell = agent->doorbell = false;
1020 
1021 		/* check if we should carry on processing */
1022 		if (next_orb)
1023 			agent->orb_pointer = next_orb;
1024 		else
1025 			agent->state = AGENT_STATE_SUSPENDED;
1026 
1027 		spin_unlock_bh(&agent->lock);
1028 	};
1029 }
1030 
1031 static struct sbp_target_agent *sbp_target_agent_register(
1032 		struct sbp_login_descriptor *login)
1033 {
1034 	struct sbp_target_agent *agent;
1035 	int ret;
1036 
1037 	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1038 	if (!agent)
1039 		return ERR_PTR(-ENOMEM);
1040 
1041 	spin_lock_init(&agent->lock);
1042 
1043 	agent->handler.length = 0x20;
1044 	agent->handler.address_callback = tgt_agent_rw;
1045 	agent->handler.callback_data = agent;
1046 
1047 	agent->login = login;
1048 	agent->state = AGENT_STATE_RESET;
1049 	INIT_WORK(&agent->work, tgt_agent_fetch_work);
1050 	agent->orb_pointer = 0;
1051 	agent->doorbell = false;
1052 
1053 	ret = fw_core_add_address_handler(&agent->handler,
1054 			&sbp_register_region);
1055 	if (ret < 0) {
1056 		kfree(agent);
1057 		return ERR_PTR(ret);
1058 	}
1059 
1060 	return agent;
1061 }
1062 
1063 static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
1064 {
1065 	fw_core_remove_address_handler(&agent->handler);
1066 	cancel_work_sync(&agent->work);
1067 	kfree(agent);
1068 }
1069 
1070 /*
1071  * Simple wrapper around fw_run_transaction that retries the transaction several
1072  * times in case of failure, with an exponential backoff.
1073  */
1074 static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1075 		int generation, int speed, unsigned long long offset,
1076 		void *payload, size_t length)
1077 {
1078 	int attempt, ret, delay;
1079 
1080 	for (attempt = 1; attempt <= 5; attempt++) {
1081 		ret = fw_run_transaction(card, tcode, destination_id,
1082 				generation, speed, offset, payload, length);
1083 
1084 		switch (ret) {
1085 		case RCODE_COMPLETE:
1086 		case RCODE_TYPE_ERROR:
1087 		case RCODE_ADDRESS_ERROR:
1088 		case RCODE_GENERATION:
1089 			return ret;
1090 
1091 		default:
1092 			delay = 5 * attempt * attempt;
1093 			usleep_range(delay, delay * 2);
1094 		}
1095 	}
1096 
1097 	return ret;
1098 }
1099 
1100 /*
1101  * Wrapper around sbp_run_transaction that gets the card, destination,
1102  * generation and speed out of the request's session.
1103  */
1104 static int sbp_run_request_transaction(struct sbp_target_request *req,
1105 		int tcode, unsigned long long offset, void *payload,
1106 		size_t length)
1107 {
1108 	struct sbp_login_descriptor *login = req->login;
1109 	struct sbp_session *sess = login->sess;
1110 	struct fw_card *card;
1111 	int node_id, generation, speed, ret;
1112 
1113 	spin_lock_bh(&sess->lock);
1114 	card = fw_card_get(sess->card);
1115 	node_id = sess->node_id;
1116 	generation = sess->generation;
1117 	speed = sess->speed;
1118 	spin_unlock_bh(&sess->lock);
1119 
1120 	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1121 			offset, payload, length);
1122 
1123 	fw_card_put(card);
1124 
1125 	return ret;
1126 }
1127 
1128 static int sbp_fetch_command(struct sbp_target_request *req)
1129 {
1130 	int ret, cmd_len, copy_len;
1131 
1132 	cmd_len = scsi_command_size(req->orb.command_block);
1133 
1134 	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1135 	if (!req->cmd_buf)
1136 		return -ENOMEM;
1137 
1138 	memcpy(req->cmd_buf, req->orb.command_block,
1139 		min_t(int, cmd_len, sizeof(req->orb.command_block)));
1140 
1141 	if (cmd_len > sizeof(req->orb.command_block)) {
1142 		pr_debug("sbp_fetch_command: filling in long command\n");
1143 		copy_len = cmd_len - sizeof(req->orb.command_block);
1144 
1145 		ret = sbp_run_request_transaction(req,
1146 				TCODE_READ_BLOCK_REQUEST,
1147 				req->orb_pointer + sizeof(req->orb),
1148 				req->cmd_buf + sizeof(req->orb.command_block),
1149 				copy_len);
1150 		if (ret != RCODE_COMPLETE)
1151 			return -EIO;
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 static int sbp_fetch_page_table(struct sbp_target_request *req)
1158 {
1159 	int pg_tbl_sz, ret;
1160 	struct sbp_page_table_entry *pg_tbl;
1161 
1162 	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1163 		return 0;
1164 
1165 	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1166 		sizeof(struct sbp_page_table_entry);
1167 
1168 	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1169 	if (!pg_tbl)
1170 		return -ENOMEM;
1171 
1172 	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1173 			sbp2_pointer_to_addr(&req->orb.data_descriptor),
1174 			pg_tbl, pg_tbl_sz);
1175 	if (ret != RCODE_COMPLETE) {
1176 		kfree(pg_tbl);
1177 		return -EIO;
1178 	}
1179 
1180 	req->pg_tbl = pg_tbl;
1181 	return 0;
1182 }
1183 
1184 static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1185 	u32 *data_len, enum dma_data_direction *data_dir)
1186 {
1187 	int data_size, direction, idx;
1188 
1189 	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1190 	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1191 
1192 	if (!data_size) {
1193 		*data_len = 0;
1194 		*data_dir = DMA_NONE;
1195 		return;
1196 	}
1197 
1198 	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1199 
1200 	if (req->pg_tbl) {
1201 		*data_len = 0;
1202 		for (idx = 0; idx < data_size; idx++) {
1203 			*data_len += be16_to_cpu(
1204 					req->pg_tbl[idx].segment_length);
1205 		}
1206 	} else {
1207 		*data_len = data_size;
1208 	}
1209 }
1210 
1211 static void sbp_handle_command(struct sbp_target_request *req)
1212 {
1213 	struct sbp_login_descriptor *login = req->login;
1214 	struct sbp_session *sess = login->sess;
1215 	int ret, unpacked_lun;
1216 	u32 data_length;
1217 	enum dma_data_direction data_dir;
1218 
1219 	ret = sbp_fetch_command(req);
1220 	if (ret) {
1221 		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
1222 		goto err;
1223 	}
1224 
1225 	ret = sbp_fetch_page_table(req);
1226 	if (ret) {
1227 		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
1228 			ret);
1229 		goto err;
1230 	}
1231 
1232 	unpacked_lun = req->login->lun->unpacked_lun;
1233 	sbp_calc_data_length_direction(req, &data_length, &data_dir);
1234 
1235 	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
1236 			req->orb_pointer, unpacked_lun, data_length, data_dir);
1237 
1238 	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1239 			      req->sense_buf, unpacked_lun, data_length,
1240 			      MSG_SIMPLE_TAG, data_dir, 0))
1241 		goto err;
1242 
1243 	return;
1244 
1245 err:
1246 	req->status.status |= cpu_to_be32(
1247 		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1248 		STATUS_BLOCK_DEAD(0) |
1249 		STATUS_BLOCK_LEN(1) |
1250 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1251 	sbp_send_status(req);
1252 	sbp_free_request(req);
1253 }
1254 
1255 /*
1256  * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1257  * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1258  */
1259 static int sbp_rw_data(struct sbp_target_request *req)
1260 {
1261 	struct sbp_session *sess = req->login->sess;
1262 	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
1263 		generation, num_pte, length, tfr_length,
1264 		rcode = RCODE_COMPLETE;
1265 	struct sbp_page_table_entry *pte;
1266 	unsigned long long offset;
1267 	struct fw_card *card;
1268 	struct sg_mapping_iter iter;
1269 
1270 	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
1271 		tcode = TCODE_WRITE_BLOCK_REQUEST;
1272 		sg_miter_flags = SG_MITER_FROM_SG;
1273 	} else {
1274 		tcode = TCODE_READ_BLOCK_REQUEST;
1275 		sg_miter_flags = SG_MITER_TO_SG;
1276 	}
1277 
1278 	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
1279 	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
1280 
1281 	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
1282 	if (pg_size) {
1283 		pr_err("sbp_run_transaction: page size ignored\n");
1284 		pg_size = 0x100 << pg_size;
1285 	}
1286 
1287 	spin_lock_bh(&sess->lock);
1288 	card = fw_card_get(sess->card);
1289 	node_id = sess->node_id;
1290 	generation = sess->generation;
1291 	spin_unlock_bh(&sess->lock);
1292 
1293 	if (req->pg_tbl) {
1294 		pte = req->pg_tbl;
1295 		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1296 
1297 		offset = 0;
1298 		length = 0;
1299 	} else {
1300 		pte = NULL;
1301 		num_pte = 0;
1302 
1303 		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
1304 		length = req->se_cmd.data_length;
1305 	}
1306 
1307 	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
1308 		sg_miter_flags);
1309 
1310 	while (length || num_pte) {
1311 		if (!length) {
1312 			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
1313 				be32_to_cpu(pte->segment_base_lo);
1314 			length = be16_to_cpu(pte->segment_length);
1315 
1316 			pte++;
1317 			num_pte--;
1318 		}
1319 
1320 		sg_miter_next(&iter);
1321 
1322 		tfr_length = min3(length, max_payload, (int)iter.length);
1323 
1324 		/* FIXME: take page_size into account */
1325 
1326 		rcode = sbp_run_transaction(card, tcode, node_id,
1327 				generation, speed,
1328 				offset, iter.addr, tfr_length);
1329 
1330 		if (rcode != RCODE_COMPLETE)
1331 			break;
1332 
1333 		length -= tfr_length;
1334 		offset += tfr_length;
1335 		iter.consumed = tfr_length;
1336 	}
1337 
1338 	sg_miter_stop(&iter);
1339 	fw_card_put(card);
1340 
1341 	if (rcode == RCODE_COMPLETE) {
1342 		WARN_ON(length != 0);
1343 		return 0;
1344 	} else {
1345 		return -EIO;
1346 	}
1347 }
1348 
1349 static int sbp_send_status(struct sbp_target_request *req)
1350 {
1351 	int ret, length;
1352 	struct sbp_login_descriptor *login = req->login;
1353 
1354 	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1355 
1356 	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1357 			login->status_fifo_addr, &req->status, length);
1358 	if (ret != RCODE_COMPLETE) {
1359 		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
1360 		return -EIO;
1361 	}
1362 
1363 	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1364 			req->orb_pointer);
1365 
1366 	return 0;
1367 }
1368 
1369 static void sbp_sense_mangle(struct sbp_target_request *req)
1370 {
1371 	struct se_cmd *se_cmd = &req->se_cmd;
1372 	u8 *sense = req->sense_buf;
1373 	u8 *status = req->status.data;
1374 
1375 	WARN_ON(se_cmd->scsi_sense_length < 18);
1376 
1377 	switch (sense[0] & 0x7f) { 		/* sfmt */
1378 	case 0x70: /* current, fixed */
1379 		status[0] = 0 << 6;
1380 		break;
1381 	case 0x71: /* deferred, fixed */
1382 		status[0] = 1 << 6;
1383 		break;
1384 	case 0x72: /* current, descriptor */
1385 	case 0x73: /* deferred, descriptor */
1386 	default:
1387 		/*
1388 		 * TODO: SBP-3 specifies what we should do with descriptor
1389 		 * format sense data
1390 		 */
1391 		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
1392 			sense[0]);
1393 		req->status.status |= cpu_to_be32(
1394 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1395 			STATUS_BLOCK_DEAD(0) |
1396 			STATUS_BLOCK_LEN(1) |
1397 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
1398 		return;
1399 	}
1400 
1401 	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
1402 	status[1] =
1403 		(sense[0] & 0x80) |		/* valid */
1404 		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
1405 		(sense[2] & 0x0f);		/* sense_key */
1406 	status[2] = se_cmd->scsi_asc;		/* sense_code */
1407 	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
1408 
1409 	/* information */
1410 	status[4] = sense[3];
1411 	status[5] = sense[4];
1412 	status[6] = sense[5];
1413 	status[7] = sense[6];
1414 
1415 	/* CDB-dependent */
1416 	status[8] = sense[8];
1417 	status[9] = sense[9];
1418 	status[10] = sense[10];
1419 	status[11] = sense[11];
1420 
1421 	/* fru */
1422 	status[12] = sense[14];
1423 
1424 	/* sense_key-dependent */
1425 	status[13] = sense[15];
1426 	status[14] = sense[16];
1427 	status[15] = sense[17];
1428 
1429 	req->status.status |= cpu_to_be32(
1430 		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1431 		STATUS_BLOCK_DEAD(0) |
1432 		STATUS_BLOCK_LEN(5) |
1433 		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1434 }
1435 
1436 static int sbp_send_sense(struct sbp_target_request *req)
1437 {
1438 	struct se_cmd *se_cmd = &req->se_cmd;
1439 
1440 	if (se_cmd->scsi_sense_length) {
1441 		sbp_sense_mangle(req);
1442 	} else {
1443 		req->status.status |= cpu_to_be32(
1444 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1445 			STATUS_BLOCK_DEAD(0) |
1446 			STATUS_BLOCK_LEN(1) |
1447 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1448 	}
1449 
1450 	return sbp_send_status(req);
1451 }
1452 
1453 static void sbp_free_request(struct sbp_target_request *req)
1454 {
1455 	kfree(req->pg_tbl);
1456 	kfree(req->cmd_buf);
1457 	kfree(req);
1458 }
1459 
1460 static void sbp_mgt_agent_process(struct work_struct *work)
1461 {
1462 	struct sbp_management_agent *agent =
1463 		container_of(work, struct sbp_management_agent, work);
1464 	struct sbp_management_request *req = agent->request;
1465 	int ret;
1466 	int status_data_len = 0;
1467 
1468 	/* fetch the ORB from the initiator */
1469 	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1470 		req->node_addr, req->generation, req->speed,
1471 		agent->orb_offset, &req->orb, sizeof(req->orb));
1472 	if (ret != RCODE_COMPLETE) {
1473 		pr_debug("mgt_orb fetch failed: %x\n", ret);
1474 		goto out;
1475 	}
1476 
1477 	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1478 		sbp2_pointer_to_addr(&req->orb.ptr1),
1479 		sbp2_pointer_to_addr(&req->orb.ptr2),
1480 		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1481 		sbp2_pointer_to_addr(&req->orb.status_fifo));
1482 
1483 	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1484 		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1485 		pr_err("mgt_orb bad request\n");
1486 		goto out;
1487 	}
1488 
1489 	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1490 	case MANAGEMENT_ORB_FUNCTION_LOGIN:
1491 		sbp_management_request_login(agent, req, &status_data_len);
1492 		break;
1493 
1494 	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1495 		sbp_management_request_query_logins(agent, req,
1496 				&status_data_len);
1497 		break;
1498 
1499 	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1500 		sbp_management_request_reconnect(agent, req, &status_data_len);
1501 		break;
1502 
1503 	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1504 		pr_notice("SET PASSWORD not implemented\n");
1505 
1506 		req->status.status = cpu_to_be32(
1507 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1508 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1509 
1510 		break;
1511 
1512 	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1513 		sbp_management_request_logout(agent, req, &status_data_len);
1514 		break;
1515 
1516 	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1517 		pr_notice("ABORT TASK not implemented\n");
1518 
1519 		req->status.status = cpu_to_be32(
1520 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1521 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1522 
1523 		break;
1524 
1525 	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1526 		pr_notice("ABORT TASK SET not implemented\n");
1527 
1528 		req->status.status = cpu_to_be32(
1529 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1530 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1531 
1532 		break;
1533 
1534 	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1535 		pr_notice("LOGICAL UNIT RESET not implemented\n");
1536 
1537 		req->status.status = cpu_to_be32(
1538 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1539 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1540 
1541 		break;
1542 
1543 	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1544 		pr_notice("TARGET RESET not implemented\n");
1545 
1546 		req->status.status = cpu_to_be32(
1547 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1548 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1549 
1550 		break;
1551 
1552 	default:
1553 		pr_notice("unknown management function 0x%x\n",
1554 			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1555 
1556 		req->status.status = cpu_to_be32(
1557 			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1558 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1559 
1560 		break;
1561 	}
1562 
1563 	req->status.status |= cpu_to_be32(
1564 		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1565 		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1566 		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1567 	req->status.orb_low = cpu_to_be32(agent->orb_offset);
1568 
1569 	/* write the status block back to the initiator */
1570 	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1571 		req->node_addr, req->generation, req->speed,
1572 		sbp2_pointer_to_addr(&req->orb.status_fifo),
1573 		&req->status, 8 + status_data_len);
1574 	if (ret != RCODE_COMPLETE) {
1575 		pr_debug("mgt_orb status write failed: %x\n", ret);
1576 		goto out;
1577 	}
1578 
1579 out:
1580 	fw_card_put(req->card);
1581 	kfree(req);
1582 
1583 	spin_lock_bh(&agent->lock);
1584 	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1585 	spin_unlock_bh(&agent->lock);
1586 }
1587 
1588 static void sbp_mgt_agent_rw(struct fw_card *card,
1589 	struct fw_request *request, int tcode, int destination, int source,
1590 	int generation, unsigned long long offset, void *data, size_t length,
1591 	void *callback_data)
1592 {
1593 	struct sbp_management_agent *agent = callback_data;
1594 	struct sbp2_pointer *ptr = data;
1595 	int rcode = RCODE_ADDRESS_ERROR;
1596 
1597 	if (!agent->tport->enable)
1598 		goto out;
1599 
1600 	if ((offset != agent->handler.offset) || (length != 8))
1601 		goto out;
1602 
1603 	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1604 		struct sbp_management_request *req;
1605 		int prev_state;
1606 
1607 		spin_lock_bh(&agent->lock);
1608 		prev_state = agent->state;
1609 		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1610 		spin_unlock_bh(&agent->lock);
1611 
1612 		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1613 			pr_notice("ignoring management request while busy\n");
1614 			rcode = RCODE_CONFLICT_ERROR;
1615 			goto out;
1616 		}
1617 
1618 		req = kzalloc(sizeof(*req), GFP_ATOMIC);
1619 		if (!req) {
1620 			rcode = RCODE_CONFLICT_ERROR;
1621 			goto out;
1622 		}
1623 
1624 		req->card = fw_card_get(card);
1625 		req->generation = generation;
1626 		req->node_addr = source;
1627 		req->speed = fw_get_request_speed(request);
1628 
1629 		agent->orb_offset = sbp2_pointer_to_addr(ptr);
1630 		agent->request = req;
1631 
1632 		queue_work(system_unbound_wq, &agent->work);
1633 		rcode = RCODE_COMPLETE;
1634 	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1635 		addr_to_sbp2_pointer(agent->orb_offset, ptr);
1636 		rcode = RCODE_COMPLETE;
1637 	} else {
1638 		rcode = RCODE_TYPE_ERROR;
1639 	}
1640 
1641 out:
1642 	fw_send_response(card, request, rcode);
1643 }
1644 
1645 static struct sbp_management_agent *sbp_management_agent_register(
1646 		struct sbp_tport *tport)
1647 {
1648 	int ret;
1649 	struct sbp_management_agent *agent;
1650 
1651 	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1652 	if (!agent)
1653 		return ERR_PTR(-ENOMEM);
1654 
1655 	spin_lock_init(&agent->lock);
1656 	agent->tport = tport;
1657 	agent->handler.length = 0x08;
1658 	agent->handler.address_callback = sbp_mgt_agent_rw;
1659 	agent->handler.callback_data = agent;
1660 	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1661 	INIT_WORK(&agent->work, sbp_mgt_agent_process);
1662 	agent->orb_offset = 0;
1663 	agent->request = NULL;
1664 
1665 	ret = fw_core_add_address_handler(&agent->handler,
1666 			&sbp_register_region);
1667 	if (ret < 0) {
1668 		kfree(agent);
1669 		return ERR_PTR(ret);
1670 	}
1671 
1672 	return agent;
1673 }
1674 
1675 static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
1676 {
1677 	fw_core_remove_address_handler(&agent->handler);
1678 	cancel_work_sync(&agent->work);
1679 	kfree(agent);
1680 }
1681 
1682 static int sbp_check_true(struct se_portal_group *se_tpg)
1683 {
1684 	return 1;
1685 }
1686 
1687 static int sbp_check_false(struct se_portal_group *se_tpg)
1688 {
1689 	return 0;
1690 }
1691 
1692 static char *sbp_get_fabric_name(void)
1693 {
1694 	return "sbp";
1695 }
1696 
1697 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1698 {
1699 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1700 	struct sbp_tport *tport = tpg->tport;
1701 
1702 	return &tport->tport_name[0];
1703 }
1704 
1705 static u16 sbp_get_tag(struct se_portal_group *se_tpg)
1706 {
1707 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1708 	return tpg->tport_tpgt;
1709 }
1710 
1711 static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
1712 {
1713 	return 1;
1714 }
1715 
1716 static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
1717 {
1718 	struct sbp_nacl *nacl;
1719 
1720 	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
1721 	if (!nacl) {
1722 		pr_err("Unable to allocate struct sbp_nacl\n");
1723 		return NULL;
1724 	}
1725 
1726 	return &nacl->se_node_acl;
1727 }
1728 
1729 static void sbp_release_fabric_acl(
1730 	struct se_portal_group *se_tpg,
1731 	struct se_node_acl *se_nacl)
1732 {
1733 	struct sbp_nacl *nacl =
1734 		container_of(se_nacl, struct sbp_nacl, se_node_acl);
1735 	kfree(nacl);
1736 }
1737 
1738 static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
1739 {
1740 	return 1;
1741 }
1742 
1743 static void sbp_release_cmd(struct se_cmd *se_cmd)
1744 {
1745 	struct sbp_target_request *req = container_of(se_cmd,
1746 			struct sbp_target_request, se_cmd);
1747 
1748 	sbp_free_request(req);
1749 }
1750 
1751 static int sbp_shutdown_session(struct se_session *se_sess)
1752 {
1753 	return 0;
1754 }
1755 
1756 static void sbp_close_session(struct se_session *se_sess)
1757 {
1758 	return;
1759 }
1760 
1761 static u32 sbp_sess_get_index(struct se_session *se_sess)
1762 {
1763 	return 0;
1764 }
1765 
1766 static int sbp_write_pending(struct se_cmd *se_cmd)
1767 {
1768 	struct sbp_target_request *req = container_of(se_cmd,
1769 			struct sbp_target_request, se_cmd);
1770 	int ret;
1771 
1772 	ret = sbp_rw_data(req);
1773 	if (ret) {
1774 		req->status.status |= cpu_to_be32(
1775 			STATUS_BLOCK_RESP(
1776 				STATUS_RESP_TRANSPORT_FAILURE) |
1777 			STATUS_BLOCK_DEAD(0) |
1778 			STATUS_BLOCK_LEN(1) |
1779 			STATUS_BLOCK_SBP_STATUS(
1780 				SBP_STATUS_UNSPECIFIED_ERROR));
1781 		sbp_send_status(req);
1782 		return ret;
1783 	}
1784 
1785 	target_execute_cmd(se_cmd);
1786 	return 0;
1787 }
1788 
1789 static int sbp_write_pending_status(struct se_cmd *se_cmd)
1790 {
1791 	return 0;
1792 }
1793 
1794 static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
1795 {
1796 	return;
1797 }
1798 
1799 static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
1800 {
1801 	struct sbp_target_request *req = container_of(se_cmd,
1802 			struct sbp_target_request, se_cmd);
1803 
1804 	/* only used for printk until we do TMRs */
1805 	return (u32)req->orb_pointer;
1806 }
1807 
1808 static int sbp_get_cmd_state(struct se_cmd *se_cmd)
1809 {
1810 	return 0;
1811 }
1812 
1813 static int sbp_queue_data_in(struct se_cmd *se_cmd)
1814 {
1815 	struct sbp_target_request *req = container_of(se_cmd,
1816 			struct sbp_target_request, se_cmd);
1817 	int ret;
1818 
1819 	ret = sbp_rw_data(req);
1820 	if (ret) {
1821 		req->status.status |= cpu_to_be32(
1822 			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
1823 			STATUS_BLOCK_DEAD(0) |
1824 			STATUS_BLOCK_LEN(1) |
1825 			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
1826 		sbp_send_status(req);
1827 		return ret;
1828 	}
1829 
1830 	return sbp_send_sense(req);
1831 }
1832 
1833 /*
1834  * Called after command (no data transfer) or after the write (to device)
1835  * operation is completed
1836  */
1837 static int sbp_queue_status(struct se_cmd *se_cmd)
1838 {
1839 	struct sbp_target_request *req = container_of(se_cmd,
1840 			struct sbp_target_request, se_cmd);
1841 
1842 	return sbp_send_sense(req);
1843 }
1844 
1845 static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
1846 {
1847 }
1848 
1849 static int sbp_check_stop_free(struct se_cmd *se_cmd)
1850 {
1851 	struct sbp_target_request *req = container_of(se_cmd,
1852 			struct sbp_target_request, se_cmd);
1853 
1854 	transport_generic_free_cmd(&req->se_cmd, 0);
1855 	return 1;
1856 }
1857 
1858 /*
1859  * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
1860  */
1861 static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
1862 {
1863 	/*
1864 	 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
1865 	 * This is defined in section 7.5.1 Table 362 in spc4r17
1866 	 */
1867 	return SCSI_PROTOCOL_SBP;
1868 }
1869 
1870 static u32 sbp_get_pr_transport_id(
1871 	struct se_portal_group *se_tpg,
1872 	struct se_node_acl *se_nacl,
1873 	struct t10_pr_registration *pr_reg,
1874 	int *format_code,
1875 	unsigned char *buf)
1876 {
1877 	int ret;
1878 
1879 	/*
1880 	 * Set PROTOCOL IDENTIFIER to 3h for SBP
1881 	 */
1882 	buf[0] = SCSI_PROTOCOL_SBP;
1883 	/*
1884 	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1885 	 * over IEEE 1394
1886 	 */
1887 	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
1888 	if (ret < 0)
1889 		pr_debug("sbp transport_id: invalid hex string\n");
1890 
1891 	/*
1892 	 * The IEEE 1394 Transport ID is a hardcoded 24-byte length
1893 	 */
1894 	return 24;
1895 }
1896 
1897 static u32 sbp_get_pr_transport_id_len(
1898 	struct se_portal_group *se_tpg,
1899 	struct se_node_acl *se_nacl,
1900 	struct t10_pr_registration *pr_reg,
1901 	int *format_code)
1902 {
1903 	*format_code = 0;
1904 	/*
1905 	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
1906 	 * over IEEE 1394
1907 	 *
1908 	 * The SBP Transport ID is a hardcoded 24-byte length
1909 	 */
1910 	return 24;
1911 }
1912 
1913 /*
1914  * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
1915  * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
1916  */
1917 static char *sbp_parse_pr_out_transport_id(
1918 	struct se_portal_group *se_tpg,
1919 	const char *buf,
1920 	u32 *out_tid_len,
1921 	char **port_nexus_ptr)
1922 {
1923 	/*
1924 	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
1925 	 * for initiator ports using SCSI over SBP Serial SCSI Protocol
1926 	 *
1927 	 * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
1928 	 * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
1929 	 * so we return the **port_nexus_ptr set to NULL.
1930 	 */
1931 	*port_nexus_ptr = NULL;
1932 	*out_tid_len = 24;
1933 
1934 	return (char *)&buf[8];
1935 }
1936 
1937 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
1938 {
1939 	int i, count = 0;
1940 
1941 	spin_lock(&tpg->tpg_lun_lock);
1942 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1943 		struct se_lun *se_lun = tpg->tpg_lun_list[i];
1944 
1945 		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
1946 			continue;
1947 
1948 		count++;
1949 	}
1950 	spin_unlock(&tpg->tpg_lun_lock);
1951 
1952 	return count;
1953 }
1954 
1955 static int sbp_update_unit_directory(struct sbp_tport *tport)
1956 {
1957 	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
1958 	u32 *data;
1959 
1960 	if (tport->unit_directory.data) {
1961 		fw_core_remove_descriptor(&tport->unit_directory);
1962 		kfree(tport->unit_directory.data);
1963 		tport->unit_directory.data = NULL;
1964 	}
1965 
1966 	if (!tport->enable || !tport->tpg)
1967 		return 0;
1968 
1969 	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
1970 
1971 	/*
1972 	 * Number of entries in the final unit directory:
1973 	 *  - all of those in the template
1974 	 *  - management_agent
1975 	 *  - unit_characteristics
1976 	 *  - reconnect_timeout
1977 	 *  - unit unique ID
1978 	 *  - one for each LUN
1979 	 *
1980 	 *  MUST NOT include leaf or sub-directory entries
1981 	 */
1982 	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
1983 
1984 	if (tport->directory_id != -1)
1985 		num_entries++;
1986 
1987 	/* allocate num_entries + 4 for the header and unique ID leaf */
1988 	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
1989 	if (!data)
1990 		return -ENOMEM;
1991 
1992 	/* directory_length */
1993 	data[idx++] = num_entries << 16;
1994 
1995 	/* directory_id */
1996 	if (tport->directory_id != -1)
1997 		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
1998 
1999 	/* unit directory template */
2000 	memcpy(&data[idx], sbp_unit_directory_template,
2001 			sizeof(sbp_unit_directory_template));
2002 	idx += ARRAY_SIZE(sbp_unit_directory_template);
2003 
2004 	/* management_agent */
2005 	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
2006 	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
2007 
2008 	/* unit_characteristics */
2009 	data[idx++] = 0x3a000000 |
2010 		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
2011 		SBP_ORB_FETCH_SIZE;
2012 
2013 	/* reconnect_timeout */
2014 	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
2015 
2016 	/* unit unique ID (leaf is just after LUNs) */
2017 	data[idx++] = 0x8d000000 | (num_luns + 1);
2018 
2019 	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2020 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
2021 		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
2022 		struct se_device *dev;
2023 		int type;
2024 
2025 		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
2026 			continue;
2027 
2028 		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2029 
2030 		dev = se_lun->lun_se_dev;
2031 		type = dev->transport->get_device_type(dev);
2032 
2033 		/* logical_unit_number */
2034 		data[idx++] = 0x14000000 |
2035 			((type << 16) & 0x1f0000) |
2036 			(se_lun->unpacked_lun & 0xffff);
2037 
2038 		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
2039 	}
2040 	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
2041 
2042 	/* unit unique ID leaf */
2043 	data[idx++] = 2 << 16;
2044 	data[idx++] = tport->guid >> 32;
2045 	data[idx++] = tport->guid;
2046 
2047 	tport->unit_directory.length = idx;
2048 	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
2049 	tport->unit_directory.data = data;
2050 
2051 	ret = fw_core_add_descriptor(&tport->unit_directory);
2052 	if (ret < 0) {
2053 		kfree(tport->unit_directory.data);
2054 		tport->unit_directory.data = NULL;
2055 	}
2056 
2057 	return ret;
2058 }
2059 
2060 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
2061 {
2062 	const char *cp;
2063 	char c, nibble;
2064 	int pos = 0, err;
2065 
2066 	*wwn = 0;
2067 	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
2068 		c = *cp;
2069 		if (c == '\n' && cp[1] == '\0')
2070 			continue;
2071 		if (c == '\0') {
2072 			err = 2;
2073 			if (pos != 16)
2074 				goto fail;
2075 			return cp - name;
2076 		}
2077 		err = 3;
2078 		if (isdigit(c))
2079 			nibble = c - '0';
2080 		else if (isxdigit(c))
2081 			nibble = tolower(c) - 'a' + 10;
2082 		else
2083 			goto fail;
2084 		*wwn = (*wwn << 4) | nibble;
2085 		pos++;
2086 	}
2087 	err = 4;
2088 fail:
2089 	printk(KERN_INFO "err %u len %zu pos %u\n",
2090 			err, cp - name, pos);
2091 	return -1;
2092 }
2093 
2094 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
2095 {
2096 	return snprintf(buf, len, "%016llx", wwn);
2097 }
2098 
2099 static struct se_node_acl *sbp_make_nodeacl(
2100 		struct se_portal_group *se_tpg,
2101 		struct config_group *group,
2102 		const char *name)
2103 {
2104 	struct se_node_acl *se_nacl, *se_nacl_new;
2105 	struct sbp_nacl *nacl;
2106 	u64 guid = 0;
2107 	u32 nexus_depth = 1;
2108 
2109 	if (sbp_parse_wwn(name, &guid) < 0)
2110 		return ERR_PTR(-EINVAL);
2111 
2112 	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
2113 	if (!se_nacl_new)
2114 		return ERR_PTR(-ENOMEM);
2115 
2116 	/*
2117 	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
2118 	 * when converting a NodeACL from demo mode -> explict
2119 	 */
2120 	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
2121 			name, nexus_depth);
2122 	if (IS_ERR(se_nacl)) {
2123 		sbp_release_fabric_acl(se_tpg, se_nacl_new);
2124 		return se_nacl;
2125 	}
2126 
2127 	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
2128 	nacl->guid = guid;
2129 	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
2130 
2131 	return se_nacl;
2132 }
2133 
2134 static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
2135 {
2136 	struct sbp_nacl *nacl =
2137 		container_of(se_acl, struct sbp_nacl, se_node_acl);
2138 
2139 	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
2140 	kfree(nacl);
2141 }
2142 
2143 static int sbp_post_link_lun(
2144 		struct se_portal_group *se_tpg,
2145 		struct se_lun *se_lun)
2146 {
2147 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2148 
2149 	return sbp_update_unit_directory(tpg->tport);
2150 }
2151 
2152 static void sbp_pre_unlink_lun(
2153 		struct se_portal_group *se_tpg,
2154 		struct se_lun *se_lun)
2155 {
2156 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2157 	struct sbp_tport *tport = tpg->tport;
2158 	int ret;
2159 
2160 	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2161 		tport->enable = 0;
2162 
2163 	ret = sbp_update_unit_directory(tport);
2164 	if (ret < 0)
2165 		pr_err("unlink LUN: failed to update unit directory\n");
2166 }
2167 
2168 static struct se_portal_group *sbp_make_tpg(
2169 		struct se_wwn *wwn,
2170 		struct config_group *group,
2171 		const char *name)
2172 {
2173 	struct sbp_tport *tport =
2174 		container_of(wwn, struct sbp_tport, tport_wwn);
2175 
2176 	struct sbp_tpg *tpg;
2177 	unsigned long tpgt;
2178 	int ret;
2179 
2180 	if (strstr(name, "tpgt_") != name)
2181 		return ERR_PTR(-EINVAL);
2182 	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2183 		return ERR_PTR(-EINVAL);
2184 
2185 	if (tport->tpg) {
2186 		pr_err("Only one TPG per Unit is possible.\n");
2187 		return ERR_PTR(-EBUSY);
2188 	}
2189 
2190 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2191 	if (!tpg) {
2192 		pr_err("Unable to allocate struct sbp_tpg\n");
2193 		return ERR_PTR(-ENOMEM);
2194 	}
2195 
2196 	tpg->tport = tport;
2197 	tpg->tport_tpgt = tpgt;
2198 	tport->tpg = tpg;
2199 
2200 	/* default attribute values */
2201 	tport->enable = 0;
2202 	tport->directory_id = -1;
2203 	tport->mgt_orb_timeout = 15;
2204 	tport->max_reconnect_timeout = 5;
2205 	tport->max_logins_per_lun = 1;
2206 
2207 	tport->mgt_agt = sbp_management_agent_register(tport);
2208 	if (IS_ERR(tport->mgt_agt)) {
2209 		ret = PTR_ERR(tport->mgt_agt);
2210 		goto out_free_tpg;
2211 	}
2212 
2213 	ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
2214 			&tpg->se_tpg, (void *)tpg,
2215 			TRANSPORT_TPG_TYPE_NORMAL);
2216 	if (ret < 0)
2217 		goto out_unreg_mgt_agt;
2218 
2219 	return &tpg->se_tpg;
2220 
2221 out_unreg_mgt_agt:
2222 	sbp_management_agent_unregister(tport->mgt_agt);
2223 out_free_tpg:
2224 	tport->tpg = NULL;
2225 	kfree(tpg);
2226 	return ERR_PTR(ret);
2227 }
2228 
2229 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2230 {
2231 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2232 	struct sbp_tport *tport = tpg->tport;
2233 
2234 	core_tpg_deregister(se_tpg);
2235 	sbp_management_agent_unregister(tport->mgt_agt);
2236 	tport->tpg = NULL;
2237 	kfree(tpg);
2238 }
2239 
2240 static struct se_wwn *sbp_make_tport(
2241 		struct target_fabric_configfs *tf,
2242 		struct config_group *group,
2243 		const char *name)
2244 {
2245 	struct sbp_tport *tport;
2246 	u64 guid = 0;
2247 
2248 	if (sbp_parse_wwn(name, &guid) < 0)
2249 		return ERR_PTR(-EINVAL);
2250 
2251 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2252 	if (!tport) {
2253 		pr_err("Unable to allocate struct sbp_tport\n");
2254 		return ERR_PTR(-ENOMEM);
2255 	}
2256 
2257 	tport->guid = guid;
2258 	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2259 
2260 	return &tport->tport_wwn;
2261 }
2262 
2263 static void sbp_drop_tport(struct se_wwn *wwn)
2264 {
2265 	struct sbp_tport *tport =
2266 		container_of(wwn, struct sbp_tport, tport_wwn);
2267 
2268 	kfree(tport);
2269 }
2270 
2271 static ssize_t sbp_wwn_show_attr_version(
2272 		struct target_fabric_configfs *tf,
2273 		char *page)
2274 {
2275 	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2276 }
2277 
2278 TF_WWN_ATTR_RO(sbp, version);
2279 
2280 static struct configfs_attribute *sbp_wwn_attrs[] = {
2281 	&sbp_wwn_version.attr,
2282 	NULL,
2283 };
2284 
2285 static ssize_t sbp_tpg_show_directory_id(
2286 		struct se_portal_group *se_tpg,
2287 		char *page)
2288 {
2289 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2290 	struct sbp_tport *tport = tpg->tport;
2291 
2292 	if (tport->directory_id == -1)
2293 		return sprintf(page, "implicit\n");
2294 	else
2295 		return sprintf(page, "%06x\n", tport->directory_id);
2296 }
2297 
2298 static ssize_t sbp_tpg_store_directory_id(
2299 		struct se_portal_group *se_tpg,
2300 		const char *page,
2301 		size_t count)
2302 {
2303 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2304 	struct sbp_tport *tport = tpg->tport;
2305 	unsigned long val;
2306 
2307 	if (tport->enable) {
2308 		pr_err("Cannot change the directory_id on an active target.\n");
2309 		return -EBUSY;
2310 	}
2311 
2312 	if (strstr(page, "implicit") == page) {
2313 		tport->directory_id = -1;
2314 	} else {
2315 		if (kstrtoul(page, 16, &val) < 0)
2316 			return -EINVAL;
2317 		if (val > 0xffffff)
2318 			return -EINVAL;
2319 
2320 		tport->directory_id = val;
2321 	}
2322 
2323 	return count;
2324 }
2325 
2326 static ssize_t sbp_tpg_show_enable(
2327 		struct se_portal_group *se_tpg,
2328 		char *page)
2329 {
2330 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2331 	struct sbp_tport *tport = tpg->tport;
2332 	return sprintf(page, "%d\n", tport->enable);
2333 }
2334 
2335 static ssize_t sbp_tpg_store_enable(
2336 		struct se_portal_group *se_tpg,
2337 		const char *page,
2338 		size_t count)
2339 {
2340 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2341 	struct sbp_tport *tport = tpg->tport;
2342 	unsigned long val;
2343 	int ret;
2344 
2345 	if (kstrtoul(page, 0, &val) < 0)
2346 		return -EINVAL;
2347 	if ((val != 0) && (val != 1))
2348 		return -EINVAL;
2349 
2350 	if (tport->enable == val)
2351 		return count;
2352 
2353 	if (val) {
2354 		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2355 			pr_err("Cannot enable a target with no LUNs!\n");
2356 			return -EINVAL;
2357 		}
2358 	} else {
2359 		/* XXX: force-shutdown sessions instead? */
2360 		spin_lock_bh(&se_tpg->session_lock);
2361 		if (!list_empty(&se_tpg->tpg_sess_list)) {
2362 			spin_unlock_bh(&se_tpg->session_lock);
2363 			return -EBUSY;
2364 		}
2365 		spin_unlock_bh(&se_tpg->session_lock);
2366 	}
2367 
2368 	tport->enable = val;
2369 
2370 	ret = sbp_update_unit_directory(tport);
2371 	if (ret < 0) {
2372 		pr_err("Could not update Config ROM\n");
2373 		return ret;
2374 	}
2375 
2376 	return count;
2377 }
2378 
2379 TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
2380 TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
2381 
2382 static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2383 	&sbp_tpg_directory_id.attr,
2384 	&sbp_tpg_enable.attr,
2385 	NULL,
2386 };
2387 
2388 static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
2389 		struct se_portal_group *se_tpg,
2390 		char *page)
2391 {
2392 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2393 	struct sbp_tport *tport = tpg->tport;
2394 	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2395 }
2396 
2397 static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
2398 		struct se_portal_group *se_tpg,
2399 		const char *page,
2400 		size_t count)
2401 {
2402 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2403 	struct sbp_tport *tport = tpg->tport;
2404 	unsigned long val;
2405 	int ret;
2406 
2407 	if (kstrtoul(page, 0, &val) < 0)
2408 		return -EINVAL;
2409 	if ((val < 1) || (val > 127))
2410 		return -EINVAL;
2411 
2412 	if (tport->mgt_orb_timeout == val)
2413 		return count;
2414 
2415 	tport->mgt_orb_timeout = val;
2416 
2417 	ret = sbp_update_unit_directory(tport);
2418 	if (ret < 0)
2419 		return ret;
2420 
2421 	return count;
2422 }
2423 
2424 static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
2425 		struct se_portal_group *se_tpg,
2426 		char *page)
2427 {
2428 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2429 	struct sbp_tport *tport = tpg->tport;
2430 	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2431 }
2432 
2433 static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
2434 		struct se_portal_group *se_tpg,
2435 		const char *page,
2436 		size_t count)
2437 {
2438 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2439 	struct sbp_tport *tport = tpg->tport;
2440 	unsigned long val;
2441 	int ret;
2442 
2443 	if (kstrtoul(page, 0, &val) < 0)
2444 		return -EINVAL;
2445 	if ((val < 1) || (val > 32767))
2446 		return -EINVAL;
2447 
2448 	if (tport->max_reconnect_timeout == val)
2449 		return count;
2450 
2451 	tport->max_reconnect_timeout = val;
2452 
2453 	ret = sbp_update_unit_directory(tport);
2454 	if (ret < 0)
2455 		return ret;
2456 
2457 	return count;
2458 }
2459 
2460 static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
2461 		struct se_portal_group *se_tpg,
2462 		char *page)
2463 {
2464 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2465 	struct sbp_tport *tport = tpg->tport;
2466 	return sprintf(page, "%d\n", tport->max_logins_per_lun);
2467 }
2468 
2469 static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
2470 		struct se_portal_group *se_tpg,
2471 		const char *page,
2472 		size_t count)
2473 {
2474 	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2475 	struct sbp_tport *tport = tpg->tport;
2476 	unsigned long val;
2477 
2478 	if (kstrtoul(page, 0, &val) < 0)
2479 		return -EINVAL;
2480 	if ((val < 1) || (val > 127))
2481 		return -EINVAL;
2482 
2483 	/* XXX: also check against current count? */
2484 
2485 	tport->max_logins_per_lun = val;
2486 
2487 	return count;
2488 }
2489 
2490 TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
2491 TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
2492 TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
2493 
2494 static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2495 	&sbp_tpg_attrib_mgt_orb_timeout.attr,
2496 	&sbp_tpg_attrib_max_reconnect_timeout.attr,
2497 	&sbp_tpg_attrib_max_logins_per_lun.attr,
2498 	NULL,
2499 };
2500 
2501 static struct target_core_fabric_ops sbp_ops = {
2502 	.get_fabric_name		= sbp_get_fabric_name,
2503 	.get_fabric_proto_ident		= sbp_get_fabric_proto_ident,
2504 	.tpg_get_wwn			= sbp_get_fabric_wwn,
2505 	.tpg_get_tag			= sbp_get_tag,
2506 	.tpg_get_default_depth		= sbp_get_default_depth,
2507 	.tpg_get_pr_transport_id	= sbp_get_pr_transport_id,
2508 	.tpg_get_pr_transport_id_len	= sbp_get_pr_transport_id_len,
2509 	.tpg_parse_pr_out_transport_id	= sbp_parse_pr_out_transport_id,
2510 	.tpg_check_demo_mode		= sbp_check_true,
2511 	.tpg_check_demo_mode_cache	= sbp_check_true,
2512 	.tpg_check_demo_mode_write_protect = sbp_check_false,
2513 	.tpg_check_prod_mode_write_protect = sbp_check_false,
2514 	.tpg_alloc_fabric_acl		= sbp_alloc_fabric_acl,
2515 	.tpg_release_fabric_acl		= sbp_release_fabric_acl,
2516 	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
2517 	.release_cmd			= sbp_release_cmd,
2518 	.shutdown_session		= sbp_shutdown_session,
2519 	.close_session			= sbp_close_session,
2520 	.sess_get_index			= sbp_sess_get_index,
2521 	.write_pending			= sbp_write_pending,
2522 	.write_pending_status		= sbp_write_pending_status,
2523 	.set_default_node_attributes	= sbp_set_default_node_attrs,
2524 	.get_task_tag			= sbp_get_task_tag,
2525 	.get_cmd_state			= sbp_get_cmd_state,
2526 	.queue_data_in			= sbp_queue_data_in,
2527 	.queue_status			= sbp_queue_status,
2528 	.queue_tm_rsp			= sbp_queue_tm_rsp,
2529 	.check_stop_free		= sbp_check_stop_free,
2530 
2531 	.fabric_make_wwn		= sbp_make_tport,
2532 	.fabric_drop_wwn		= sbp_drop_tport,
2533 	.fabric_make_tpg		= sbp_make_tpg,
2534 	.fabric_drop_tpg		= sbp_drop_tpg,
2535 	.fabric_post_link		= sbp_post_link_lun,
2536 	.fabric_pre_unlink		= sbp_pre_unlink_lun,
2537 	.fabric_make_np			= NULL,
2538 	.fabric_drop_np			= NULL,
2539 	.fabric_make_nodeacl		= sbp_make_nodeacl,
2540 	.fabric_drop_nodeacl		= sbp_drop_nodeacl,
2541 };
2542 
2543 static int sbp_register_configfs(void)
2544 {
2545 	struct target_fabric_configfs *fabric;
2546 	int ret;
2547 
2548 	fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
2549 	if (IS_ERR(fabric)) {
2550 		pr_err("target_fabric_configfs_init() failed\n");
2551 		return PTR_ERR(fabric);
2552 	}
2553 
2554 	fabric->tf_ops = sbp_ops;
2555 
2556 	/*
2557 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
2558 	 */
2559 	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
2560 	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
2561 	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
2562 	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2563 	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2564 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2565 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2566 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2567 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2568 
2569 	ret = target_fabric_configfs_register(fabric);
2570 	if (ret < 0) {
2571 		pr_err("target_fabric_configfs_register() failed for SBP\n");
2572 		return ret;
2573 	}
2574 
2575 	sbp_fabric_configfs = fabric;
2576 
2577 	return 0;
2578 };
2579 
2580 static void sbp_deregister_configfs(void)
2581 {
2582 	if (!sbp_fabric_configfs)
2583 		return;
2584 
2585 	target_fabric_configfs_deregister(sbp_fabric_configfs);
2586 	sbp_fabric_configfs = NULL;
2587 };
2588 
2589 static int __init sbp_init(void)
2590 {
2591 	int ret;
2592 
2593 	ret = sbp_register_configfs();
2594 	if (ret < 0)
2595 		return ret;
2596 
2597 	return 0;
2598 };
2599 
2600 static void __exit sbp_exit(void)
2601 {
2602 	sbp_deregister_configfs();
2603 };
2604 
2605 MODULE_DESCRIPTION("FireWire SBP fabric driver");
2606 MODULE_LICENSE("GPL");
2607 module_init(sbp_init);
2608 module_exit(sbp_exit);
2609