/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
 *
 * Copyright (c) 2009-2010 Rising Tide Systems
 * Copyright (c) 2009-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_ua.h"

static int core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
		struct se_port *port, int explict, int offline);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *buf;
	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
				    Target port group descriptor */

	buf = transport_kmap_first_data_page(cmd);

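	/*
	 * Each Target port group descriptor emitted below is laid out as
	 * follows (derived directly from the byte fills in this loop):
	 *
	 *   Byte 0:   PREF bit (0x80) | ASYMMETRIC ACCESS STATE
	 *   Byte 1:   Supported access state bits (T_SUP..AO_SUP)
	 *   Bytes 2-3: TARGET PORT GROUP ID (big-endian)
	 *   Byte 4:   Reserved
	 *   Byte 5:   STATUS CODE
	 *   Byte 6:   Vendor specific
	 *   Byte 7:   TARGET PORT COUNT
	 *
	 * followed by one 4-byte Target port descriptor per group member.
	 */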
	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= (atomic_read(
			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off] = 0x80; /* T_SUP */
		buf[off] |= 0x40; /* O_SUP */
		buf[off] |= 0x8; /* U_SUP */
		buf[off] |= 0x4; /* S_SUP */
		buf[off] |= 0x2; /* AN_SUP */
		buf[off++] |= 0x1; /* AO_SUP */
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
			port = tg_pt_gp_mem->tg_pt;
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.27 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
			buf[off++] = (port->sep_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
	 */
	buf[0] = ((rd_len >> 24) & 0xff);
	buf[1] = ((rd_len >> 16) & 0xff);
	buf[2] = ((rd_len >> 8) & 0xff);
	buf[3] = (rd_len & 0xff);

	transport_kunmap_first_data_page(cmd);

	return 0;
}


/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
	unsigned char *buf;
	unsigned char *ptr;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, rc;
	u16 tg_pt_id, rtpi;

	if (!l_port)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	buf = transport_kmap_first_data_page(cmd);

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
	if (!l_tg_pt_gp_mem) {
		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		goto out;
	}
	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		goto out;
	}
	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

	if (!rc) {
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
				" while TPGS_EXPLICT_ALUA is disabled\n");
		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		goto out;
	}

	ptr = &buf[4]; /* Skip over RESERVED area in header */
	rc = 0; /* An empty parameter list is not an error */

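	/*
	 * Each set target port group descriptor in the parameter list is
	 * four bytes, consumed below as: byte 0 bits 3:0 hold the requested
	 * ASYMMETRIC ACCESS STATE, and bytes 2-3 hold either a primary
	 * TARGET PORT GROUP ID or a RELATIVE TARGET PORT IDENTIFIER,
	 * depending on whether the requested state is primary or secondary.
	 */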
	while (len < cmd->data_length) {
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, &primary);
		if (rc != 0) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
			goto out;
		}
		rc = -1;
		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = ((ptr[2] << 8) & 0xff00);
			tg_pt_id |= (ptr[3] & 0xff);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&su_dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_inc();
				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

				rc = core_alua_do_port_transition(tg_pt_gp,
						dev, l_port, nacl,
						alua_access_state, 1);

				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_dec();
				break;
			}
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * If no matching target port group ID can be located,
			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
				goto out;
			}
		} else {
			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to
			 * identify the Target Port in question for the
			 * incoming SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = ((ptr[2] << 8) & 0xff00);
			rtpi |= (ptr[3] & 0xff);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(port, &dev->dev_sep_list,
							sep_list) {
				if (port->sep_rtpi != rtpi)
					continue;

				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
				spin_unlock(&dev->se_port_lock);

				rc = core_alua_set_tg_pt_secondary_state(
						tg_pt_gp_mem, port, 1, 1);

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
			/*
			 * If no matching relative target port identifier can
			 * be located, throw an exception with ASCQ:
			 * INVALID_PARAMETER_LIST
			 */
			if (rc != 0) {
				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
				goto out;
			}
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_first_data_page(cmd);

	return rc;
}

static inline int core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs,
	u8 *alua_ascq)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case MAINTENANCE_IN:
		switch (cdb[1]) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
	case MAINTENANCE_IN:
		switch (cdb[1]) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
	case MAINTENANCE_IN:
		switch (cdb[1]) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
		return 1;
	}

	return 0;
}

/*
 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
 * in transport_cmd_sequencer().  This function is assigned to
 * struct t10_alua *->state_check() in core_setup_alua()
 */
static int core_alua_state_check_nop(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	return 0;
}

/*
 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
 * This function is assigned to struct t10_alua *->state_check() in
 * core_setup_alua()
 *
 * Also, this function can return three different return codes to
 * signal transport_generic_cmd_sequencer()
 *
 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 * return 0: Used to signal success
 * return -1: Used to signal failure, and invalid cdb field
 */
static int core_alua_state_check(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = lun->lun_sep;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	int out_alua_state, nonop_delay_msecs;

	if (!port)
		return 0;
	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		return 1;
	}
	/*
	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
	 * ALUA target port group, to obtain current ALUA access state.
	 * Otherwise look for the underlying struct se_device association with
	 * an ALUA logical unit group.
	 */
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP.
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return core_alua_state_nonoptimized(cmd, cdb,
					nonop_delay_msecs, alua_ascq);
	case ALUA_ACCESS_STATE_STANDBY:
		return core_alua_state_standby(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
	case ALUA_ACCESS_STATE_TRANSITION:
		return core_alua_state_transition(cmd, cdb, alua_ascq);
	/*
	 * OFFLINE is a secondary ALUA target port group access state that is
	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static int core_alua_check_transition(int state, int *primary)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
	case ALUA_ACCESS_STATE_STANDBY:
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		/*
		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
		 * defined as primary target port asymmetric access states.
		 */
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return -EINVAL;
	}

	return 0;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 * held.
 */
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	mm_segment_t old_fs;
	struct file *file;
	struct iovec iov[1];
	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;

	memset(iov, 0, sizeof(struct iovec));

	file = filp_open(path, flags, 0600);
	if (IS_ERR(file) || !file || !file->f_dentry) {
		pr_err("filp_open(%s) for ALUA metadata failed\n",
			path);
		return -ENODEV;
	}

	iov[0].iov_base = &md_buf[0];
	iov[0].iov_len = md_buf_len;

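	/*
	 * Temporarily lift the user-space address limit so that vfs_writev()
	 * will accept the kernel-space md_buf, then restore the old limit.
	 */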
	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
	set_fs(old_fs);

	if (ret < 0) {
		pr_err("Error writing ALUA metadata file: %s\n", path);
		filp_close(file, NULL);
		return -EIO;
	}
	filp_close(file, NULL);

	return 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int primary_state,
	unsigned char *md_buf)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_wwn *wwn = &su_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);

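	/*
	 * Example of the resulting metadata file under
	 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name>, with values
	 * illustrative only, matching the format strings below:
	 *
	 *   tg_pt_gp_id=1
	 *   alua_access_state=0x00
	 *   alua_access_status=0x01
	 */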
	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id, primary_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	return core_alua_write_tpg_metadata(path, md_buf, len);
}

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	struct se_port *l_port,
	struct se_node_acl *nacl,
	unsigned char *md_buf,
	int new_state,
	int explict)
{
	struct se_dev_entry *se_deve;
	struct se_lun_acl *lacl;
	struct se_port *port;
	struct t10_alua_tg_pt_gp_member *mem;
	int old_state = 0;
	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
		port = mem->tg_pt;
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			if (explict &&
			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
			   (l_port != NULL) && (l_port == port))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp,
					new_state, md_buf);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
		core_alua_dump_state(new_state));

	return 0;
}

int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_port *l_port,
	struct se_node_acl *l_nacl,
	int new_state,
	int explict)
{
	struct se_device *dev;
	struct se_port *port;
	struct se_subsystem_dev *su_dev;
	struct se_node_acl *nacl;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	int primary;

	if (core_alua_check_transition(new_state, &primary) != 0)
		return -EINVAL;

	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port group IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
					md_buf, new_state, explict);
		atomic_dec(&lu_gp->lu_gp_ref_cnt);
		smp_mb__after_atomic_dec();
		kfree(md_buf);
		return 0;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		dev = lu_gp_mem->lu_gp_mem_dev;
		su_dev = dev->se_sub_dev;
		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&su_dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target port asymmetric access state is
			 * changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * asymmetric access states for the same target port
			 * group accessible via other logical units in that
			 * LU group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				port = l_port;
				nacl = l_nacl;
			} else {
				port = NULL;
				nacl = NULL;
			}
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_inc();
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			core_alua_do_transition_tg_pt(tg_pt_gp, port,
					nacl, md_buf, new_state, explict);

			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_dec();
		}
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
		" Group IDs: %hu %s transition to primary state: %s\n",
		config_item_name(&lu_gp->lu_gp_group.cg_item),
		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit",
		core_alua_dump_state(new_state));

	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	kfree(md_buf);
	return 0;
}

/*
 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
 */
static int core_alua_update_tpg_secondary_metadata(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct se_portal_group *se_tpg = port->sep_tpg;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len;

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

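	/*
	 * Example of the resulting metadata file under
	 * /var/target/alua/<fabric>/<wwn>/lun_<N>, with values illustrative
	 * only, matching the format strings below:
	 *
	 *   alua_tg_pt_offline=1
	 *   alua_tg_pt_status=0x01
	 */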
	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&port->sep_tg_pt_secondary_offline),
			port->sep_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			port->sep_lun->unpacked_lun);

	return core_alua_write_tpg_metadata(path, md_buf, len);
}

static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explict,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	u32 md_buf_len;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
	port->sep_tg_pt_secondary_stat = (explict) ?
			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explict) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
		if (!md_buf) {
			pr_err("Unable to allocate md_buf for"
				" secondary ALUA access metadata\n");
			return -ENOMEM;
		}
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
				md_buf, md_buf_len);
		mutex_unlock(&port->sep_tg_pt_md_mutex);

		kfree(md_buf);
	}

	return 0;
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
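	/*
	 * If the caller did not request a specific ID (lu_gp_id == 0), keep
	 * drawing candidate IDs from alua_lu_gps_counter until one is found
	 * that is not already claimed on the global lu_gps_list.
	 */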
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	atomic_set(&lu_gp->lu_gp_shutdown, 1);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
	struct se_subsystem_dev *su_dev,
	const char *name,
	int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;

	if (def_group) {
		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				su_dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		su_dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &su_dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			su_dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&su_dev->t10_alua.tg_pt_gps_list);
	su_dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return 0;
}

struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
	struct se_port *port)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
				GFP_KERNEL);
	if (!tg_pt_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);

	tg_pt_gp_mem->tg_pt = port;
	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
	atomic_set(&port->sep_tg_pt_gp_active, 1);

	return tg_pt_gp_mem;
}

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	su_dev->t10_alua.alua_tg_pt_gps_count--;
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * tg_pt_gp_mem is associated with a single
		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
		 * core_alua_free_tg_pt_gp_mem().
		 *
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
		} else {
			tg_pt_gp_mem->tg_pt_gp = NULL;
		}
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}

void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		tg_pt_gp_mem->tg_pt_gp = NULL;
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
	struct se_subsystem_dev *su_dev,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;

	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
void __core_alua_attach_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
			&tg_pt_gp->tg_pt_gp_mem_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
static void __core_alua_drop_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	tg_pt_gp_mem->tg_pt_gp = NULL;
	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct config_item *tg_pt_ci;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return len;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return len;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_port *port,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg;
	struct se_lun *lun;
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	tpg = port->sep_tpg;
	lun = port->sep_lun;

	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
		pr_warn("SPC3_ALUA_EMULATED not enabled for"
			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		return -EINVAL;
	}

	if (count > TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		if (tg_pt_gp_new)
			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
		return -EINVAL;
	}

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					su_dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
		 */
		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
		move = 1;
	}
	/*
	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
	 */
	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}

ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

1745 	ret = strict_strtoul(page, 0, &tmp);
1746 	if (ret < 0) {
1747 		pr_err("Unable to extract alua_access_type\n");
1748 		return -EINVAL;
1749 	}
1750 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1751 		pr_err("Illegal value for alua_access_type:"
1752 				" %lu\n", tmp);
1753 		return -EINVAL;
1754 	}
1755 	if (tmp == 3)
1756 		tg_pt_gp->tg_pt_gp_alua_access_type =
1757 			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1758 	else if (tmp == 2)
1759 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1760 	else if (tmp == 1)
1761 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1762 	else
1763 		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1764 
1765 	return count;
1766 }
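
/*
 * Accepted alua_access_type values, mirroring the if/else chain above:
 *
 *   0 = None (no TPGS support reported)
 *   1 = Implicit ALUA only
 *   2 = Explicit ALUA only
 *   3 = Implicit and Explicit ALUA
 *
 * Illustrative usage (the configfs path is an example):
 *
 *   echo 3 > .../alua/some_tg_pt_gp/alua_access_type
 */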

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return -EINVAL;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}
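
/*
 * Both delay attributes are expressed in milliseconds and are capped by
 * ALUA_MAX_NONOP_DELAY_MSECS and ALUA_MAX_TRANS_DELAY_MSECS above.
 * Illustrative usage (group name and path are examples):
 *
 *   echo 100 > .../alua/some_tg_pt_gp/nonop_delay_msecs
 *   echo 300 > .../alua/some_tg_pt_gp/trans_delay_msecs
 */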

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}
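
/*
 * tg_pt_gp_pref corresponds to the PREF (preferred target port) bit in
 * the REPORT TARGET PORT GROUPS descriptor; see spc4r17.  Only 0 and 1
 * are accepted above.
 */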

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}
	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		pr_err("Unable to locate *tg_pt_gp_mem\n");
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
			lun->lun_sep, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}
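
/*
 * Writing 0 or 1 here requests a secondary (per-port) online/offline
 * transition through core_alua_set_tg_pt_secondary_state(); the 0
 * passed as the third argument (explict = 0) marks the change as
 * implicit rather than driven by SET TARGET PORT GROUPS.
 * Illustrative usage (the configfs path is an example):
 *
 *   echo 1 > .../lun_0/alua_tg_pt_offline
 */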

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return -EINVAL;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;

	return count;
}
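
/*
 * The accepted alua_tg_pt_status values correspond to the secondary
 * access status codes from spc4r17 (none, altered by an explicit
 * SET TARGET PORT GROUPS, altered by implicit ALUA).  Their numeric
 * values come from the ALUA_STATUS_* definitions in target_core_alua.h,
 * which are assumed to follow the specification.
 */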

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return -EINVAL;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;

	return count;
}
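
/*
 * Note: every store handler above repeats the same strict_strtoul() +
 * range-check sequence.  A possible consolidation is sketched below;
 * this helper is purely illustrative and is not wired into the
 * handlers in this file.
 */
static inline int core_alua_extract_ulong(
	const char *page,
	unsigned long max,
	const char *name,
	unsigned long *out)
{
	unsigned long tmp;
	int ret;

	/* Parse the user-supplied decimal/hex string */
	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract %s\n", name);
		return -EINVAL;
	}
	/* Enforce the per-attribute upper bound */
	if (tmp > max) {
		pr_err("Illegal value for %s: %lu\n", name, tmp);
		return -EINVAL;
	}
	*out = tmp;
	return 0;
}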
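
/*
 * core_setup_alua() selects one of three modes for a new struct
 * se_device: SPC_ALUA_PASSTHROUGH for pSCSI (or when force_pt is set),
 * where the underlying hardware handles ALUA itself; SPC3_ALUA_EMULATED
 * for SPC-3 and later devices, which are also attached to the default
 * LU group; and SPC2_ALUA_DISABLED for SPC-2 devices.
 */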
int core_setup_alua(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = &su_dev->t10_alua;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
	 * of the underlying SCSI hardware.  In Linux/SCSI terms this can be
	 * a problem, because libata and some SATA RAID HBAs appear under
	 * Linux/SCSI but emulate the SCSI logic themselves.
	 */
	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
		alua->alua_type = SPC_ALUA_PASSTHROUGH;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
			" emulation\n", dev->transport->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by the real or emulated struct
	 * se_device, use emulated ALUA.
	 */
	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
			" device\n", dev->transport->name);
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		alua->alua_type = SPC3_ALUA_EMULATED;
		alua->alua_state_check = &core_alua_state_check;
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	} else {
		alua->alua_type = SPC2_ALUA_DISABLED;
		alua->alua_state_check = &core_alua_state_check_nop;
		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
			" device\n", dev->transport->name);
	}

	return 0;
}