1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5  *
6  * (c) Copyright 2009-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25 
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/configfs.h>
29 #include <linux/delay.h>
30 #include <linux/export.h>
31 #include <linux/fcntl.h>
32 #include <linux/file.h>
33 #include <linux/fs.h>
34 #include <scsi/scsi_proto.h>
35 #include <asm/unaligned.h>
36 
37 #include <target/target_core_base.h>
38 #include <target/target_core_backend.h>
39 #include <target/target_core_fabric.h>
40 
41 #include "target_core_internal.h"
42 #include "target_core_alua.h"
43 #include "target_core_ua.h"
44 
45 static sense_reason_t core_alua_check_transition(int state, int valid,
46 						 int *primary, int explicit);
47 static int core_alua_set_tg_pt_secondary_state(
48 		struct se_lun *lun, int explicit, int offline);
49 
50 static char *core_alua_dump_state(int state);
51 
52 static void __target_attach_tg_pt_gp(struct se_lun *lun,
53 		struct t10_alua_tg_pt_gp *tg_pt_gp);
54 
55 static u16 alua_lu_gps_counter;
56 static u32 alua_lu_gps_count;
57 
58 static DEFINE_SPINLOCK(lu_gps_lock);
59 static LIST_HEAD(lu_gps_list);
60 
61 struct t10_alua_lu_gp *default_lu_gp;
62 
63 /*
64  * REPORT REFERRALS
65  *
66  * See sbc3r35 section 5.23
67  */
68 sense_reason_t
69 target_emulate_report_referrals(struct se_cmd *cmd)
70 {
71 	struct se_device *dev = cmd->se_dev;
72 	struct t10_alua_lba_map *map;
73 	struct t10_alua_lba_map_member *map_mem;
74 	unsigned char *buf;
75 	u32 rd_len = 0, off;
76 
77 	if (cmd->data_length < 4) {
78 		pr_warn("REPORT REFERRALS allocation length %u too"
79 			" small\n", cmd->data_length);
80 		return TCM_INVALID_CDB_FIELD;
81 	}
82 
83 	buf = transport_kmap_data_sg(cmd);
84 	if (!buf)
85 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
86 
87 	off = 4;
88 	spin_lock(&dev->t10_alua.lba_map_lock);
89 	if (list_empty(&dev->t10_alua.lba_map_list)) {
90 		spin_unlock(&dev->t10_alua.lba_map_lock);
91 		transport_kunmap_data_sg(cmd);
92 
93 		return TCM_UNSUPPORTED_SCSI_OPCODE;
94 	}
95 
96 	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
97 			    lba_map_list) {
98 		int desc_num = off + 3;
99 		int pg_num;
100 
101 		off += 4;
102 		if (cmd->data_length > off)
103 			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
104 		off += 8;
105 		if (cmd->data_length > off)
106 			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
107 		off += 8;
108 		rd_len += 20;
109 		pg_num = 0;
110 		list_for_each_entry(map_mem, &map->lba_map_mem_list,
111 				    lba_map_mem_list) {
112 			int alua_state = map_mem->lba_map_mem_alua_state;
113 			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
114 
115 			if (cmd->data_length > off)
116 				buf[off] = alua_state & 0x0f;
117 			off += 2;
118 			if (cmd->data_length > off)
119 				buf[off] = (alua_pg_id >> 8) & 0xff;
120 			off++;
121 			if (cmd->data_length > off)
122 				buf[off] = (alua_pg_id & 0xff);
123 			off++;
124 			rd_len += 4;
125 			pg_num++;
126 		}
127 		if (cmd->data_length > desc_num)
128 			buf[desc_num] = pg_num;
129 	}
130 	spin_unlock(&dev->t10_alua.lba_map_lock);
131 
132 	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
134 	 */
135 	put_unaligned_be16(rd_len, &buf[2]);
136 
137 	transport_kunmap_data_sg(cmd);
138 
139 	target_complete_cmd(cmd, GOOD);
140 	return 0;
141 }
142 
143 /*
144  * REPORT_TARGET_PORT_GROUPS
145  *
146  * See spc4r17 section 6.27
147  */
148 sense_reason_t
149 target_emulate_report_target_port_groups(struct se_cmd *cmd)
150 {
151 	struct se_device *dev = cmd->se_dev;
152 	struct t10_alua_tg_pt_gp *tg_pt_gp;
153 	struct se_lun *lun;
154 	unsigned char *buf;
155 	u32 rd_len = 0, off;
156 	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
157 
158 	/*
159 	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type.
161 	 */
162 	if (ext_hdr != 0)
163 		off = 8;
164 	else
165 		off = 4;
166 
167 	if (cmd->data_length < off) {
168 		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
169 			" small for %s header\n", cmd->data_length,
170 			(ext_hdr) ? "extended" : "normal");
171 		return TCM_INVALID_CDB_FIELD;
172 	}
173 	buf = transport_kmap_data_sg(cmd);
174 	if (!buf)
175 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
176 
177 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
178 	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
179 			tg_pt_gp_list) {
180 		/*
181 		 * Check if the Target port group and Target port descriptor list
182 		 * based on tg_pt_gp_members count will fit into the response payload.
183 		 * Otherwise, bump rd_len to let the initiator know we have exceeded
184 		 * the allocation length and the response is truncated.
185 		 */
186 		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
187 		     cmd->data_length) {
188 			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
189 			continue;
190 		}
191 		/*
192 		 * PREF: Preferred target port bit, determine if this
193 		 * bit should be set for port group.
194 		 */
195 		if (tg_pt_gp->tg_pt_gp_pref)
196 			buf[off] = 0x80;
197 		/*
198 		 * Set the ASYMMETRIC ACCESS State
199 		 */
200 		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
201 		/*
202 		 * Set supported ASYMMETRIC ACCESS State bits
203 		 */
204 		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
205 		/*
206 		 * TARGET PORT GROUP
207 		 */
208 		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
209 		off += 2;
210 
211 		off++; /* Skip over Reserved */
212 		/*
213 		 * STATUS CODE
214 		 */
215 		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
216 		/*
217 		 * Vendor Specific field
218 		 */
219 		buf[off++] = 0x00;
220 		/*
221 		 * TARGET PORT COUNT
222 		 */
223 		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
224 		rd_len += 8;
225 
226 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
227 		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
228 				lun_tg_pt_gp_link) {
229 			/*
230 			 * Start Target Port descriptor format
231 			 *
232 			 * See spc4r17 section 6.2.7 Table 247
233 			 */
234 			off += 2; /* Skip over Obsolete */
235 			/*
236 			 * Set RELATIVE TARGET PORT IDENTIFIER
237 			 */
238 			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
239 			off += 2;
240 			rd_len += 4;
241 		}
242 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
243 	}
244 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
245 	/*
	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
247 	 */
248 	put_unaligned_be32(rd_len, &buf[0]);
249 
250 	/*
251 	 * Fill in the Extended header parameter data format if requested
252 	 */
253 	if (ext_hdr != 0) {
254 		buf[4] = 0x10;
255 		/*
256 		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current lun->lun_tg_pt_gp association from the LUN
		 * this CDB was received upon to determine this value
		 * individually for the ALUA target port group.
262 		 */
263 		spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
264 		tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
265 		if (tg_pt_gp)
266 			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
267 		spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
268 	}
269 	transport_kunmap_data_sg(cmd);
270 
271 	target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
272 	return 0;
273 }
274 
275 /*
276  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
277  *
278  * See spc4r17 section 6.35
279  */
280 sense_reason_t
281 target_emulate_set_target_port_groups(struct se_cmd *cmd)
282 {
283 	struct se_device *dev = cmd->se_dev;
284 	struct se_lun *l_lun = cmd->se_lun;
285 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
286 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
287 	unsigned char *buf;
288 	unsigned char *ptr;
289 	sense_reason_t rc = TCM_NO_SENSE;
290 	u32 len = 4; /* Skip over RESERVED area in header */
291 	int alua_access_state, primary = 0, valid_states;
292 	u16 tg_pt_id, rtpi;
293 
294 	if (cmd->data_length < 4) {
295 		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
296 			" small\n", cmd->data_length);
297 		return TCM_INVALID_PARAMETER_LIST;
298 	}
299 
300 	buf = transport_kmap_data_sg(cmd);
301 	if (!buf)
302 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
303 
304 	/*
305 	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
306 	 * for the local tg_pt_gp.
307 	 */
308 	spin_lock(&l_lun->lun_tg_pt_gp_lock);
309 	l_tg_pt_gp = l_lun->lun_tg_pt_gp;
310 	if (!l_tg_pt_gp) {
311 		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
312 		pr_err("Unable to access l_lun->tg_pt_gp\n");
313 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
314 		goto out;
315 	}
316 
317 	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
318 		spin_unlock(&l_lun->lun_tg_pt_gp_lock);
319 		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
320 				" while TPGS_EXPLICIT_ALUA is disabled\n");
321 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
322 		goto out;
323 	}
324 	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
325 	spin_unlock(&l_lun->lun_tg_pt_gp_lock);
326 
327 	ptr = &buf[4]; /* Skip over RESERVED area in header */
328 
329 	while (len < cmd->data_length) {
330 		bool found = false;
331 		alua_access_state = (ptr[0] & 0x0f);
332 		/*
333 		 * Check the received ALUA access state, and determine if
334 		 * the state is a primary or secondary target port asymmetric
335 		 * access state.
336 		 */
337 		rc = core_alua_check_transition(alua_access_state, valid_states,
338 						&primary, 1);
339 		if (rc) {
340 			/*
341 			 * If the SET TARGET PORT GROUPS attempts to establish
342 			 * an invalid combination of target port asymmetric
343 			 * access states or attempts to establish an
344 			 * unsupported target port asymmetric access state,
345 			 * then the command shall be terminated with CHECK
346 			 * CONDITION status, with the sense key set to ILLEGAL
347 			 * REQUEST, and the additional sense code set to INVALID
348 			 * FIELD IN PARAMETER LIST.
349 			 */
350 			goto out;
351 		}
352 
353 		/*
354 		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
355 		 * specifies a primary target port asymmetric access state,
356 		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
357 		 * a primary target port group for which the primary target
358 		 * port asymmetric access state shall be changed. If the
359 		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
360 		 * port asymmetric access state, then the TARGET PORT GROUP OR
361 		 * TARGET PORT field specifies the relative target port
362 		 * identifier (see 3.1.120) of the target port for which the
363 		 * secondary target port asymmetric access state shall be
364 		 * changed.
365 		 */
366 		if (primary) {
367 			tg_pt_id = get_unaligned_be16(ptr + 2);
368 			/*
369 			 * Locate the matching target port group ID from
370 			 * the global tg_pt_gp list
371 			 */
372 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
373 			list_for_each_entry(tg_pt_gp,
374 					&dev->t10_alua.tg_pt_gps_list,
375 					tg_pt_gp_list) {
376 				if (!tg_pt_gp->tg_pt_gp_valid_id)
377 					continue;
378 
379 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
380 					continue;
381 
382 				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
383 
384 				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
385 
386 				if (!core_alua_do_port_transition(tg_pt_gp,
387 						dev, l_lun, nacl,
388 						alua_access_state, 1))
389 					found = true;
390 
391 				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
392 				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
393 				break;
394 			}
395 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
396 		} else {
397 			struct se_lun *lun;
398 
399 			/*
400 			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
402 			 * SET_TARGET_PORT_GROUPS op.
403 			 */
404 			rtpi = get_unaligned_be16(ptr + 2);
405 			/*
406 			 * Locate the matching relative target port identifier
407 			 * for the struct se_device storage object.
408 			 */
409 			spin_lock(&dev->se_port_lock);
410 			list_for_each_entry(lun, &dev->dev_sep_list,
411 							lun_dev_link) {
412 				if (lun->lun_rtpi != rtpi)
413 					continue;
414 
415 				// XXX: racy unlock
416 				spin_unlock(&dev->se_port_lock);
417 
418 				if (!core_alua_set_tg_pt_secondary_state(
419 						lun, 1, 1))
420 					found = true;
421 
422 				spin_lock(&dev->se_port_lock);
423 				break;
424 			}
425 			spin_unlock(&dev->se_port_lock);
426 		}
427 
428 		if (!found) {
429 			rc = TCM_INVALID_PARAMETER_LIST;
430 			goto out;
431 		}
432 
433 		ptr += 4;
434 		len += 4;
435 	}
436 
437 out:
438 	transport_kunmap_data_sg(cmd);
439 	if (!rc)
440 		target_complete_cmd(cmd, GOOD);
441 	return rc;
442 }
443 
444 static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
445 {
446 	/*
	 * Set the SCSI additional sense code (ASC) to 'LUN Not Accessible';
	 * the ALUA additional sense code qualifier (ASCQ) is determined
	 * by the ALUA primary or secondary access state.
450 	 */
451 	pr_debug("[%s]: ALUA TG Port not available, "
452 		"SenseKey: NOT_READY, ASC/ASCQ: "
453 		"0x04/0x%02x\n",
454 		cmd->se_tfo->get_fabric_name(), alua_ascq);
455 
456 	cmd->scsi_asc = 0x04;
457 	cmd->scsi_ascq = alua_ascq;
458 }
459 
460 static inline void core_alua_state_nonoptimized(
461 	struct se_cmd *cmd,
462 	unsigned char *cdb,
463 	int nonop_delay_msecs)
464 {
465 	/*
466 	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
467 	 * later to determine if processing of this cmd needs to be
468 	 * temporarily delayed for the Active/NonOptimized primary access state.
469 	 */
470 	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
471 	cmd->alua_nonop_delay = nonop_delay_msecs;
472 }
473 
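/*
 * For referrals (LBA-dependent ALUA), walk the device's LBA map list and
 * verify that every segment touched by this command is reachable through
 * this target port group.  Returns 1 (and sets the appropriate ASCQ) when
 * a segment is standby, unavailable, or not mapped at all.
 */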
474 static inline int core_alua_state_lba_dependent(
475 	struct se_cmd *cmd,
476 	struct t10_alua_tg_pt_gp *tg_pt_gp)
477 {
478 	struct se_device *dev = cmd->se_dev;
479 	u64 segment_size, segment_mult, sectors, lba;
480 
481 	/* Only need to check for cdb actually containing LBAs */
482 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
483 		return 0;
484 
485 	spin_lock(&dev->t10_alua.lba_map_lock);
486 	segment_size = dev->t10_alua.lba_map_segment_size;
487 	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
488 	sectors = cmd->data_length / dev->dev_attrib.block_size;
489 
490 	lba = cmd->t_task_lba;
491 	while (lba < cmd->t_task_lba + sectors) {
492 		struct t10_alua_lba_map *cur_map = NULL, *map;
493 		struct t10_alua_lba_map_member *map_mem;
494 
495 		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
496 				    lba_map_list) {
497 			u64 start_lba, last_lba;
498 			u64 first_lba = map->lba_map_first_lba;
499 
500 			if (segment_mult) {
501 				u64 tmp = lba;
502 				start_lba = do_div(tmp, segment_size * segment_mult);
503 
504 				last_lba = first_lba + segment_size - 1;
505 				if (start_lba >= first_lba &&
506 				    start_lba <= last_lba) {
507 					lba += segment_size;
508 					cur_map = map;
509 					break;
510 				}
511 			} else {
512 				last_lba = map->lba_map_last_lba;
513 				if (lba >= first_lba && lba <= last_lba) {
514 					lba = last_lba + 1;
515 					cur_map = map;
516 					break;
517 				}
518 			}
519 		}
520 		if (!cur_map) {
521 			spin_unlock(&dev->t10_alua.lba_map_lock);
522 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
523 			return 1;
524 		}
525 		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
526 				    lba_map_mem_list) {
527 			if (map_mem->lba_map_mem_alua_pg_id !=
528 			    tg_pt_gp->tg_pt_gp_id)
529 				continue;
530 			switch(map_mem->lba_map_mem_alua_state) {
531 			case ALUA_ACCESS_STATE_STANDBY:
532 				spin_unlock(&dev->t10_alua.lba_map_lock);
533 				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
534 				return 1;
535 			case ALUA_ACCESS_STATE_UNAVAILABLE:
536 				spin_unlock(&dev->t10_alua.lba_map_lock);
537 				set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
538 				return 1;
539 			default:
540 				break;
541 			}
542 		}
543 	}
544 	spin_unlock(&dev->t10_alua.lba_map_lock);
545 	return 0;
546 }
547 
548 static inline int core_alua_state_standby(
549 	struct se_cmd *cmd,
550 	unsigned char *cdb)
551 {
552 	/*
553 	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
554 	 * spc4r17 section 5.9.2.4.4
555 	 */
556 	switch (cdb[0]) {
557 	case INQUIRY:
558 	case LOG_SELECT:
559 	case LOG_SENSE:
560 	case MODE_SELECT:
561 	case MODE_SENSE:
562 	case REPORT_LUNS:
563 	case RECEIVE_DIAGNOSTIC:
564 	case SEND_DIAGNOSTIC:
565 	case READ_CAPACITY:
566 		return 0;
567 	case SERVICE_ACTION_IN_16:
568 		switch (cdb[1] & 0x1f) {
569 		case SAI_READ_CAPACITY_16:
570 			return 0;
571 		default:
572 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
573 			return 1;
574 		}
575 	case MAINTENANCE_IN:
576 		switch (cdb[1] & 0x1f) {
577 		case MI_REPORT_TARGET_PGS:
578 			return 0;
579 		default:
580 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
581 			return 1;
582 		}
583 	case MAINTENANCE_OUT:
584 		switch (cdb[1]) {
585 		case MO_SET_TARGET_PGS:
586 			return 0;
587 		default:
588 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
589 			return 1;
590 		}
591 	case REQUEST_SENSE:
592 	case PERSISTENT_RESERVE_IN:
593 	case PERSISTENT_RESERVE_OUT:
594 	case READ_BUFFER:
595 	case WRITE_BUFFER:
596 		return 0;
597 	default:
598 		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
599 		return 1;
600 	}
601 
602 	return 0;
603 }
604 
605 static inline int core_alua_state_unavailable(
606 	struct se_cmd *cmd,
607 	unsigned char *cdb)
608 {
609 	/*
610 	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
611 	 * spc4r17 section 5.9.2.4.5
612 	 */
613 	switch (cdb[0]) {
614 	case INQUIRY:
615 	case REPORT_LUNS:
616 		return 0;
617 	case MAINTENANCE_IN:
618 		switch (cdb[1] & 0x1f) {
619 		case MI_REPORT_TARGET_PGS:
620 			return 0;
621 		default:
622 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
623 			return 1;
624 		}
625 	case MAINTENANCE_OUT:
626 		switch (cdb[1]) {
627 		case MO_SET_TARGET_PGS:
628 			return 0;
629 		default:
630 			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
631 			return 1;
632 		}
633 	case REQUEST_SENSE:
634 	case READ_BUFFER:
635 	case WRITE_BUFFER:
636 		return 0;
637 	default:
638 		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
639 		return 1;
640 	}
641 
642 	return 0;
643 }
644 
645 static inline int core_alua_state_transition(
646 	struct se_cmd *cmd,
647 	unsigned char *cdb)
648 {
649 	/*
650 	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
651 	 * spc4r17 section 5.9.2.5
652 	 */
653 	switch (cdb[0]) {
654 	case INQUIRY:
655 	case REPORT_LUNS:
656 		return 0;
657 	case MAINTENANCE_IN:
658 		switch (cdb[1] & 0x1f) {
659 		case MI_REPORT_TARGET_PGS:
660 			return 0;
661 		default:
662 			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
663 			return 1;
664 		}
665 	case REQUEST_SENSE:
666 	case READ_BUFFER:
667 	case WRITE_BUFFER:
668 		return 0;
669 	default:
670 		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
671 		return 1;
672 	}
673 
674 	return 0;
675 }
676 
/*
 * Check the ALUA primary and secondary target port access states for the
 * LUN this command was received upon.
 *
 * Returns 0 on success, TCM_CHECK_CONDITION_NOT_READY when the LUN is not
 * accessible in the current ALUA state, or TCM_INVALID_CDB_FIELD for an
 * unknown primary access state.
 */
682 sense_reason_t
683 target_alua_state_check(struct se_cmd *cmd)
684 {
685 	struct se_device *dev = cmd->se_dev;
686 	unsigned char *cdb = cmd->t_task_cdb;
687 	struct se_lun *lun = cmd->se_lun;
688 	struct t10_alua_tg_pt_gp *tg_pt_gp;
689 	int out_alua_state, nonop_delay_msecs;
690 
691 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
692 		return 0;
693 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
694 		return 0;
695 
696 	/*
	 * First, check for a struct se_lun specific secondary ALUA target port
	 * access state: OFFLINE
699 	 */
700 	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
701 		pr_debug("ALUA: Got secondary offline status for local"
702 				" target port\n");
703 		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
704 		return TCM_CHECK_CONDITION_NOT_READY;
705 	}
706 
707 	if (!lun->lun_tg_pt_gp)
708 		return 0;
709 
710 	spin_lock(&lun->lun_tg_pt_gp_lock);
711 	tg_pt_gp = lun->lun_tg_pt_gp;
712 	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
713 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
714 
	// XXX: keeps using tg_pt_gp without a reference after unlock
716 	spin_unlock(&lun->lun_tg_pt_gp_lock);
717 	/*
718 	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
719 	 * statement so the compiler knows explicitly to check this case first.
720 	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP.
722 	 */
723 	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
724 		return 0;
725 
726 	switch (out_alua_state) {
727 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
728 		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
729 		break;
730 	case ALUA_ACCESS_STATE_STANDBY:
731 		if (core_alua_state_standby(cmd, cdb))
732 			return TCM_CHECK_CONDITION_NOT_READY;
733 		break;
734 	case ALUA_ACCESS_STATE_UNAVAILABLE:
735 		if (core_alua_state_unavailable(cmd, cdb))
736 			return TCM_CHECK_CONDITION_NOT_READY;
737 		break;
738 	case ALUA_ACCESS_STATE_TRANSITION:
739 		if (core_alua_state_transition(cmd, cdb))
740 			return TCM_CHECK_CONDITION_NOT_READY;
741 		break;
742 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
743 		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
744 			return TCM_CHECK_CONDITION_NOT_READY;
745 		break;
746 	/*
747 	 * OFFLINE is a secondary ALUA target port group access state, that is
748 	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
749 	 */
750 	case ALUA_ACCESS_STATE_OFFLINE:
751 	default:
752 		pr_err("Unknown ALUA access state: 0x%02x\n",
753 				out_alua_state);
754 		return TCM_INVALID_CDB_FIELD;
755 	}
756 
757 	return 0;
758 }
759 
760 /*
761  * Check implicit and explicit ALUA state change request.
762  */
763 static sense_reason_t
764 core_alua_check_transition(int state, int valid, int *primary, int explicit)
765 {
766 	/*
767 	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
768 	 * defined as primary target port asymmetric access states.
769 	 */
770 	switch (state) {
771 	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
772 		if (!(valid & ALUA_AO_SUP))
773 			goto not_supported;
774 		*primary = 1;
775 		break;
776 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
777 		if (!(valid & ALUA_AN_SUP))
778 			goto not_supported;
779 		*primary = 1;
780 		break;
781 	case ALUA_ACCESS_STATE_STANDBY:
782 		if (!(valid & ALUA_S_SUP))
783 			goto not_supported;
784 		*primary = 1;
785 		break;
786 	case ALUA_ACCESS_STATE_UNAVAILABLE:
787 		if (!(valid & ALUA_U_SUP))
788 			goto not_supported;
789 		*primary = 1;
790 		break;
791 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
792 		if (!(valid & ALUA_LBD_SUP))
793 			goto not_supported;
794 		*primary = 1;
795 		break;
796 	case ALUA_ACCESS_STATE_OFFLINE:
797 		/*
798 		 * OFFLINE state is defined as a secondary target port
799 		 * asymmetric access state.
800 		 */
801 		if (!(valid & ALUA_O_SUP))
802 			goto not_supported;
803 		*primary = 0;
804 		break;
805 	case ALUA_ACCESS_STATE_TRANSITION:
806 		if (!(valid & ALUA_T_SUP) || explicit)
807 			/*
			 * Transitioning is set internally and by the tcmu daemon,
809 			 * and cannot be selected through a STPG.
810 			 */
811 			goto not_supported;
812 		*primary = 0;
813 		break;
814 	default:
815 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
816 		return TCM_INVALID_PARAMETER_LIST;
817 	}
818 
819 	return 0;
820 
821 not_supported:
	pr_err("ALUA access state %s not supported\n",
823 	       core_alua_dump_state(state));
824 	return TCM_INVALID_PARAMETER_LIST;
825 }
826 
827 static char *core_alua_dump_state(int state)
828 {
829 	switch (state) {
830 	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
831 		return "Active/Optimized";
832 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
833 		return "Active/NonOptimized";
834 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
835 		return "LBA Dependent";
836 	case ALUA_ACCESS_STATE_STANDBY:
837 		return "Standby";
838 	case ALUA_ACCESS_STATE_UNAVAILABLE:
839 		return "Unavailable";
840 	case ALUA_ACCESS_STATE_OFFLINE:
841 		return "Offline";
842 	case ALUA_ACCESS_STATE_TRANSITION:
843 		return "Transitioning";
844 	default:
845 		return "Unknown";
846 	}
847 
848 	return NULL;
849 }
850 
851 char *core_alua_dump_status(int status)
852 {
853 	switch (status) {
854 	case ALUA_STATUS_NONE:
855 		return "None";
856 	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
857 		return "Altered by Explicit STPG";
858 	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
859 		return "Altered by Implicit ALUA";
860 	default:
861 		return "Unknown";
862 	}
863 
864 	return NULL;
865 }
866 
867 /*
868  * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths.
870  */
871 int core_alua_check_nonop_delay(
872 	struct se_cmd *cmd)
873 {
874 	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
875 		return 0;
876 	if (in_interrupt())
877 		return 0;
878 	/*
879 	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero.
881 	 */
882 	if (!cmd->alua_nonop_delay)
883 		return 0;
884 	/*
885 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
886 	 * defined interval in core_alua_state_nonoptimized()
887 	 */
888 	msleep_interruptible(cmd->alua_nonop_delay);
889 	return 0;
890 }
891 EXPORT_SYMBOL(core_alua_check_nonop_delay);
892 
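/*
 * Write the ALUA metadata buffer out to a file under the configfs db_root
 * (typically /var/target), creating or truncating the file as needed.
 */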
893 static int core_alua_write_tpg_metadata(
894 	const char *path,
895 	unsigned char *md_buf,
896 	u32 md_buf_len)
897 {
898 	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
899 	loff_t pos = 0;
900 	int ret;
901 
902 	if (IS_ERR(file)) {
903 		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
904 		return -ENODEV;
905 	}
906 	ret = kernel_write(file, md_buf, md_buf_len, &pos);
907 	if (ret < 0)
908 		pr_err("Error writing ALUA metadata file: %s\n", path);
909 	fput(file);
910 	return (ret < 0) ? -EIO : 0;
911 }
912 
913 /*
914  * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
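 *
 * Writes the primary ALUA access state and status for this target port
 * group to db_root/alua/tpgs_<unit_serial>/<tg_pt_gp_name>, so that the
 * state can be restored by userspace after a target restart.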
915  */
916 static int core_alua_update_tpg_primary_metadata(
917 	struct t10_alua_tg_pt_gp *tg_pt_gp)
918 {
919 	unsigned char *md_buf;
920 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
921 	char *path;
922 	int len, rc;
923 
924 	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
925 	if (!md_buf) {
926 		pr_err("Unable to allocate buf for ALUA metadata\n");
927 		return -ENOMEM;
928 	}
929 
930 	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
931 			"tg_pt_gp_id=%hu\n"
932 			"alua_access_state=0x%02x\n"
933 			"alua_access_status=0x%02x\n",
934 			tg_pt_gp->tg_pt_gp_id,
935 			tg_pt_gp->tg_pt_gp_alua_access_state,
936 			tg_pt_gp->tg_pt_gp_alua_access_status);
937 
938 	rc = -ENOMEM;
939 	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
940 			&wwn->unit_serial[0],
941 			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
942 	if (path) {
943 		rc = core_alua_write_tpg_metadata(path, md_buf, len);
944 		kfree(path);
945 	}
946 	kfree(md_buf);
947 	return rc;
948 }
949 
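/*
 * Queue ASYMMETRIC ACCESS STATE CHANGED unit attentions on the mapped LUNs
 * of every LUN in this target port group, excluding the nexus that issued
 * an explicit SET TARGET PORT GROUPS (see spc4r37 p.242).
 */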
950 static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
951 {
952 	struct se_dev_entry *se_deve;
953 	struct se_lun *lun;
954 	struct se_lun_acl *lacl;
955 
956 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
957 	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
958 				lun_tg_pt_gp_link) {
959 		/*
960 		 * After an implicit target port asymmetric access state
961 		 * change, a device server shall establish a unit attention
962 		 * condition for the initiator port associated with every I_T
963 		 * nexus with the additional sense code set to ASYMMETRIC
964 		 * ACCESS STATE CHANGED.
965 		 *
966 		 * After an explicit target port asymmetric access state
967 		 * change, a device server shall establish a unit attention
968 		 * condition with the additional sense code set to ASYMMETRIC
969 		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
972 		 */
973 		if (!percpu_ref_tryget_live(&lun->lun_ref))
974 			continue;
975 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
976 
977 		spin_lock(&lun->lun_deve_lock);
978 		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
979 			lacl = rcu_dereference_check(se_deve->se_lun_acl,
980 					lockdep_is_held(&lun->lun_deve_lock));
981 
982 			/*
983 			 * spc4r37 p.242:
984 			 * After an explicit target port asymmetric access
985 			 * state change, a device server shall establish a
986 			 * unit attention condition with the additional sense
987 			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
988 			 * the initiator port associated with every I_T nexus
989 			 * other than the I_T nexus on which the SET TARGET
990 			 * PORT GROUPS command was received.
991 			 */
992 			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
993 			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
994 			   (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
995 			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
996 				continue;
997 
998 			/*
			 * The se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
1001 			 */
1002 			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
1003 			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
1004 				continue;
1005 
1006 			core_scsi3_ua_allocate(se_deve, 0x2A,
1007 				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
1008 		}
1009 		spin_unlock(&lun->lun_deve_lock);
1010 
1011 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1012 		percpu_ref_put(&lun->lun_ref);
1013 	}
1014 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1015 }
1016 
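/*
 * Perform the primary access state transition for a single target port
 * group: move it into the Transition state, queue unit attentions, honor
 * the optional transition delay, set the requested new state, optionally
 * write the ALUA metadata, and queue unit attentions again.
 */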
1017 static int core_alua_do_transition_tg_pt(
1018 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1019 	int new_state,
1020 	int explicit)
1021 {
1022 	int prev_state;
1023 
1024 	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1025 	/* Nothing to be done here */
1026 	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
1027 		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1028 		return 0;
1029 	}
1030 
1031 	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
1032 		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1033 		return -EAGAIN;
1034 	}
1035 
1036 	/*
1037 	 * Save the old primary ALUA access state, and set the current state
1038 	 * to ALUA_ACCESS_STATE_TRANSITION.
1039 	 */
1040 	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
1041 	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
1042 	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1043 				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1044 				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1045 
1046 	core_alua_queue_state_change_ua(tg_pt_gp);
1047 
1048 	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
1049 		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1050 		return 0;
1051 	}
1052 
1053 	/*
1054 	 * Check for the optional ALUA primary state transition delay
1055 	 */
1056 	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1057 		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1058 
1059 	/*
1060 	 * Set the current primary ALUA access state to the requested new state
1061 	 */
1062 	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
1063 
1064 	/*
1065 	 * Update the ALUA metadata buf that has been allocated in
1066 	 * core_alua_do_port_transition(), this metadata will be written
1067 	 * to struct file.
1068 	 *
	 * Note that there is a case where we do not want to update the
	 * metadata: when the saved metadata is being parsed in userspace
	 * to restore the existing port access state and access status.
1072 	 *
1073 	 * Also note that the failure to write out the ALUA metadata to
1074 	 * struct file does NOT affect the actual ALUA transition.
1075 	 */
1076 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
1077 		core_alua_update_tpg_primary_metadata(tg_pt_gp);
1078 	}
1079 
1080 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1081 		" from primary access state %s to %s\n", (explicit) ? "explicit" :
1082 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1083 		tg_pt_gp->tg_pt_gp_id,
1084 		core_alua_dump_state(prev_state),
1085 		core_alua_dump_state(new_state));
1086 
1087 	core_alua_queue_state_change_ua(tg_pt_gp);
1088 
1089 	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
1090 	return 0;
1091 }
1092 
1093 int core_alua_do_port_transition(
1094 	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1095 	struct se_device *l_dev,
1096 	struct se_lun *l_lun,
1097 	struct se_node_acl *l_nacl,
1098 	int new_state,
1099 	int explicit)
1100 {
1101 	struct se_device *dev;
1102 	struct t10_alua_lu_gp *lu_gp;
1103 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1104 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1105 	int primary, valid_states, rc = 0;
1106 
1107 	if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
1108 		return -ENODEV;
1109 
1110 	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1111 	if (core_alua_check_transition(new_state, valid_states, &primary,
1112 				       explicit) != 0)
1113 		return -EINVAL;
1114 
1115 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1116 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1117 	lu_gp = local_lu_gp_mem->lu_gp;
1118 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
1119 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1120 	/*
1121 	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
1123 	 * on all of the matching target port groups IDs in default_lu_gp.
1124 	 */
1125 	if (!lu_gp->lu_gp_id) {
1126 		/*
1127 		 * core_alua_do_transition_tg_pt() will always return
1128 		 * success.
1129 		 */
1130 		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1131 		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1132 		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1133 						   new_state, explicit);
1134 		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1135 		return rc;
1136 	}
1137 	/*
1138 	 * For all other LU groups aside from 'default_lu_gp', walk all of
1139 	 * the associated storage objects looking for a matching target port
1140 	 * group ID from the local target port group.
1141 	 */
1142 	spin_lock(&lu_gp->lu_gp_lock);
1143 	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1144 				lu_gp_mem_list) {
1145 
1146 		dev = lu_gp_mem->lu_gp_mem_dev;
1147 		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1148 		spin_unlock(&lu_gp->lu_gp_lock);
1149 
1150 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1151 		list_for_each_entry(tg_pt_gp,
1152 				&dev->t10_alua.tg_pt_gps_list,
1153 				tg_pt_gp_list) {
1154 
1155 			if (!tg_pt_gp->tg_pt_gp_valid_id)
1156 				continue;
1157 			/*
			 * If the target port asymmetric access state is
			 * changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * asymmetric access states for the same target port
			 * group accessible via other logical units in that
			 * LU group will also change.
1164 			 */
1165 			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1166 				continue;
1167 
1168 			if (l_tg_pt_gp == tg_pt_gp) {
1169 				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
1170 				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1171 			} else {
1172 				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
1173 				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1174 			}
1175 			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1176 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1177 			/*
1178 			 * core_alua_do_transition_tg_pt() will always return
1179 			 * success.
1180 			 */
1181 			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1182 					new_state, explicit);
1183 
1184 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1185 			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1186 			if (rc)
1187 				break;
1188 		}
1189 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1190 
1191 		spin_lock(&lu_gp->lu_gp_lock);
1192 		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1193 	}
1194 	spin_unlock(&lu_gp->lu_gp_lock);
1195 
1196 	if (!rc) {
1197 		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1198 			 " Group IDs: %hu %s transition to primary state: %s\n",
1199 			 config_item_name(&lu_gp->lu_gp_group.cg_item),
1200 			 l_tg_pt_gp->tg_pt_gp_id,
1201 			 (explicit) ? "explicit" : "implicit",
1202 			 core_alua_dump_state(new_state));
1203 	}
1204 
1205 	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1206 	return rc;
1207 }
1208 
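/*
 * Write the secondary (port offline) ALUA state and status for a single
 * struct se_lun to db_root/alua/<fabric>/<tpg_wwn>+<tpgt>/lun_<unpacked_lun>.
 * The "+<tpgt>" component is omitted when the fabric does not implement
 * tpg_get_tag().
 */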
1209 static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1210 {
1211 	struct se_portal_group *se_tpg = lun->lun_tpg;
1212 	unsigned char *md_buf;
1213 	char *path;
1214 	int len, rc;
1215 
1216 	mutex_lock(&lun->lun_tg_pt_md_mutex);
1217 
1218 	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1219 	if (!md_buf) {
1220 		pr_err("Unable to allocate buf for ALUA metadata\n");
1221 		rc = -ENOMEM;
1222 		goto out_unlock;
1223 	}
1224 
1225 	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1226 			"alua_tg_pt_status=0x%02x\n",
1227 			atomic_read(&lun->lun_tg_pt_secondary_offline),
1228 			lun->lun_tg_pt_secondary_stat);
1229 
1230 	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1231 		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1232 				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
1233 				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1234 				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1235 				lun->unpacked_lun);
1236 	} else {
1237 		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1238 				db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
1239 				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1240 				lun->unpacked_lun);
1241 	}
1242 	if (!path) {
1243 		rc = -ENOMEM;
1244 		goto out_free;
1245 	}
1246 
1247 	rc = core_alua_write_tpg_metadata(path, md_buf, len);
1248 	kfree(path);
1249 out_free:
1250 	kfree(md_buf);
1251 out_unlock:
1252 	mutex_unlock(&lun->lun_tg_pt_md_mutex);
1253 	return rc;
1254 }
1255 
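/*
 * Set or clear the secondary ALUA access state (OFFLINE) for a single
 * struct se_lun, applying the optional transition delay and updating the
 * secondary state metadata when requested.
 */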
1256 static int core_alua_set_tg_pt_secondary_state(
1257 	struct se_lun *lun,
1258 	int explicit,
1259 	int offline)
1260 {
1261 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1262 	int trans_delay_msecs;
1263 
1264 	spin_lock(&lun->lun_tg_pt_gp_lock);
1265 	tg_pt_gp = lun->lun_tg_pt_gp;
1266 	if (!tg_pt_gp) {
1267 		spin_unlock(&lun->lun_tg_pt_gp_lock);
1268 		pr_err("Unable to complete secondary state"
1269 				" transition\n");
1270 		return -EINVAL;
1271 	}
1272 	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1273 	/*
1274 	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously set secondary state for the struct se_lun
1276 	 */
1277 	if (offline)
1278 		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
1279 	else
1280 		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
1281 
1282 	lun->lun_tg_pt_secondary_stat = (explicit) ?
1283 			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1284 			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1285 
1286 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1287 		" to secondary access state: %s\n", (explicit) ? "explicit" :
1288 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1289 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1290 
1291 	spin_unlock(&lun->lun_tg_pt_gp_lock);
1292 	/*
1293 	 * Do the optional transition delay after we set the secondary
1294 	 * ALUA access state.
1295 	 */
1296 	if (trans_delay_msecs != 0)
1297 		msleep_interruptible(trans_delay_msecs);
1298 	/*
1299 	 * See if we need to update the ALUA fabric port metadata for
1300 	 * secondary state and status
1301 	 */
1302 	if (lun->lun_tg_pt_secondary_write_md)
1303 		core_alua_update_tpg_secondary_metadata(lun);
1304 
1305 	return 0;
1306 }
1307 
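/*
 * Allocate a referrals LBA map segment covering [first_lba, last_lba] and
 * append it to the supplied list.
 */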
1308 struct t10_alua_lba_map *
1309 core_alua_allocate_lba_map(struct list_head *list,
1310 			   u64 first_lba, u64 last_lba)
1311 {
1312 	struct t10_alua_lba_map *lba_map;
1313 
1314 	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1315 	if (!lba_map) {
1316 		pr_err("Unable to allocate struct t10_alua_lba_map\n");
1317 		return ERR_PTR(-ENOMEM);
1318 	}
1319 	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1320 	lba_map->lba_map_first_lba = first_lba;
1321 	lba_map->lba_map_last_lba = last_lba;
1322 
1323 	list_add_tail(&lba_map->lba_map_list, list);
1324 	return lba_map;
1325 }
1326 
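/*
 * Add a (target port group ID, ALUA state) member to an LBA map segment,
 * rejecting duplicate port group IDs.
 */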
1327 int
1328 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1329 			       int pg_id, int state)
1330 {
1331 	struct t10_alua_lba_map_member *lba_map_mem;
1332 
1333 	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1334 			    lba_map_mem_list) {
1335 		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1336 			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1337 			return -EINVAL;
1338 		}
1339 	}
1340 
1341 	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1342 	if (!lba_map_mem) {
1343 		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1344 		return -ENOMEM;
1345 	}
1346 	lba_map_mem->lba_map_mem_alua_state = state;
1347 	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1348 
1349 	list_add_tail(&lba_map_mem->lba_map_mem_list,
1350 		      &lba_map->lba_map_mem_list);
1351 	return 0;
1352 }
1353 
1354 void
1355 core_alua_free_lba_map(struct list_head *lba_list)
1356 {
1357 	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1358 	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1359 
1360 	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1361 				 lba_map_list) {
1362 		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1363 					 &lba_map->lba_map_mem_list,
1364 					 lba_map_mem_list) {
1365 			list_del(&lba_map_mem->lba_map_mem_list);
1366 			kmem_cache_free(t10_alua_lba_map_mem_cache,
1367 					lba_map_mem);
1368 		}
1369 		list_del(&lba_map->lba_map_list);
1370 		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1371 	}
1372 }
1373 
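/*
 * Install a new referrals LBA map for the device and update ALUA_LBD_SUP
 * in the supported states of every valid target port group accordingly.
 * The previous map is freed after it has been unlinked under the lock.
 */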
1374 void
1375 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1376 		      int segment_size, int segment_mult)
1377 {
1378 	struct list_head old_lba_map_list;
1379 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1380 	int activate = 0, supported;
1381 
1382 	INIT_LIST_HEAD(&old_lba_map_list);
1383 	spin_lock(&dev->t10_alua.lba_map_lock);
1384 	dev->t10_alua.lba_map_segment_size = segment_size;
1385 	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1386 	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1387 	if (lba_map_list) {
1388 		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1389 		activate = 1;
1390 	}
1391 	spin_unlock(&dev->t10_alua.lba_map_lock);
1392 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1393 	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1394 			    tg_pt_gp_list) {
1395 
1396 		if (!tg_pt_gp->tg_pt_gp_valid_id)
1397 			continue;
1398 		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1399 		if (activate)
1400 			supported |= ALUA_LBD_SUP;
1401 		else
1402 			supported &= ~ALUA_LBD_SUP;
1403 		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1404 	}
1405 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1406 	core_alua_free_lba_map(&old_lba_map_list);
1407 }
1408 
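/*
 * Allocate a logical unit group.  When def_group is set, an ID is assigned
 * immediately; otherwise the ID is assigned later via
 * core_alua_set_lu_gp_id().
 */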
1409 struct t10_alua_lu_gp *
1410 core_alua_allocate_lu_gp(const char *name, int def_group)
1411 {
1412 	struct t10_alua_lu_gp *lu_gp;
1413 
1414 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1415 	if (!lu_gp) {
1416 		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1417 		return ERR_PTR(-ENOMEM);
1418 	}
1419 	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1420 	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1421 	spin_lock_init(&lu_gp->lu_gp_lock);
1422 	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1423 
1424 	if (def_group) {
1425 		lu_gp->lu_gp_id = alua_lu_gps_counter++;
1426 		lu_gp->lu_gp_valid_id = 1;
1427 		alua_lu_gps_count++;
1428 	}
1429 
1430 	return lu_gp;
1431 }
1432 
1433 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1434 {
1435 	struct t10_alua_lu_gp *lu_gp_tmp;
1436 	u16 lu_gp_id_tmp;
1437 	/*
	 * The lu_gp->lu_gp_id may only be set once.
1439 	 */
1440 	if (lu_gp->lu_gp_valid_id) {
1441 		pr_warn("ALUA LU Group already has a valid ID,"
1442 			" ignoring request\n");
1443 		return -EINVAL;
1444 	}
1445 
1446 	spin_lock(&lu_gps_lock);
1447 	if (alua_lu_gps_count == 0x0000ffff) {
1448 		pr_err("Maximum ALUA alua_lu_gps_count:"
1449 				" 0x0000ffff reached\n");
1450 		spin_unlock(&lu_gps_lock);
1451 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1452 		return -ENOSPC;
1453 	}
1454 again:
1455 	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1456 				alua_lu_gps_counter++;
1457 
1458 	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1459 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1460 			if (!lu_gp_id)
1461 				goto again;
1462 
1463 			pr_warn("ALUA Logical Unit Group ID: %hu"
1464 				" already exists, ignoring request\n",
1465 				lu_gp_id);
1466 			spin_unlock(&lu_gps_lock);
1467 			return -EINVAL;
1468 		}
1469 	}
1470 
1471 	lu_gp->lu_gp_id = lu_gp_id_tmp;
1472 	lu_gp->lu_gp_valid_id = 1;
1473 	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1474 	alua_lu_gps_count++;
1475 	spin_unlock(&lu_gps_lock);
1476 
1477 	return 0;
1478 }
1479 
1480 static struct t10_alua_lu_gp_member *
1481 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1482 {
1483 	struct t10_alua_lu_gp_member *lu_gp_mem;
1484 
1485 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1486 	if (!lu_gp_mem) {
1487 		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1488 		return ERR_PTR(-ENOMEM);
1489 	}
1490 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1491 	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1492 	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1493 
1494 	lu_gp_mem->lu_gp_mem_dev = dev;
1495 	dev->dev_alua_lu_gp_mem = lu_gp_mem;
1496 
1497 	return lu_gp_mem;
1498 }
1499 
1500 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1501 {
1502 	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1503 	/*
1504 	 * Once we have reached this point, config_item_put() has
1505 	 * already been called from target_core_alua_drop_lu_gp().
1506 	 *
1507 	 * Here, we remove the *lu_gp from the global list so that
1508 	 * no associations can be made while we are releasing
1509 	 * struct t10_alua_lu_gp.
1510 	 */
1511 	spin_lock(&lu_gps_lock);
1512 	list_del(&lu_gp->lu_gp_node);
1513 	alua_lu_gps_count--;
1514 	spin_unlock(&lu_gps_lock);
1515 	/*
1516 	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1517 	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1518 	 * released with core_alua_put_lu_gp_from_name()
1519 	 */
1520 	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1521 		cpu_relax();
1522 	/*
1523 	 * Release reference to struct t10_alua_lu_gp * from all associated
1524 	 * struct se_device.
1525 	 */
1526 	spin_lock(&lu_gp->lu_gp_lock);
1527 	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1528 				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1529 		if (lu_gp_mem->lu_gp_assoc) {
1530 			list_del(&lu_gp_mem->lu_gp_mem_list);
1531 			lu_gp->lu_gp_members--;
1532 			lu_gp_mem->lu_gp_assoc = 0;
1533 		}
1534 		spin_unlock(&lu_gp->lu_gp_lock);
1535 		/*
1537 		 * lu_gp_mem is associated with a single
1538 		 * struct se_device->dev_alua_lu_gp_mem, and is released when
1539 		 * struct se_device is released via core_alua_free_lu_gp_mem().
1540 		 *
1541 		 * If the passed lu_gp does NOT match the default_lu_gp, assume
1542 		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1543 		 */
1544 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1545 		if (lu_gp != default_lu_gp)
1546 			__core_alua_attach_lu_gp_mem(lu_gp_mem,
1547 					default_lu_gp);
1548 		else
1549 			lu_gp_mem->lu_gp = NULL;
1550 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1551 
1552 		spin_lock(&lu_gp->lu_gp_lock);
1553 	}
1554 	spin_unlock(&lu_gp->lu_gp_lock);
1555 
1556 	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1557 }
1558 
1559 void core_alua_free_lu_gp_mem(struct se_device *dev)
1560 {
1561 	struct t10_alua_lu_gp *lu_gp;
1562 	struct t10_alua_lu_gp_member *lu_gp_mem;
1563 
1564 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
1565 	if (!lu_gp_mem)
1566 		return;
1567 
1568 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1569 		cpu_relax();
1570 
1571 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1572 	lu_gp = lu_gp_mem->lu_gp;
1573 	if (lu_gp) {
1574 		spin_lock(&lu_gp->lu_gp_lock);
1575 		if (lu_gp_mem->lu_gp_assoc) {
1576 			list_del(&lu_gp_mem->lu_gp_mem_list);
1577 			lu_gp->lu_gp_members--;
1578 			lu_gp_mem->lu_gp_assoc = 0;
1579 		}
1580 		spin_unlock(&lu_gp->lu_gp_lock);
1581 		lu_gp_mem->lu_gp = NULL;
1582 	}
1583 	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1584 
1585 	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1586 }
1587 
1588 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1589 {
1590 	struct t10_alua_lu_gp *lu_gp;
1591 	struct config_item *ci;
1592 
1593 	spin_lock(&lu_gps_lock);
1594 	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1595 		if (!lu_gp->lu_gp_valid_id)
1596 			continue;
1597 		ci = &lu_gp->lu_gp_group.cg_item;
1598 		if (!strcmp(config_item_name(ci), name)) {
1599 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
1600 			spin_unlock(&lu_gps_lock);
1601 			return lu_gp;
1602 		}
1603 	}
1604 	spin_unlock(&lu_gps_lock);
1605 
1606 	return NULL;
1607 }
1608 
1609 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1610 {
1611 	spin_lock(&lu_gps_lock);
1612 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
1613 	spin_unlock(&lu_gps_lock);
1614 }
1615 
1616 /*
1617  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1618  */
1619 void __core_alua_attach_lu_gp_mem(
1620 	struct t10_alua_lu_gp_member *lu_gp_mem,
1621 	struct t10_alua_lu_gp *lu_gp)
1622 {
1623 	spin_lock(&lu_gp->lu_gp_lock);
1624 	lu_gp_mem->lu_gp = lu_gp;
1625 	lu_gp_mem->lu_gp_assoc = 1;
1626 	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1627 	lu_gp->lu_gp_members++;
1628 	spin_unlock(&lu_gp->lu_gp_lock);
1629 }
1630 
1631 /*
1632  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1633  */
1634 void __core_alua_drop_lu_gp_mem(
1635 	struct t10_alua_lu_gp_member *lu_gp_mem,
1636 	struct t10_alua_lu_gp *lu_gp)
1637 {
1638 	spin_lock(&lu_gp->lu_gp_lock);
1639 	list_del(&lu_gp_mem->lu_gp_mem_list);
1640 	lu_gp_mem->lu_gp = NULL;
1641 	lu_gp_mem->lu_gp_assoc = 0;
1642 	lu_gp->lu_gp_members--;
1643 	spin_unlock(&lu_gp->lu_gp_lock);
1644 }
1645 
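/*
 * Allocate a target port group, defaulting to the Active/Optimized access
 * state with both implicit and explicit ALUA enabled.  When def_group is
 * set, an ID is assigned immediately and the group is linked into the
 * device's tg_pt_gps_list.
 */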
1646 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1647 		const char *name, int def_group)
1648 {
1649 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1650 
1651 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1652 	if (!tg_pt_gp) {
1653 		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1654 		return NULL;
1655 	}
1656 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1657 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
1658 	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
1659 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1660 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1661 	tg_pt_gp->tg_pt_gp_dev = dev;
1662 	tg_pt_gp->tg_pt_gp_alua_access_state =
1663 			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
1664 	/*
1665 	 * Enable both explicit and implicit ALUA support by default
1666 	 */
1667 	tg_pt_gp->tg_pt_gp_alua_access_type =
1668 			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1669 	/*
1670 	 * Set the default Active/NonOptimized Delay in milliseconds
1671 	 */
1672 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1673 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1674 	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1675 
1676 	/*
1677 	 * Enable all supported states
1678 	 */
1679 	tg_pt_gp->tg_pt_gp_alua_supported_states =
1680 	    ALUA_T_SUP | ALUA_O_SUP |
1681 	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1682 
1683 	if (def_group) {
1684 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1685 		tg_pt_gp->tg_pt_gp_id =
1686 				dev->t10_alua.alua_tg_pt_gps_counter++;
1687 		tg_pt_gp->tg_pt_gp_valid_id = 1;
1688 		dev->t10_alua.alua_tg_pt_gps_count++;
1689 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1690 			      &dev->t10_alua.tg_pt_gps_list);
1691 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1692 	}
1693 
1694 	return tg_pt_gp;
1695 }
1696 
1697 int core_alua_set_tg_pt_gp_id(
1698 	struct t10_alua_tg_pt_gp *tg_pt_gp,
1699 	u16 tg_pt_gp_id)
1700 {
1701 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1702 	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1703 	u16 tg_pt_gp_id_tmp;
1704 
1705 	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
1707 	 */
1708 	if (tg_pt_gp->tg_pt_gp_valid_id) {
1709 		pr_warn("ALUA TG PT Group already has a valid ID,"
1710 			" ignoring request\n");
1711 		return -EINVAL;
1712 	}
1713 
1714 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1715 	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1716 		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1717 			" 0x0000ffff reached\n");
1718 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1719 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1720 		return -ENOSPC;
1721 	}
1722 again:
1723 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1724 			dev->t10_alua.alua_tg_pt_gps_counter++;
1725 
1726 	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1727 			tg_pt_gp_list) {
1728 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1729 			if (!tg_pt_gp_id)
1730 				goto again;
1731 
1732 			pr_err("ALUA Target Port Group ID: %hu already"
1733 				" exists, ignoring request\n", tg_pt_gp_id);
1734 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1735 			return -EINVAL;
1736 		}
1737 	}
1738 
1739 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1740 	tg_pt_gp->tg_pt_gp_valid_id = 1;
1741 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1742 			&dev->t10_alua.tg_pt_gps_list);
1743 	dev->t10_alua.alua_tg_pt_gps_count++;
1744 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1745 
1746 	return 0;
1747 }
1748 
1749 void core_alua_free_tg_pt_gp(
1750 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1751 {
1752 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1753 	struct se_lun *lun, *next;
1754 
1755 	/*
1756 	 * Once we have reached this point, config_item_put() has already
1757 	 * been called from target_core_alua_drop_tg_pt_gp().
1758 	 *
1759 	 * Here we remove *tg_pt_gp from the global list so that
1760 	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1761 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1762 	 */
1763 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1764 	list_del(&tg_pt_gp->tg_pt_gp_list);
1765 	dev->t10_alua.alua_tg_pt_gps_counter--;
1766 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1767 
1768 	/*
	 * Allow a struct t10_alua_tg_pt_gp * referenced by
1770 	 * core_alua_get_tg_pt_gp_by_name() in
1771 	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1772 	 * to be released with core_alua_put_tg_pt_gp_from_name().
1773 	 */
1774 	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1775 		cpu_relax();
1776 
1777 	/*
1778 	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_lun.
1780 	 */
1781 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1782 	list_for_each_entry_safe(lun, next,
1783 			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
1784 		list_del_init(&lun->lun_tg_pt_gp_link);
1785 		tg_pt_gp->tg_pt_gp_members--;
1786 
1787 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1788 		/*
1789 		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1790 		 * assume we want to re-associate a given tg_pt_gp_mem with
1791 		 * default_tg_pt_gp.
1792 		 */
1793 		spin_lock(&lun->lun_tg_pt_gp_lock);
1794 		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1795 			__target_attach_tg_pt_gp(lun,
1796 					dev->t10_alua.default_tg_pt_gp);
		} else {
			lun->lun_tg_pt_gp = NULL;
		}
1799 		spin_unlock(&lun->lun_tg_pt_gp_lock);
1800 
1801 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1802 	}
1803 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1804 
1805 	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1806 }
1807 
1808 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1809 		struct se_device *dev, const char *name)
1810 {
1811 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1812 	struct config_item *ci;
1813 
1814 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1815 	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1816 			tg_pt_gp_list) {
1817 		if (!tg_pt_gp->tg_pt_gp_valid_id)
1818 			continue;
1819 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1820 		if (!strcmp(config_item_name(ci), name)) {
1821 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1822 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1823 			return tg_pt_gp;
1824 		}
1825 	}
1826 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1827 
1828 	return NULL;
1829 }
1830 
1831 static void core_alua_put_tg_pt_gp_from_name(
1832 	struct t10_alua_tg_pt_gp *tg_pt_gp)
1833 {
1834 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1835 
1836 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1837 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1838 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1839 }
1840 
1841 static void __target_attach_tg_pt_gp(struct se_lun *lun,
1842 		struct t10_alua_tg_pt_gp *tg_pt_gp)
1843 {
1844 	struct se_dev_entry *se_deve;
1845 
1846 	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1847 
1848 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1849 	lun->lun_tg_pt_gp = tg_pt_gp;
1850 	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
1851 	tg_pt_gp->tg_pt_gp_members++;
1852 	spin_lock(&lun->lun_deve_lock);
1853 	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
1854 		core_scsi3_ua_allocate(se_deve, 0x3f,
1855 				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
1856 	spin_unlock(&lun->lun_deve_lock);
1857 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1858 }
1859 
1860 void target_attach_tg_pt_gp(struct se_lun *lun,
1861 		struct t10_alua_tg_pt_gp *tg_pt_gp)
1862 {
1863 	spin_lock(&lun->lun_tg_pt_gp_lock);
1864 	__target_attach_tg_pt_gp(lun, tg_pt_gp);
1865 	spin_unlock(&lun->lun_tg_pt_gp_lock);
1866 }
1867 
1868 static void __target_detach_tg_pt_gp(struct se_lun *lun,
1869 		struct t10_alua_tg_pt_gp *tg_pt_gp)
1870 {
1871 	assert_spin_locked(&lun->lun_tg_pt_gp_lock);
1872 
1873 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1874 	list_del_init(&lun->lun_tg_pt_gp_link);
1875 	tg_pt_gp->tg_pt_gp_members--;
1876 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1877 
1878 	lun->lun_tg_pt_gp = NULL;
1879 }
1880 
1881 void target_detach_tg_pt_gp(struct se_lun *lun)
1882 {
1883 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1884 
1885 	spin_lock(&lun->lun_tg_pt_gp_lock);
1886 	tg_pt_gp = lun->lun_tg_pt_gp;
1887 	if (tg_pt_gp)
1888 		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1889 	spin_unlock(&lun->lun_tg_pt_gp_lock);
1890 }
1891 
1892 ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1893 {
1894 	struct config_item *tg_pt_ci;
1895 	struct t10_alua_tg_pt_gp *tg_pt_gp;
1896 	ssize_t len = 0;
1897 
1898 	spin_lock(&lun->lun_tg_pt_gp_lock);
1899 	tg_pt_gp = lun->lun_tg_pt_gp;
1900 	if (tg_pt_gp) {
1901 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1902 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1903 			" %hu\nTG Port Primary Access State: %s\nTG Port "
1904 			"Primary Access Status: %s\nTG Port Secondary Access"
1905 			" State: %s\nTG Port Secondary Access Status: %s\n",
1906 			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1907 			core_alua_dump_state(
1908 				tg_pt_gp->tg_pt_gp_alua_access_state),
1909 			core_alua_dump_status(
1910 				tg_pt_gp->tg_pt_gp_alua_access_status),
1911 			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
1912 			"Offline" : "None",
1913 			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
1914 	}
1915 	spin_unlock(&lun->lun_tg_pt_gp_lock);
1916 
1917 	return len;
1918 }
1919 
1920 ssize_t core_alua_store_tg_pt_gp_info(
1921 	struct se_lun *lun,
1922 	const char *page,
1923 	size_t count)
1924 {
1925 	struct se_portal_group *tpg = lun->lun_tpg;
1926 	/*
1927 	 * rcu_dereference_raw protected by se_lun->lun_group symlink
1928 	 * reference to se_device->dev_group.
1929 	 */
1930 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
1931 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1932 	unsigned char buf[TG_PT_GROUP_NAME_BUF];
1933 	int move = 0;
1934 
1935 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
1936 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
1937 		return -ENODEV;
1938 
	if (count >= TG_PT_GROUP_NAME_BUF) {
1940 		pr_err("ALUA Target Port Group alias too large!\n");
1941 		return -EINVAL;
1942 	}
1943 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1944 	memcpy(buf, page, count);
1945 	/*
1946 	 * Any ALUA target port group alias besides "NULL" means we will be
1947 	 * making a new group association.
1948 	 */
1949 	if (strcmp(strstrip(buf), "NULL")) {
1950 		/*
1951 		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1952 		 * struct t10_alua_tg_pt_gp.  This reference is released with
1953 		 * core_alua_put_tg_pt_gp_from_name() below.
1954 		 */
1955 		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
1956 					strstrip(buf));
1957 		if (!tg_pt_gp_new)
1958 			return -ENODEV;
1959 	}
1960 
1961 	spin_lock(&lun->lun_tg_pt_gp_lock);
1962 	tg_pt_gp = lun->lun_tg_pt_gp;
1963 	if (tg_pt_gp) {
1964 		/*
1965 		 * Clearing an existing tg_pt_gp association, and replacing
1966 		 * with the default_tg_pt_gp.
1967 		 */
1968 		if (!tg_pt_gp_new) {
1969 			pr_debug("Target_Core_ConfigFS: Moving"
1970 				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
1971 				" alua/%s, ID: %hu back to"
1972 				" default_tg_pt_gp\n",
1973 				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1974 				tpg->se_tpg_tfo->tpg_get_tag(tpg),
1975 				config_item_name(&lun->lun_group.cg_item),
1976 				config_item_name(
1977 					&tg_pt_gp->tg_pt_gp_group.cg_item),
1978 				tg_pt_gp->tg_pt_gp_id);
1979 
1980 			__target_detach_tg_pt_gp(lun, tg_pt_gp);
1981 			__target_attach_tg_pt_gp(lun,
1982 					dev->t10_alua.default_tg_pt_gp);
1983 			spin_unlock(&lun->lun_tg_pt_gp_lock);
1984 
1985 			return count;
1986 		}
1987 		__target_detach_tg_pt_gp(lun, tg_pt_gp);
1988 		move = 1;
1989 	}
1990 
1991 	__target_attach_tg_pt_gp(lun, tg_pt_gp_new);
1992 	spin_unlock(&lun->lun_tg_pt_gp_lock);
1993 	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1994 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
1995 		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1996 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
1997 		config_item_name(&lun->lun_group.cg_item),
1998 		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1999 		tg_pt_gp_new->tg_pt_gp_id);
2000 
2001 	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2002 	return count;
2003 }
2004 
2005 ssize_t core_alua_show_access_type(
2006 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2007 	char *page)
2008 {
2009 	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2010 	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2011 		return sprintf(page, "Implicit and Explicit\n");
2012 	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2013 		return sprintf(page, "Implicit\n");
2014 	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2015 		return sprintf(page, "Explicit\n");
2016 	else
2017 		return sprintf(page, "None\n");
2018 }
2019 
2020 ssize_t core_alua_store_access_type(
2021 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2022 	const char *page,
2023 	size_t count)
2024 {
2025 	unsigned long tmp;
2026 	int ret;
2027 
2028 	ret = kstrtoul(page, 0, &tmp);
2029 	if (ret < 0) {
2030 		pr_err("Unable to extract alua_access_type\n");
2031 		return ret;
2032 	}
2033 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2034 		pr_err("Illegal value for alua_access_type:"
2035 				" %lu\n", tmp);
2036 		return -EINVAL;
2037 	}
2038 	if (tmp == 3)
2039 		tg_pt_gp->tg_pt_gp_alua_access_type =
2040 			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2041 	else if (tmp == 2)
2042 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2043 	else if (tmp == 1)
2044 		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2045 	else
2046 		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2047 
2048 	return count;
2049 }
2050 
2051 ssize_t core_alua_show_nonop_delay_msecs(
2052 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2053 	char *page)
2054 {
2055 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2056 }
2057 
2058 ssize_t core_alua_store_nonop_delay_msecs(
2059 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2060 	const char *page,
2061 	size_t count)
2062 {
2063 	unsigned long tmp;
2064 	int ret;
2065 
2066 	ret = kstrtoul(page, 0, &tmp);
2067 	if (ret < 0) {
2068 		pr_err("Unable to extract nonop_delay_msecs\n");
2069 		return ret;
2070 	}
2071 	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2072 		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2073 			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2074 			ALUA_MAX_NONOP_DELAY_MSECS);
2075 		return -EINVAL;
2076 	}
2077 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2078 
2079 	return count;
2080 }
2081 
2082 ssize_t core_alua_show_trans_delay_msecs(
2083 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2084 	char *page)
2085 {
2086 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2087 }
2088 
2089 ssize_t core_alua_store_trans_delay_msecs(
2090 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2091 	const char *page,
2092 	size_t count)
2093 {
2094 	unsigned long tmp;
2095 	int ret;
2096 
2097 	ret = kstrtoul(page, 0, &tmp);
2098 	if (ret < 0) {
2099 		pr_err("Unable to extract trans_delay_msecs\n");
2100 		return ret;
2101 	}
2102 	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2103 		pr_err("Passed trans_delay_msecs: %lu, exceeds"
2104 			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2105 			ALUA_MAX_TRANS_DELAY_MSECS);
2106 		return -EINVAL;
2107 	}
2108 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2109 
2110 	return count;
2111 }
2112 
2113 ssize_t core_alua_show_implicit_trans_secs(
2114 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2115 	char *page)
2116 {
2117 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2118 }
2119 
2120 ssize_t core_alua_store_implicit_trans_secs(
2121 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2122 	const char *page,
2123 	size_t count)
2124 {
2125 	unsigned long tmp;
2126 	int ret;
2127 
2128 	ret = kstrtoul(page, 0, &tmp);
2129 	if (ret < 0) {
2130 		pr_err("Unable to extract implicit_trans_secs\n");
2131 		return ret;
2132 	}
2133 	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2134 		pr_err("Passed implicit_trans_secs: %lu, exceeds"
2135 			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2136 			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
2138 	}
2139 	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2140 
2141 	return count;
2142 }
2143 
2144 ssize_t core_alua_show_preferred_bit(
2145 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2146 	char *page)
2147 {
2148 	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2149 }
2150 
2151 ssize_t core_alua_store_preferred_bit(
2152 	struct t10_alua_tg_pt_gp *tg_pt_gp,
2153 	const char *page,
2154 	size_t count)
2155 {
2156 	unsigned long tmp;
2157 	int ret;
2158 
2159 	ret = kstrtoul(page, 0, &tmp);
2160 	if (ret < 0) {
2161 		pr_err("Unable to extract preferred ALUA value\n");
2162 		return ret;
2163 	}
2164 	if ((tmp != 0) && (tmp != 1)) {
2165 		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2166 		return -EINVAL;
2167 	}
2168 	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2169 
2170 	return count;
2171 }
2172 
2173 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2174 {
2175 	return sprintf(page, "%d\n",
2176 		atomic_read(&lun->lun_tg_pt_secondary_offline));
2177 }
2178 
2179 ssize_t core_alua_store_offline_bit(
2180 	struct se_lun *lun,
2181 	const char *page,
2182 	size_t count)
2183 {
2184 	/*
2185 	 * rcu_dereference_raw protected by se_lun->lun_group symlink
2186 	 * reference to se_device->dev_group.
2187 	 */
2188 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
2189 	unsigned long tmp;
2190 	int ret;
2191 
2192 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
2193 	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
2194 		return -ENODEV;
2195 
2196 	ret = kstrtoul(page, 0, &tmp);
2197 	if (ret < 0) {
2198 		pr_err("Unable to extract alua_tg_pt_offline value\n");
2199 		return ret;
2200 	}
2201 	if ((tmp != 0) && (tmp != 1)) {
2202 		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2203 				tmp);
2204 		return -EINVAL;
2205 	}
2206 
2207 	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
2208 	if (ret < 0)
2209 		return -EINVAL;
2210 
2211 	return count;
2212 }
2213 
2214 ssize_t core_alua_show_secondary_status(
2215 	struct se_lun *lun,
2216 	char *page)
2217 {
2218 	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
2219 }
2220 
2221 ssize_t core_alua_store_secondary_status(
2222 	struct se_lun *lun,
2223 	const char *page,
2224 	size_t count)
2225 {
2226 	unsigned long tmp;
2227 	int ret;
2228 
2229 	ret = kstrtoul(page, 0, &tmp);
2230 	if (ret < 0) {
2231 		pr_err("Unable to extract alua_tg_pt_status\n");
2232 		return ret;
2233 	}
2234 	if ((tmp != ALUA_STATUS_NONE) &&
2235 	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2236 	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2237 		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2238 				tmp);
2239 		return -EINVAL;
2240 	}
2241 	lun->lun_tg_pt_secondary_stat = (int)tmp;
2242 
2243 	return count;
2244 }
2245 
2246 ssize_t core_alua_show_secondary_write_metadata(
2247 	struct se_lun *lun,
2248 	char *page)
2249 {
2250 	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
2251 }
2252 
2253 ssize_t core_alua_store_secondary_write_metadata(
2254 	struct se_lun *lun,
2255 	const char *page,
2256 	size_t count)
2257 {
2258 	unsigned long tmp;
2259 	int ret;
2260 
2261 	ret = kstrtoul(page, 0, &tmp);
2262 	if (ret < 0) {
2263 		pr_err("Unable to extract alua_tg_pt_write_md\n");
2264 		return ret;
2265 	}
2266 	if ((tmp != 0) && (tmp != 1)) {
2267 		pr_err("Illegal value for alua_tg_pt_write_md:"
2268 				" %lu\n", tmp);
2269 		return -EINVAL;
2270 	}
2271 	lun->lun_tg_pt_secondary_write_md = (int)tmp;
2272 
2273 	return count;
2274 }
2275 
2276 int core_setup_alua(struct se_device *dev)
2277 {
2278 	if (!(dev->transport->transport_flags &
2279 	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
2280 	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2281 		struct t10_alua_lu_gp_member *lu_gp_mem;
2282 
2283 		/*
2284 		 * Associate this struct se_device with the default ALUA
2285 		 * LUN Group.
2286 		 */
2287 		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2288 		if (IS_ERR(lu_gp_mem))
2289 			return PTR_ERR(lu_gp_mem);
2290 
2291 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2292 		__core_alua_attach_lu_gp_mem(lu_gp_mem,
2293 				default_lu_gp);
2294 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2295 
2296 		pr_debug("%s: Adding to default ALUA LU Group:"
2297 			" core/alua/lu_gps/default_lu_gp\n",
2298 			dev->transport->name);
2299 	}
2300 
2301 	return 0;
2302 }
2303