/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	if (state >= ARRAY_SIZE(strings))
		return "UNKNOWN";

	return strings[state];
}
#undef C

/**
 * sci_remote_node_context_is_ready() - check whether the RNC is ready
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the ready state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	return sci_rnc->sm.current_state_id == SCI_RNC_READY;
}

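/*
 * Check whether the remote node context is in the SCI_RNC_TX_RX_SUSPENDED
 * state, i.e. both transmit and receive are suspended.
 */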
bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
	return sci_rnc->sm.current_state_id == SCI_RNC_TX_RX_SUSPENDED;
}

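/*
 * Look up the hardware remote node context buffer for the given remote node
 * index; returns NULL if the index is out of range or no device is registered
 * at that index.
 */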
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}

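/*
 * Initialize the hardware remote node context buffer(s) for this device:
 * clear the entries, then fill in the remote node index, port width, logical
 * port, SAS address, and the protocol-specific timeout values.
 */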
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit               = false;
	rnc->ssp.is_valid                = false;
	rnc->ssp.is_remote_node_context  = true;
	rnc->ssp.function_number         = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout  =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
/*
 * This method sets up the remote node context object so that it will
 * transition to its ready state.  If the remote node context is already set
 * up to transition to its final state, then this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL) {
			sci_rnc->user_callback = callback;
			sci_rnc->user_cookie   = callback_parameter;
		}
	}
}

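/*
 * Record the user callback and mark the remote node context for destruction
 * (RNC_DEST_FINAL), then wake any waiters on the host event queue.
 */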
static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));

	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback     = callback;
	sci_rnc->user_cookie       = callback_parameter;

	wake_up(&ihost->eventq);
}

/*
 * This method just calls the user callback function and then resets the
 * callback.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}

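/*
 * Based on the requested destination state, either continue on to resume the
 * remote node context or reset the destination state to unspecified.
 */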
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	switch (rnc->destination_state) {
	case RNC_DEST_READY:
	case RNC_DEST_SUSPENDED_RESUME:
		rnc->destination_state = RNC_DEST_READY;
		fallthrough;
	case RNC_DEST_FINAL:
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
		break;
	default:
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		break;
	}
}

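/*
 * Mark the hardware RNC buffer valid and post it to the SCU: expander
 * attached SATA devices use the POST_RNC_96 command, all other devices use
 * POST_RNC_32, and direct attached devices additionally have their port
 * transports set up.
 */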
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}

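/*
 * Mark the hardware RNC buffer invalid and post an invalidate request to the
 * SCU.
 */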
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);

		smp_wmb();
		wake_up(&ihost->eventq);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy; in cases where we resume
	 * because of a target reset we also need to update the
	 * STPTLDARNI register with the RNi of the device.
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
			tell_user = 0;  /* Wait until ready again. */
	}
	if (tell_user)
		sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 new_count = rnc->suspend_count + 1;

	if (new_count == 0)
		rnc->suspend_count = 1;
	else
		rnc->suspend_count = new_count;
	smp_wmb();

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}

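/*
 * State table for the remote node context state machine; states without an
 * enter/exit handler need no action on that transition.
 */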
static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};

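/*
 * Zero the remote node context, record its remote node index, and start its
 * state machine in the SCI_RNC_INITIAL state.
 */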
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
					    u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}

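/*
 * Handle an SCU event for this remote node context, advancing the state
 * machine as needed.  Returns SCI_SUCCESS if the event was consumed,
 * SCI_FAILURE for an unexpected event, or SCI_FAILURE_INVALID_STATE if the
 * event arrived in a state that does not handle events.
 */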
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
							   u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care whether the hardware suspends
				 * the device, since it is being invalidated anyway. */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being "
					"invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care whether the hardware suspends
				 * the device, since it is being resumed anyway. */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being resumed.\n",
					__func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

 out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;

}

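/*
 * Request destruction of the remote node context.  In most states this
 * records the callback and moves the state machine to SCI_RNC_INVALIDATING;
 * if invalidation or suspension is already in flight, only the destination
 * state and callback are updated.
 */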
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						      scics_sds_remote_node_context_callback cb_fn,
						      void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that the destruct request on the remote node
		 * context cannot fail, since it is either already in the
		 * initial/destroyed state or in a state from which it can be
		 * destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

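/*
 * Suspend the remote node context.  Depending on the current state this
 * records that a suspension is needed once the RNC reaches the ready state,
 * returns early if the RNC is already suspended in the requested way, or
 * issues the suspension (posting a software suspend when requested) and
 * moves the state machine to SCI_RNC_AWAIT_SUSPENSION.
 */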
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		fallthrough;	/* and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		fallthrough;	/* and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to SUSPENDED - this signals to the
		 * SCI_RNC_READY state entry that a suspension needs to be
		 * done immediately.
		 */
		if (sci_rnc->destination_state != RNC_DEST_FINAL)
			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}

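/*
 * Resume the remote node context.  The action taken depends on the current
 * state: an initial-state RNC is constructed and posted, a suspended RNC is
 * resumed (or invalidated and reposted for expander attached SATA devices),
 * and an RNC that is mid-transition simply has its destination state and
 * callback updated.
 */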
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
						    scics_sds_remote_node_context_callback cb_fn,
						    void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
			"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was
		 * requested.
		 */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		{
			struct domain_device *dev = idev->domain_dev;
			/* If this is an expander attached SATA device we must
			 * invalidate and repost the RNC since this is the only
			 * way to clear the TCi to NCQ tag mapping table for
			 * the RNi. All other device types we can just resume.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
				if ((dev_is_sata(dev) && dev->parent) ||
				    (sci_rnc->destination_state == RNC_DEST_FINAL))
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_INVALIDATING);
				else
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_RESUMING);
			}
		}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

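/*
 * Check that the remote node context is in a state that allows a new I/O
 * request to be started.
 */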
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
							     struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;

	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

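/*
 * Start a task management request by resuming the remote node context with
 * the caller's callback; log a warning if the resume cannot be started.
 */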
enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: resume failed: %d\n", __func__, status);
	return status;
}

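/*
 * Return nonzero if outstanding requests may safely be aborted in the current
 * remote node context state (i.e. while invalidating or fully suspended).
 */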
int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}