/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "scu_task_context.h"

#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
	static const char * const strings[] = RNC_STATES;

	if (state >= ARRAY_SIZE(strings))
		return "UNKNOWN";

	return strings[state];
}
#undef C

/**
 * sci_remote_node_context_is_ready() - check whether an RNC is ready
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the READY state, false
 * otherwise.
 */
bool sci_remote_node_context_is_ready(
	struct sci_remote_node_context *sci_rnc)
{
	return sci_rnc->sm.current_state_id == SCI_RNC_READY;
}

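/**
 * sci_remote_node_context_is_suspended() - check whether an RNC is fully suspended
 * @sci_rnc: The remote node context object to check.
 *
 * Return: true if the remote node context is in the TX/RX suspended state,
 * false otherwise.
 */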
bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
{
	return sci_rnc->sm.current_state_id == SCI_RNC_TX_RX_SUSPENDED;
}

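/*
 * Look up the remote node context entry in the host's remote node context
 * table for a given remote node index; return NULL if the index is out of
 * range or no device occupies it.
 */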
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
	if (id < ihost->remote_node_entries &&
	    ihost->device_table[id])
		return &ihost->remote_node_context_table[id];

	return NULL;
}

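/*
 * Initialize the hardware remote node context entries for a device: clear
 * the entries, then fill in the port, SAS address, timer/timeout, and open
 * address frame parameters the SCU needs before the context can be posted.
 */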
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	int rni = sci_rnc->remote_node_index;
	union scu_remote_node_context *rnc;
	struct isci_host *ihost;
	__le64 sas_addr;

	ihost = idev->owning_port->owning_controller;
	rnc = sci_rnc_by_id(ihost, rni);

	memset(rnc, 0, sizeof(union scu_remote_node_context)
		* sci_remote_device_node_count(idev));

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = idev->device_port_width;
	rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;

	/* sas address is __be64, context ram format is __le64 */
	sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
	rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
	rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);

	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.check_bit               = false;
	rnc->ssp.is_valid                = false;
	rnc->ssp.is_remote_node_context  = true;
	rnc->ssp.function_number         = 0;

	rnc->ssp.arbitration_wait_time = 0;

	if (dev_is_sata(dev)) {
		rnc->ssp.connection_occupancy_timeout =
			ihost->user_parameters.stp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.stp_inactivity_timeout;
	} else {
		rnc->ssp.connection_occupancy_timeout  =
			ihost->user_parameters.ssp_max_occupancy_timeout;
		rnc->ssp.connection_inactivity_timeout =
			ihost->user_parameters.ssp_inactivity_timeout;
	}

	rnc->ssp.initial_arbitration_wait_time = 0;

	/* Open Address Frame Parameters */
	rnc->ssp.oaf_connection_rate = idev->connection_rate;
	rnc->ssp.oaf_features = 0;
	rnc->ssp.oaf_source_zone_group = 0;
	rnc->ssp.oaf_more_compatibility_features = 0;
}
/**
 * sci_remote_node_context_setup_to_resume() - arm an RNC for resumption
 * @sci_rnc: The remote node context to set up.
 * @callback: The user callback to invoke once the transition completes.
 * @callback_parameter: Cookie passed back to @callback.
 * @dest_param: The destination state to record for the pending resume.
 *
 * Set up the remote node context object so that it will transition to its
 * ready state.  If the remote node context is already set up to transition
 * to its final (destroyed) state, this function does nothing.
 */
static void sci_remote_node_context_setup_to_resume(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter,
	enum sci_remote_node_context_destination_state dest_param)
{
	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
		sci_rnc->destination_state = dest_param;
		if (callback != NULL) {
			sci_rnc->user_callback = callback;
			sci_rnc->user_cookie   = callback_parameter;
		}
	}
}

static void sci_remote_node_context_setup_to_destroy(
	struct sci_remote_node_context *sci_rnc,
	scics_sds_remote_node_context_callback callback,
	void *callback_parameter)
{
	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));

	sci_rnc->destination_state = RNC_DEST_FINAL;
	sci_rnc->user_callback     = callback;
	sci_rnc->user_cookie       = callback_parameter;

	wake_up(&ihost->eventq);
}

/**
 * sci_remote_node_context_notify_user() - invoke and clear the user callback
 * @rnc: The remote node context whose pending callback should be run.
 *
 * Call the user callback function, if one is registered, and then clear the
 * callback and its cookie.
 */
static void sci_remote_node_context_notify_user(
	struct sci_remote_node_context *rnc)
{
	if (rnc->user_callback != NULL) {
		(*rnc->user_callback)(rnc->user_cookie);

		rnc->user_callback = NULL;
		rnc->user_cookie = NULL;
	}
}

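/*
 * Continue a multi-step RNC transition: pending ready or suspend-then-resume
 * requests, as well as a pending destruction, are driven forward by issuing
 * a resume; any other destination is reset to unspecified.
 */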
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
	switch (rnc->destination_state) {
	case RNC_DEST_READY:
	case RNC_DEST_SUSPENDED_RESUME:
		rnc->destination_state = RNC_DEST_READY;
		/* Fall through... */
	case RNC_DEST_FINAL:
		sci_remote_node_context_resume(rnc, rnc->user_callback,
					       rnc->user_cookie);
		break;
	default:
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		break;
	}
}

static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct domain_device *dev = idev->domain_dev;
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = true;

	if (dev_is_sata(dev) && dev->parent) {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
	} else {
		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);

		if (!dev->parent)
			sci_port_setup_transports(idev->owning_port,
						  sci_rnc->remote_node_index);
	}
}

static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
	union scu_remote_node_context *rnc_buffer;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);

	rnc_buffer->ssp.is_valid = false;

	sci_remote_device_post_request(rnc_to_dev(sci_rnc),
				       SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}

static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;

	/* Check to see if we have gotten back to the initial state because
	 * someone requested to destroy the remote node context object.
	 */
	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
		rnc->destination_state = RNC_DEST_UNSPECIFIED;
		sci_remote_node_context_notify_user(rnc);

		smp_wmb();
		wake_up(&ihost->eventq);
	}
}

static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);

	sci_remote_node_context_validate_context_buffer(sci_rnc);
}

static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	/* Terminate all outstanding requests. */
	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
	sci_remote_node_context_invalidate_context_buffer(rnc);
}

static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev;
	struct domain_device *dev;

	idev = rnc_to_dev(rnc);
	dev = idev->domain_dev;

	/*
	 * For direct attached SATA devices we need to clear the TLCR
	 * NCQ to TCi tag mapping on the phy.  In cases where we resume
	 * because of a target reset we also need to update the
	 * STPTLDARNI register with the RNi of the device.
	 */
	if (dev_is_sata(dev) && !dev->parent)
		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);

	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}

static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	enum sci_remote_node_context_destination_state dest_select;
	int tell_user = 1;

	dest_select = rnc->destination_state;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	if ((dest_select == RNC_DEST_SUSPENDED) ||
	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
		sci_remote_node_context_suspend(
			rnc, rnc->suspend_reason,
			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);

		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
			tell_user = 0;  /* Wait until ready again. */
	}
	if (tell_user)
		sci_remote_node_context_notify_user(rnc);
}

static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);

	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);
	struct isci_host *ihost = idev->owning_port->owning_controller;
	u32 new_count = rnc->suspend_count + 1;

	if (new_count == 0)
		rnc->suspend_count = 1;
	else
		rnc->suspend_count = new_count;
	smp_wmb();

	/* Terminate outstanding requests pending abort. */
	sci_remote_device_abort_requests_pending_abort(idev);

	wake_up(&ihost->eventq);
	sci_remote_node_context_continue_state_transitions(rnc);
}

static void sci_remote_node_context_await_suspend_state_exit(
	struct sci_base_state_machine *sm)
{
	struct sci_remote_node_context *rnc
		= container_of(sm, typeof(*rnc), sm);
	struct isci_remote_device *idev = rnc_to_dev(rnc);

	if (dev_is_sata(idev->domain_dev))
		isci_dev_set_hang_detection_timeout(idev, 0);
}

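/*
 * Per-state entry/exit handlers for the remote node context state machine,
 * indexed by enum scis_sds_remote_node_context_states.
 */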
static const struct sci_base_state sci_remote_node_context_state_table[] = {
	[SCI_RNC_INITIAL] = {
		.enter_state = sci_remote_node_context_initial_state_enter,
	},
	[SCI_RNC_POSTING] = {
		.enter_state = sci_remote_node_context_posting_state_enter,
	},
	[SCI_RNC_INVALIDATING] = {
		.enter_state = sci_remote_node_context_invalidating_state_enter,
	},
	[SCI_RNC_RESUMING] = {
		.enter_state = sci_remote_node_context_resuming_state_enter,
	},
	[SCI_RNC_READY] = {
		.enter_state = sci_remote_node_context_ready_state_enter,
	},
	[SCI_RNC_TX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_suspended_state_enter,
	},
	[SCI_RNC_TX_RX_SUSPENDED] = {
		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
	},
	[SCI_RNC_AWAIT_SUSPENSION] = {
		.exit_state = sci_remote_node_context_await_suspend_state_exit,
	},
};

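/**
 * sci_remote_node_context_construct() - initialize a remote node context
 * @rnc: The remote node context to construct.
 * @remote_node_index: The remote node index assigned to this context.
 *
 * Zero the context, record its remote node index, and start its state
 * machine in the initial state.
 */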
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
					    u16 remote_node_index)
{
	memset(rnc, 0, sizeof(struct sci_remote_node_context));

	rnc->remote_node_index = remote_node_index;
	rnc->destination_state = RNC_DEST_UNSPECIFIED;

	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}

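/**
 * sci_remote_node_context_event_handler() - process an SCU event for an RNC
 * @sci_rnc: The remote node context the event applies to.
 * @event_code: The SCU event code to decode.
 *
 * Advance the RNC state machine according to the event (post complete,
 * invalidate complete, resume release, or a hardware suspension).
 *
 * Return: SCI_SUCCESS if the event was handled, SCI_FAILURE if the event is
 * not expected in the current state, or SCI_FAILURE_INVALID_STATE if the
 * state machine is in an unrecognized state.
 */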
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
							   u32 event_code)
{
	enum scis_sds_remote_node_context_states state;
	u32 next_state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_POSTING:
		switch (scu_get_event_code(event_code)) {
		case SCU_EVENT_POST_RNC_COMPLETE:
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_INVALIDATING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
			if (sci_rnc->destination_state == RNC_DEST_FINAL)
				next_state = SCI_RNC_INITIAL;
			else
				next_state = SCI_RNC_POSTING;
			sci_change_state(&sci_rnc->sm, next_state);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being invalidated anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being "
					"invalidated.\n", __func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_RESUMING:
		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
			sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
		} else {
			switch (scu_get_event_type(event_code)) {
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
				/* We really don't care if the hardware is going to suspend
				 * the device since it's being resumed anyway */
				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
					"%s: SCIC Remote Node Context 0x%p was "
					"suspended by hardware while being resumed.\n",
					__func__, sci_rnc);
				break;
			default:
				goto out;
			}
		}
		break;
	case SCI_RNC_READY:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
			sci_rnc->suspend_type = scu_get_event_type(event_code);
			break;
		default:
			goto out;
		}
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		switch (scu_get_event_type(event_code)) {
		case SCU_EVENT_TL_RNC_SUSPEND_TX:
			next_state = SCI_RNC_TX_SUSPENDED;
			break;
		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
			next_state = SCI_RNC_TX_RX_SUSPENDED;
			break;
		default:
			goto out;
		}
		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
			sci_change_state(&sci_rnc->sm, next_state);
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	return SCI_SUCCESS;

 out:
	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
		 "%s: code: %#x state: %s\n", __func__, event_code,
		 rnc_state_name(state));
	return SCI_FAILURE;
}

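/**
 * sci_remote_node_context_destruct() - request destruction of an RNC
 * @sci_rnc: The remote node context to tear down.
 * @cb_fn: The user callback to invoke once destruction completes.
 * @cb_p: Cookie passed back to @cb_fn.
 *
 * Record the final destination state and, where necessary, start
 * invalidating the hardware context.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the state machine is
 * in an unrecognized state.
 */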
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
						      scics_sds_remote_node_context_callback cb_fn,
						      void *cb_p)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
		return SCI_SUCCESS;
	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
		return SCI_SUCCESS;
	case SCI_RNC_INITIAL:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state: %s\n", __func__,
			 rnc_state_name(state));
		/* We have decided that the destruct request on the remote node
		 * context cannot fail, since it is either already in the
		 * initial/destroyed state or it can be destroyed.
		 */
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

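/**
 * sci_remote_node_context_suspend() - request suspension of an RNC
 * @sci_rnc: The remote node context to suspend.
 * @suspend_reason: Hardware or software reason for the suspension.
 * @suspend_type: The SCU suspension event type being requested or expected.
 *
 * Record the requested suspension type and reason.  Depending on the current
 * state, the suspension may be deferred until the context becomes ready,
 * acknowledged immediately if the context is already suspended, or initiated
 * now: for software-initiated suspensions the suspend command is posted to
 * the device and the state machine moves to SCI_RNC_AWAIT_SUSPENSION.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the context is being
 * destroyed or is in an unexpected state.
 */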
enum sci_status sci_remote_node_context_suspend(
			struct sci_remote_node_context *sci_rnc,
			enum sci_remote_node_suspension_reasons suspend_reason,
			u32 suspend_type)
{
	enum scis_sds_remote_node_context_states state
		= sci_rnc->sm.current_state_id;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
	enum sci_status status = SCI_FAILURE_INVALID_STATE;
	enum sci_remote_node_context_destination_state dest_param =
		RNC_DEST_UNSPECIFIED;

	dev_dbg(scirdev_to_dev(idev),
		"%s: current state %s, current suspend_type %x dest state %d,"
			" arg suspend_reason %d, arg suspend_type %x",
		__func__, rnc_state_name(state), sci_rnc->suspend_type,
		sci_rnc->destination_state, suspend_reason,
		suspend_type);

	/* Disable automatic state continuations if explicitly suspending. */
	if ((suspend_reason == SCI_HW_SUSPEND) ||
	    (sci_rnc->destination_state == RNC_DEST_FINAL))
		dest_param = sci_rnc->destination_state;

	switch (state) {
	case SCI_RNC_READY:
		break;
	case SCI_RNC_INVALIDATING:
		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
			dev_warn(scirdev_to_dev(idev),
				 "%s: already destroying %p\n",
				 __func__, sci_rnc);
			return SCI_FAILURE_INVALID_STATE;
		}
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_RESUMING:
		/* Fall through and handle like SCI_RNC_POSTING */
	case SCI_RNC_POSTING:
		/* Set the destination state to SUSPENDED: this signals the
		 * SCI_RNC_READY state entry handler that a suspension needs
		 * to be done immediately.
		 */
		if (sci_rnc->destination_state != RNC_DEST_FINAL)
			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
		sci_rnc->suspend_type = suspend_type;
		sci_rnc->suspend_reason = suspend_reason;
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_TX_RX_SUSPENDED:
		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
			status = SCI_SUCCESS;
		break;
	case SCI_RNC_AWAIT_SUSPENSION:
		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
		    || (suspend_type == sci_rnc->suspend_type))
			return SCI_SUCCESS;
		break;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
	sci_rnc->destination_state = dest_param;
	sci_rnc->suspend_type = suspend_type;
	sci_rnc->suspend_reason = suspend_reason;

	if (status == SCI_SUCCESS) { /* Already in the destination state? */
		struct isci_host *ihost = idev->owning_port->owning_controller;

		wake_up_all(&ihost->eventq); /* Let observers look. */
		return SCI_SUCCESS;
	}
	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {

		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
			isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		sci_remote_device_post_request(
			idev, SCI_SOFTWARE_SUSPEND_CMD);
	}
	if (state != SCI_RNC_AWAIT_SUSPENSION)
		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);

	return SCI_SUCCESS;
}

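/**
 * sci_remote_node_context_resume() - request resumption of an RNC
 * @sci_rnc: The remote node context to resume.
 * @cb_fn: The user callback to invoke once the context is ready again.
 * @cb_p: Cookie passed back to @cb_fn.
 *
 * Arm the context for resumption and, depending on the current state and
 * device type, post, invalidate-and-repost, or resume the hardware context.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the context cannot be
 * resumed from its current state.
 */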
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
						    scics_sds_remote_node_context_callback cb_fn,
						    void *cb_p)
{
	enum scis_sds_remote_node_context_states state;
	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);

	state = sci_rnc->sm.current_state_id;
	dev_dbg(scirdev_to_dev(idev),
		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
			"dev resume path %s\n",
		__func__, rnc_state_name(state), cb_fn, cb_p,
		sci_rnc->destination_state,
		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
			? "<abort active>" : "<normal>");

	switch (state) {
	case SCI_RNC_INITIAL:
		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			return SCI_FAILURE_INVALID_STATE;

		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
							RNC_DEST_READY);
		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
			sci_remote_node_context_construct_buffer(sci_rnc);
			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
		}
		return SCI_SUCCESS;

	case SCI_RNC_POSTING:
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_RESUMING:
		/* We are still waiting to post when a resume was
		 * requested.
		 */
		switch (sci_rnc->destination_state) {
		case RNC_DEST_SUSPENDED:
		case RNC_DEST_SUSPENDED_RESUME:
			/* Previously waiting to suspend after posting.
			 * Now continue onto resumption.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_SUSPENDED_RESUME);
			break;
		default:
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p,
				RNC_DEST_READY);
			break;
		}
		return SCI_SUCCESS;

	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
		{
			struct domain_device *dev = idev->domain_dev;
			/* If this is an expander attached SATA device we must
			 * invalidate and repost the RNC since this is the only
			 * way to clear the TCi to NCQ tag mapping table for
			 * the RNi.  For all other device types we can just
			 * resume.
			 */
			sci_remote_node_context_setup_to_resume(
				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);

			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
				if ((dev_is_sata(dev) && dev->parent) ||
				    (sci_rnc->destination_state == RNC_DEST_FINAL))
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_INVALIDATING);
				else
					sci_change_state(&sci_rnc->sm,
							 SCI_RNC_RESUMING);
			}
		}
		return SCI_SUCCESS;

	case SCI_RNC_AWAIT_SUSPENSION:
		sci_remote_node_context_setup_to_resume(
			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

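/**
 * sci_remote_node_context_start_io() - check that an RNC can accept I/O
 * @sci_rnc: The remote node context to check.
 * @ireq: The I/O request being started (currently unused here).
 *
 * Return: SCI_SUCCESS if the context is ready,
 * SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED if it is suspended or awaiting
 * suspension, or SCI_FAILURE_INVALID_STATE otherwise.
 */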
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
							     struct isci_request *ireq)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;

	switch (state) {
	case SCI_RNC_READY:
		return SCI_SUCCESS;
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_TX_RX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %s\n", __func__,
			 rnc_state_name(state));
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	default:
		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: invalid state %s\n", __func__,
			rnc_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

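/**
 * sci_remote_node_context_start_task() - resume an RNC for a task management request
 * @sci_rnc: The remote node context to resume.
 * @ireq: The task management request being started (currently unused here).
 * @cb_fn: The user callback to invoke once the context is ready.
 * @cb_p: Cookie passed back to @cb_fn.
 *
 * Return: The status of the underlying resume request; a warning is logged
 * if the resume fails.
 */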
enum sci_status sci_remote_node_context_start_task(
	struct sci_remote_node_context *sci_rnc,
	struct isci_request *ireq,
	scics_sds_remote_node_context_callback cb_fn,
	void *cb_p)
{
	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
								cb_fn, cb_p);
	if (status != SCI_SUCCESS)
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			"%s: resume failed: %d\n", __func__, status);
	return status;
}

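/**
 * sci_remote_node_context_is_safe_to_abort() - check whether aborting is safe
 * @sci_rnc: The remote node context to check.
 *
 * Return: 1 if the context is invalidating or fully suspended (so outstanding
 * requests can safely be aborted), 0 otherwise.
 */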
int sci_remote_node_context_is_safe_to_abort(
	struct sci_remote_node_context *sci_rnc)
{
	enum scis_sds_remote_node_context_states state;

	state = sci_rnc->sm.current_state_id;
	switch (state) {
	case SCI_RNC_INVALIDATING:
	case SCI_RNC_TX_RX_SUSPENDED:
		return 1;
	case SCI_RNC_POSTING:
	case SCI_RNC_RESUMING:
	case SCI_RNC_READY:
	case SCI_RNC_TX_SUSPENDED:
	case SCI_RNC_AWAIT_SUSPENSION:
	case SCI_RNC_INITIAL:
		return 0;
	default:
		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
			 "%s: invalid state %d\n", __func__, state);
		return 0;
	}
}