xref: /openbmc/linux/drivers/scsi/isci/host.c (revision 827634ad)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 #include <linux/circ_buf.h>
56 #include <linux/device.h>
57 #include <scsi/sas.h>
58 #include "host.h"
59 #include "isci.h"
60 #include "port.h"
61 #include "probe_roms.h"
62 #include "remote_device.h"
63 #include "request.h"
64 #include "scu_completion_codes.h"
65 #include "scu_event_codes.h"
66 #include "registers.h"
67 #include "scu_remote_node_context.h"
68 #include "scu_task_context.h"
69 
70 #define SCU_CONTEXT_RAM_INIT_STALL_TIME      200
71 
72 #define smu_max_ports(dcc_value) \
73 	(\
74 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
75 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
76 	)
77 
78 #define smu_max_task_contexts(dcc_value)	\
79 	(\
80 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
81 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
82 	)
83 
84 #define smu_max_rncs(dcc_value) \
85 	(\
86 		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
87 		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
88 	)
89 
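/*
 * Illustrative sketch (not authoritative): the smu_max_*() macros above
 * decode zero-based bit-fields from the SMU device context capacity
 * register, hence the trailing "+ 1".  Assuming dcc_value is read the
 * way sci_controller_initialize() reads it:
 *
 *	u32 dcc_value = readl(&ihost->smu_registers->device_context_capacity);
 *	u32 ports = smu_max_ports(dcc_value);         // field value 3 -> 4 ports
 *	u32 tcs   = smu_max_task_contexts(dcc_value); // field value 255 -> 256 TCs
 *	u32 rncs  = smu_max_rncs(dcc_value);
 */
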
90 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
91 
92 /**
93  * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
94  *
95  * The number of milliseconds to wait while a given phy is consuming power
96  * before allowing another set of phys to consume power. Ultimately, this will
97  * be specified by an OEM parameter.
98  */
99 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
100 
101 /**
102  * NORMALIZE_PUT_POINTER() -
103  *
104  * This macro will normalize the completion queue put pointer so its value can
105  * be used as an array index.
106  */
107 #define NORMALIZE_PUT_POINTER(x) \
108 	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
109 
110 
111 /**
112  * NORMALIZE_EVENT_POINTER() -
113  *
114  * This macro will normalize the completion queue event entry so its value can
115  * be used as an index.
116  */
117 #define NORMALIZE_EVENT_POINTER(x) \
118 	(\
119 		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
120 		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
121 	)
122 
123 /**
124  * NORMALIZE_GET_POINTER() -
125  *
126  * This macro will normalize the completion queue get pointer so its value can
127  * be used as an index into an array
128  */
129 #define NORMALIZE_GET_POINTER(x) \
130 	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
131 
132 /**
133  * NORMALIZE_GET_POINTER_CYCLE_BIT() -
134  *
135  * This macro will normalize the completion queue cycle pointer so it matches
136  * the completion queue cycle bit
137  */
138 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
139 	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
140 
141 /**
142  * COMPLETION_QUEUE_CYCLE_BIT() -
143  *
144  * This macro will return the cycle bit of the completion queue entry
145  */
146 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
147 
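/*
 * Illustrative walk-through of the cycle-bit scheme (a sketch, not part
 * of the original source): the hardware toggles bit 31 of each entry
 * every time it wraps the queue, and the driver mirrors that in the
 * SMU_CQGR_CYCLE_BIT of its get pointer.  An entry is fresh only when
 * the two agree:
 *
 *	u32 get = ihost->completion_queue_get;
 *	u32 ent = ihost->completion_queue[NORMALIZE_GET_POINTER(get)];
 *
 *	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get) == COMPLETION_QUEUE_CYCLE_BIT(ent))
 *		; // valid, unprocessed completion entry
 *
 * At init the entries are seeded with 0x80000000 while the get pointer
 * cycle starts at 0, so the two disagree and the queue reads as empty.
 */
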
148 /* Init the state machine and call the state entry function (if any) */
149 void sci_init_sm(struct sci_base_state_machine *sm,
150 		 const struct sci_base_state *state_table, u32 initial_state)
151 {
152 	sci_state_transition_t handler;
153 
154 	sm->initial_state_id    = initial_state;
155 	sm->previous_state_id   = initial_state;
156 	sm->current_state_id    = initial_state;
157 	sm->state_table         = state_table;
158 
159 	handler = sm->state_table[initial_state].enter_state;
160 	if (handler)
161 		handler(sm);
162 }
163 
164 /* Call the state exit fn, update the current state, call the state entry fn */
165 void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
166 {
167 	sci_state_transition_t handler;
168 
169 	handler = sm->state_table[sm->current_state_id].exit_state;
170 	if (handler)
171 		handler(sm);
172 
173 	sm->previous_state_id = sm->current_state_id;
174 	sm->current_state_id = next_state;
175 
176 	handler = sm->state_table[sm->current_state_id].enter_state;
177 	if (handler)
178 		handler(sm);
179 }
180 
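/*
 * Usage sketch: callers provide a state table indexed by state id and
 * drive transitions explicitly.  The controller state machine below is
 * a concrete example (see sci_controller_construct() and
 * sci_controller_initial_state_enter()):
 *
 *	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
 *	...
 *	sci_change_state(&ihost->sm, SCIC_RESET);
 *
 * sci_change_state() runs the exit handler of the current state before
 * the enter handler of the next one; either handler may be NULL.
 */
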
181 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
182 {
183 	u32 get_value = ihost->completion_queue_get;
184 	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
185 
186 	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
187 	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
188 		return true;
189 
190 	return false;
191 }
192 
193 static bool sci_controller_isr(struct isci_host *ihost)
194 {
195 	if (sci_controller_completion_queue_has_entries(ihost))
196 		return true;
197 
198 	/* We have a spurious interrupt; it could be that we have already
199 	 * emptied the completion queue from a previous interrupt.
200 	 * FIXME: really!?
201 	 */
202 	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
203 
204 	/* There is a race in the hardware that could cause us not to be
205 	 * notified of an interrupt completion if we do not take this
206 	 * step.  We will mask then unmask the interrupts so that if
207 	 * another interrupt is pending after the clearing of the
208 	 * interrupt source we get the next interrupt message.
209 	 */
210 	spin_lock(&ihost->scic_lock);
211 	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
212 		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
213 		writel(0, &ihost->smu_registers->interrupt_mask);
214 	}
215 	spin_unlock(&ihost->scic_lock);
216 
217 	return false;
218 }
219 
220 irqreturn_t isci_msix_isr(int vec, void *data)
221 {
222 	struct isci_host *ihost = data;
223 
224 	if (sci_controller_isr(ihost))
225 		tasklet_schedule(&ihost->completion_tasklet);
226 
227 	return IRQ_HANDLED;
228 }
229 
230 static bool sci_controller_error_isr(struct isci_host *ihost)
231 {
232 	u32 interrupt_status;
233 
234 	interrupt_status =
235 		readl(&ihost->smu_registers->interrupt_status);
236 	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
237 
238 	if (interrupt_status != 0) {
239 		/*
240 		 * There is an error interrupt pending, so let it through and
241 		 * handle it in the callback. */
242 		return true;
243 	}
244 
245 	/*
246 	 * There is a race in the hardware that could cause us not to be notified
247 	 * of an interrupt completion if we do not take this step.  We will mask
248 	 * then unmask the error interrupts so if there was another interrupt
249 	 * pending we will be notified.
250 	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
251 	writel(0xff, &ihost->smu_registers->interrupt_mask);
252 	writel(0, &ihost->smu_registers->interrupt_mask);
253 
254 	return false;
255 }
256 
257 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
258 {
259 	u32 index = SCU_GET_COMPLETION_INDEX(ent);
260 	struct isci_request *ireq = ihost->reqs[index];
261 
262 	/* Make sure that we really want to process this IO request */
263 	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
264 	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
265 	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
266 		/* Yep, this is a valid IO request; pass it along to the
267 		 * IO request handler.
268 		 */
269 		sci_io_request_tc_completion(ireq, ent);
270 }
271 
272 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
273 {
274 	u32 index;
275 	struct isci_request *ireq;
276 	struct isci_remote_device *idev;
277 
278 	index = SCU_GET_COMPLETION_INDEX(ent);
279 
280 	switch (scu_get_command_request_type(ent)) {
281 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
282 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
283 		ireq = ihost->reqs[index];
284 		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
285 			 __func__, ent, ireq);
286 		/* @todo For a post TC operation we need to fail the IO
287 		 * request
288 		 */
289 		break;
290 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
291 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
292 	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
293 		idev = ihost->device_table[index];
294 		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
295 			 __func__, ent, idev);
296 		/* @todo For a post RNC operation we need to fail the
297 		 * device
298 		 */
299 		break;
300 	default:
301 		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
302 			 __func__, ent);
303 		break;
304 	}
305 }
306 
307 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
308 {
309 	u32 index;
310 	u32 frame_index;
311 
312 	struct scu_unsolicited_frame_header *frame_header;
313 	struct isci_phy *iphy;
314 	struct isci_remote_device *idev;
315 
316 	enum sci_status result = SCI_FAILURE;
317 
318 	frame_index = SCU_GET_FRAME_INDEX(ent);
319 
320 	frame_header = ihost->uf_control.buffers.array[frame_index].header;
321 	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
322 
323 	if (SCU_GET_FRAME_ERROR(ent)) {
324 		/*
325 		 * @todo If the IAF frame or SIGNATURE FIS frame has an error, will
326 		 *       this cause a problem? We expect the phy initialization will
327 		 *       fail if there is an error in the frame. */
328 		sci_controller_release_frame(ihost, frame_index);
329 		return;
330 	}
331 
332 	if (frame_header->is_address_frame) {
333 		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
334 		iphy = &ihost->phys[index];
335 		result = sci_phy_frame_handler(iphy, frame_index);
336 	} else {
337 
338 		index = SCU_GET_COMPLETION_INDEX(ent);
339 
340 		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
341 			/*
342 			 * This is a signature FIS or a frame from a direct-attached SATA
343 			 * device that has not yet been created.  In either case forward
344 			 * the frame to the PE and let it take care of the frame data. */
345 			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
346 			iphy = &ihost->phys[index];
347 			result = sci_phy_frame_handler(iphy, frame_index);
348 		} else {
349 			if (index < ihost->remote_node_entries)
350 				idev = ihost->device_table[index];
351 			else
352 				idev = NULL;
353 
354 			if (idev != NULL)
355 				result = sci_remote_device_frame_handler(idev, frame_index);
356 			else
357 				sci_controller_release_frame(ihost, frame_index);
358 		}
359 	}
360 
361 	if (result != SCI_SUCCESS) {
362 		/*
363 		 * @todo Is there any reason to report some additional error message
364 		 *       when we get this failure notification? */
365 	}
366 }
367 
368 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
369 {
370 	struct isci_remote_device *idev;
371 	struct isci_request *ireq;
372 	struct isci_phy *iphy;
373 	u32 index;
374 
375 	index = SCU_GET_COMPLETION_INDEX(ent);
376 
377 	switch (scu_get_event_type(ent)) {
378 	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
379 		/* @todo The driver did something wrong and we need to fix the condition. */
380 		dev_err(&ihost->pdev->dev,
381 			"%s: SCIC Controller 0x%p received SMU command error "
382 			"0x%x\n",
383 			__func__,
384 			ihost,
385 			ent);
386 		break;
387 
388 	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
389 	case SCU_EVENT_TYPE_SMU_ERROR:
390 	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
391 		/*
392 		 * / @todo This is a hardware failure and its likely that we want to
393 		 * /       reset the controller. */
394 		dev_err(&ihost->pdev->dev,
395 			"%s: SCIC Controller 0x%p received fatal controller "
396 			"event 0x%x\n",
397 			__func__,
398 			ihost,
399 			ent);
400 		break;
401 
402 	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
403 		ireq = ihost->reqs[index];
404 		sci_io_request_event_handler(ireq, ent);
405 		break;
406 
407 	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
408 		switch (scu_get_event_specifier(ent)) {
409 		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
410 		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
411 			ireq = ihost->reqs[index];
412 			if (ireq != NULL)
413 				sci_io_request_event_handler(ireq, ent);
414 			else
415 				dev_warn(&ihost->pdev->dev,
416 					 "%s: SCIC Controller 0x%p received "
417 					 "event 0x%x for io request object "
418 					 "that doesn't exist.\n",
419 					 __func__,
420 					 ihost,
421 					 ent);
422 
423 			break;
424 
425 		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
426 			idev = ihost->device_table[index];
427 			if (idev != NULL)
428 				sci_remote_device_event_handler(idev, ent);
429 			else
430 				dev_warn(&ihost->pdev->dev,
431 					 "%s: SCIC Controller 0x%p received "
432 					 "event 0x%x for remote device object "
433 					 "that doesn't exist.\n",
434 					 __func__,
435 					 ihost,
436 					 ent);
437 
438 			break;
439 		}
440 		break;
441 
442 	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
443 	/*
444 	 * direct the broadcast change event to the phy first and then let
445 	 * the phy redirect the broadcast change to the port object */
446 	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
447 	/*
448 	 * direct error counter event to the phy object since that is where
449 	 * we get the event notification.  This is a type 4 event. */
450 	case SCU_EVENT_TYPE_OSSP_EVENT:
451 		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
452 		iphy = &ihost->phys[index];
453 		sci_phy_event_handler(iphy, ent);
454 		break;
455 
456 	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
457 	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
458 	case SCU_EVENT_TYPE_RNC_OPS_MISC:
459 		if (index < ihost->remote_node_entries) {
460 			idev = ihost->device_table[index];
461 
462 			if (idev != NULL)
463 				sci_remote_device_event_handler(idev, ent);
464 		} else
465 			dev_err(&ihost->pdev->dev,
466 				"%s: SCIC Controller 0x%p received event 0x%x "
467 				"for remote device object 0x%x that doesn't "
468 				"exist.\n",
469 				__func__,
470 				ihost,
471 				ent,
472 				index);
473 
474 		break;
475 
476 	default:
477 		dev_warn(&ihost->pdev->dev,
478 			 "%s: SCIC Controller received unknown event code %x\n",
479 			 __func__,
480 			 ent);
481 		break;
482 	}
483 }
484 
485 static void sci_controller_process_completions(struct isci_host *ihost)
486 {
487 	u32 completion_count = 0;
488 	u32 ent;
489 	u32 get_index;
490 	u32 get_cycle;
491 	u32 event_get;
492 	u32 event_cycle;
493 
494 	dev_dbg(&ihost->pdev->dev,
495 		"%s: completion queue beginning get:0x%08x\n",
496 		__func__,
497 		ihost->completion_queue_get);
498 
499 	/* Get the component parts of the completion queue */
500 	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
501 	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
502 
503 	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
504 	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
505 
506 	while (
507 		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
508 		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
509 		) {
510 		completion_count++;
511 
512 		ent = ihost->completion_queue[get_index];
513 
514 		/* increment the get pointer and check for rollover to toggle the cycle bit */
515 		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
516 			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
517 		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
518 
519 		dev_dbg(&ihost->pdev->dev,
520 			"%s: completion queue entry:0x%08x\n",
521 			__func__,
522 			ent);
523 
524 		switch (SCU_GET_COMPLETION_TYPE(ent)) {
525 		case SCU_COMPLETION_TYPE_TASK:
526 			sci_controller_task_completion(ihost, ent);
527 			break;
528 
529 		case SCU_COMPLETION_TYPE_SDMA:
530 			sci_controller_sdma_completion(ihost, ent);
531 			break;
532 
533 		case SCU_COMPLETION_TYPE_UFI:
534 			sci_controller_unsolicited_frame(ihost, ent);
535 			break;
536 
537 		case SCU_COMPLETION_TYPE_EVENT:
538 			sci_controller_event_completion(ihost, ent);
539 			break;
540 
541 		case SCU_COMPLETION_TYPE_NOTIFY: {
542 			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
543 				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
544 			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
545 
546 			sci_controller_event_completion(ihost, ent);
547 			break;
548 		}
549 		default:
550 			dev_warn(&ihost->pdev->dev,
551 				 "%s: SCIC Controller received unknown "
552 				 "completion type %x\n",
553 				 __func__,
554 				 ent);
555 			break;
556 		}
557 	}
558 
559 	/* Update the get register if we completed one or more entries */
560 	if (completion_count > 0) {
561 		ihost->completion_queue_get =
562 			SMU_CQGR_GEN_BIT(ENABLE) |
563 			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
564 			event_cycle |
565 			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
566 			get_cycle |
567 			SMU_CQGR_GEN_VAL(POINTER, get_index);
568 
569 		writel(ihost->completion_queue_get,
570 		       &ihost->smu_registers->completion_queue_get);
571 
572 	}
573 
574 	dev_dbg(&ihost->pdev->dev,
575 		"%s: completion queue ending get:0x%08x\n",
576 		__func__,
577 		ihost->completion_queue_get);
578 
579 }
580 
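/*
 * Worked example of the get-pointer rollover in the loop above,
 * assuming a hypothetical SCU_MAX_COMPLETION_QUEUE_ENTRIES of 512:
 *
 *	get_index == 511: (511 + 1) & 512 is non-zero, so the shifted
 *	XOR toggles the cycle bit in get_cycle, and (511 + 1) & 511
 *	wraps get_index back to 0.
 *
 *	Any other index: (get_index + 1) & 512 == 0, the XOR is a
 *	no-op, and get_index simply advances.
 *
 * The event_get/event_cycle pair in the NOTIFY case follows the same
 * pattern with SCU_MAX_EVENTS.
 */
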
581 static void sci_controller_error_handler(struct isci_host *ihost)
582 {
583 	u32 interrupt_status;
584 
585 	interrupt_status =
586 		readl(&ihost->smu_registers->interrupt_status);
587 
588 	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
589 	    sci_controller_completion_queue_has_entries(ihost)) {
590 
591 		sci_controller_process_completions(ihost);
592 		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
593 	} else {
594 		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
595 			interrupt_status);
596 
597 		sci_change_state(&ihost->sm, SCIC_FAILED);
598 
599 		return;
600 	}
601 
602 	/* If we don't process any completions I am not sure that we want to do this.
603 	 * We are in the middle of a hardware fault and should probably be reset.
604 	 */
605 	writel(0, &ihost->smu_registers->interrupt_mask);
606 }
607 
608 irqreturn_t isci_intx_isr(int vec, void *data)
609 {
610 	irqreturn_t ret = IRQ_NONE;
611 	struct isci_host *ihost = data;
612 
613 	if (sci_controller_isr(ihost)) {
614 		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
615 		tasklet_schedule(&ihost->completion_tasklet);
616 		ret = IRQ_HANDLED;
617 	} else if (sci_controller_error_isr(ihost)) {
618 		spin_lock(&ihost->scic_lock);
619 		sci_controller_error_handler(ihost);
620 		spin_unlock(&ihost->scic_lock);
621 		ret = IRQ_HANDLED;
622 	}
623 
624 	return ret;
625 }
626 
627 irqreturn_t isci_error_isr(int vec, void *data)
628 {
629 	struct isci_host *ihost = data;
630 
631 	if (sci_controller_error_isr(ihost))
632 		sci_controller_error_handler(ihost);
633 
634 	return IRQ_HANDLED;
635 }
636 
637 /**
638  * isci_host_start_complete() - This function is called by the core library,
639  *    through the ISCI Module, to indicate controller start status.
640  * @ihost: This parameter specifies the ISCI host object
641  * @completion_status: This parameter specifies the completion status from the
642  *    core library.
643  *
644  */
645 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
646 {
647 	if (completion_status != SCI_SUCCESS)
648 		dev_info(&ihost->pdev->dev,
649 			"controller start timed out, continuing...\n");
650 	clear_bit(IHOST_START_PENDING, &ihost->flags);
651 	wake_up(&ihost->eventq);
652 }
653 
654 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
655 {
656 	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
657 	struct isci_host *ihost = ha->lldd_ha;
658 
659 	if (test_bit(IHOST_START_PENDING, &ihost->flags))
660 		return 0;
661 
662 	sas_drain_work(ha);
663 
664 	return 1;
665 }
666 
667 /**
668  * sci_controller_get_suggested_start_timeout() - This method returns the
669  *    suggested sci_controller_start() timeout amount.  The user is free to
670  *    use any timeout value, but this method provides the suggested minimum
671  *    start timeout value.  The returned value is based upon empirical
672  *    information determined as a result of interoperability testing.
673  * @ihost: the handle to the controller object for which to return the
674  *    suggested start timeout.
675  *
676  * This method returns the number of milliseconds for the suggested start
677  * operation timeout.
678  */
679 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
680 {
681 	/* Validate the user supplied parameters. */
682 	if (!ihost)
683 		return 0;
684 
685 	/*
686 	 * The suggested minimum timeout value for a controller start operation:
687 	 *
688 	 *     Signature FIS Timeout
689 	 *   + Phy Start Timeout
690 	 *   + Number of Phy Spin Up Intervals
691 	 *   ---------------------------------
692 	 *   Number of milliseconds for the controller start operation.
693 	 *
694 	 * NOTE: The number of phy spin up intervals will be equivalent
695 	 *       to the number of phys divided by the number of phys allowed
696 	 *       per interval - 1 (once OEM parameters are supported).
697 	 *       Currently we assume only 1 phy per interval. */
698 
699 	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
700 		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
701 		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
702 }
703 
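/*
 * Worked example, assuming SCI_MAX_PHYS is 4 for this hardware:
 *
 *	(4 - 1) phy spin-up intervals * 500 ms = 1500 ms
 *	+ phy start timeout                    =  100 ms
 *	+ SCIC_SDS_SIGNATURE_FIS_TIMEOUT (defined elsewhere, see host.h)
 *
 * i.e. the suggested start timeout is the signature FIS timeout plus
 * 1.6 seconds of phy bring-up budget.
 */
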
704 static void sci_controller_enable_interrupts(struct isci_host *ihost)
705 {
706 	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707 	writel(0, &ihost->smu_registers->interrupt_mask);
708 }
709 
710 void sci_controller_disable_interrupts(struct isci_host *ihost)
711 {
712 	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713 	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 	readl(&ihost->smu_registers->interrupt_mask); /* flush */
715 }
716 
717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718 {
719 	u32 port_task_scheduler_value;
720 
721 	port_task_scheduler_value =
722 		readl(&ihost->scu_registers->peg0.ptsg.control);
723 	port_task_scheduler_value |=
724 		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725 		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726 	writel(port_task_scheduler_value,
727 	       &ihost->scu_registers->peg0.ptsg.control);
728 }
729 
730 static void sci_controller_assign_task_entries(struct isci_host *ihost)
731 {
732 	u32 task_assignment;
733 
734 	/*
735 	 * Assign all the TCs to function 0
736 	 * TODO: Do we actually need to read this register to write it back?
737 	 */
738 
739 	task_assignment =
740 		readl(&ihost->smu_registers->task_context_assignment[0]);
741 
742 	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743 		(SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
744 		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745 
746 	writel(task_assignment,
747 		&ihost->smu_registers->task_context_assignment[0]);
748 
749 }
750 
751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752 {
753 	u32 index;
754 	u32 completion_queue_control_value;
755 	u32 completion_queue_get_value;
756 	u32 completion_queue_put_value;
757 
758 	ihost->completion_queue_get = 0;
759 
760 	completion_queue_control_value =
761 		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762 		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763 
764 	writel(completion_queue_control_value,
765 	       &ihost->smu_registers->completion_queue_control);
766 
767 
768 	/* Set the completion queue get pointer and enable the queue */
769 	completion_queue_get_value = (
770 		(SMU_CQGR_GEN_VAL(POINTER, 0))
771 		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772 		| (SMU_CQGR_GEN_BIT(ENABLE))
773 		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774 		);
775 
776 	writel(completion_queue_get_value,
777 	       &ihost->smu_registers->completion_queue_get);
778 
779 	/* Set the completion queue put pointer */
780 	completion_queue_put_value = (
781 		(SMU_CQPR_GEN_VAL(POINTER, 0))
782 		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783 		);
784 
785 	writel(completion_queue_put_value,
786 	       &ihost->smu_registers->completion_queue_put);
787 
788 	/* Initialize the cycle bit of the completion queue entries */
789 	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790 		/*
791 		 * If get.cycle_bit != completion_queue.cycle_bit
792 		 * it's not a valid completion queue entry
793 		 * so at system start all entries are invalid */
794 		ihost->completion_queue[index] = 0x80000000;
795 	}
796 }
797 
798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799 {
800 	u32 frame_queue_control_value;
801 	u32 frame_queue_get_value;
802 	u32 frame_queue_put_value;
803 
804 	/* Write the queue size */
805 	frame_queue_control_value =
806 		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807 
808 	writel(frame_queue_control_value,
809 	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810 
811 	/* Setup the get pointer for the unsolicited frame queue */
812 	frame_queue_get_value = (
813 		SCU_UFQGP_GEN_VAL(POINTER, 0)
814 		|  SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815 		);
816 
817 	writel(frame_queue_get_value,
818 	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819 	/* Setup the put pointer for the unsolicited frame queue */
820 	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821 	writel(frame_queue_put_value,
822 	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823 }
824 
825 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826 {
827 	if (ihost->sm.current_state_id == SCIC_STARTING) {
828 		/*
829 		 * We move into the ready state, because some of the phys/ports
830 		 * may be up and operational.
831 		 */
832 		sci_change_state(&ihost->sm, SCIC_READY);
833 
834 		isci_host_start_complete(ihost, status);
835 	}
836 }
837 
838 static bool is_phy_starting(struct isci_phy *iphy)
839 {
840 	enum sci_phy_states state;
841 
842 	state = iphy->sm.current_state_id;
843 	switch (state) {
844 	case SCI_PHY_STARTING:
845 	case SCI_PHY_SUB_INITIAL:
846 	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847 	case SCI_PHY_SUB_AWAIT_IAF_UF:
848 	case SCI_PHY_SUB_AWAIT_SAS_POWER:
849 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 	case SCI_PHY_SUB_AWAIT_OSSP_EN:
853 	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
854 	case SCI_PHY_SUB_FINAL:
855 		return true;
856 	default:
857 		return false;
858 	}
859 }
860 
861 bool is_controller_start_complete(struct isci_host *ihost)
862 {
863 	int i;
864 
865 	for (i = 0; i < SCI_MAX_PHYS; i++) {
866 		struct isci_phy *iphy = &ihost->phys[i];
867 		u32 state = iphy->sm.current_state_id;
868 
869 		/* in apc mode we need to check every phy, in
870 		 * mpc mode we only need to check phys that have
871 		 * been configured into a port
872 		 */
873 		if (is_port_config_apc(ihost))
874 			/* pass */;
875 		else if (!phy_get_non_dummy_port(iphy))
876 			continue;
877 
878 		/* The controller start operation is complete iff:
879 		 * - all links have been given an opportunity to start
880 		 * - each phy either has no indication of a connected device,
881 		 *   or has an indication of a connected device that has
882 		 *   finished the link training process.
883 		 */
884 		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
885 		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
886 		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
887 		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
888 			return false;
889 	}
890 
891 	return true;
892 }
893 
894 /**
895  * sci_controller_start_next_phy - start phy
896  * @ihost: controller
897  *
898  * If all the phys have been started, then attempt to transition the
899  * controller to the READY state and inform the user
900  * (sci_cb_controller_start_complete()).
901  */
902 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
903 {
904 	struct sci_oem_params *oem = &ihost->oem_parameters;
905 	struct isci_phy *iphy;
906 	enum sci_status status;
907 
908 	status = SCI_SUCCESS;
909 
910 	if (ihost->phy_startup_timer_pending)
911 		return status;
912 
913 	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
914 		if (is_controller_start_complete(ihost)) {
915 			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
916 			sci_del_timer(&ihost->phy_timer);
917 			ihost->phy_startup_timer_pending = false;
918 		}
919 	} else {
920 		iphy = &ihost->phys[ihost->next_phy_to_start];
921 
922 		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
923 			if (phy_get_non_dummy_port(iphy) == NULL) {
924 				ihost->next_phy_to_start++;
925 
926 				/* Caution, recursion ahead; be forewarned.
927 				 *
928 				 * The PHY was never added to a PORT in MPC mode,
929 				 * so start the next phy in sequence.  This phy
930 				 * will never go link up and will not draw power;
931 				 * the OEM parameters either configured the phy
932 				 * incorrectly for the PORT or it was never
933 				 * assigned to a PORT.
934 				 */
935 				return sci_controller_start_next_phy(ihost);
936 			}
937 		}
938 
939 		status = sci_phy_start(iphy);
940 
941 		if (status == SCI_SUCCESS) {
942 			sci_mod_timer(&ihost->phy_timer,
943 				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
944 			ihost->phy_startup_timer_pending = true;
945 		} else {
946 			dev_warn(&ihost->pdev->dev,
947 				 "%s: Controller start operation failed "
948 				 "to start phy %d because of status "
949 				 "%d.\n",
950 				 __func__,
951 				 ihost->phys[ihost->next_phy_to_start].phy_index,
952 				 status);
953 		}
954 
955 		ihost->next_phy_to_start++;
956 	}
957 
958 	return status;
959 }
960 
961 static void phy_startup_timeout(unsigned long data)
962 {
963 	struct sci_timer *tmr = (struct sci_timer *)data;
964 	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
965 	unsigned long flags;
966 	enum sci_status status;
967 
968 	spin_lock_irqsave(&ihost->scic_lock, flags);
969 
970 	if (tmr->cancel)
971 		goto done;
972 
973 	ihost->phy_startup_timer_pending = false;
974 
975 	do {
976 		status = sci_controller_start_next_phy(ihost);
977 	} while (status != SCI_SUCCESS);
978 
979 done:
980 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
981 }
982 
983 static u16 isci_tci_active(struct isci_host *ihost)
984 {
985 	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
986 }
987 
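/*
 * Sketch of the TCi pool accounting (isci_tci_alloc()/isci_tci_free()
 * live elsewhere in this file): tags are handed out by advancing
 * tci_head and returned by advancing tci_tail, so for the power-of-two
 * pool CIRC_CNT() from <linux/circ_buf.h> reduces to:
 *
 *	(ihost->tci_head - ihost->tci_tail) & (SCI_MAX_IO_REQUESTS - 1)
 *
 * e.g. head == 5, tail == 2 -> 3 task contexts currently outstanding.
 */
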
988 static enum sci_status sci_controller_start(struct isci_host *ihost,
989 					     u32 timeout)
990 {
991 	enum sci_status result;
992 	u16 index;
993 
994 	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
995 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
996 			 __func__, ihost->sm.current_state_id);
997 		return SCI_FAILURE_INVALID_STATE;
998 	}
999 
1000 	/* Build the TCi free pool */
1001 	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1002 	ihost->tci_head = 0;
1003 	ihost->tci_tail = 0;
1004 	for (index = 0; index < ihost->task_context_entries; index++)
1005 		isci_tci_free(ihost, index);
1006 
1007 	/* Build the RNi free pool */
1008 	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1009 					 ihost->remote_node_entries);
1010 
1011 	/*
1012 	 * Before anything else let's make sure we will not be
1013 	 * interrupted by the hardware.
1014 	 */
1015 	sci_controller_disable_interrupts(ihost);
1016 
1017 	/* Enable the port task scheduler */
1018 	sci_controller_enable_port_task_scheduler(ihost);
1019 
1020 	/* Assign all the task entries to the ihost physical function */
1021 	sci_controller_assign_task_entries(ihost);
1022 
1023 	/* Now initialize the completion queue */
1024 	sci_controller_initialize_completion_queue(ihost);
1025 
1026 	/* Initialize the unsolicited frame queue for use */
1027 	sci_controller_initialize_unsolicited_frame_queue(ihost);
1028 
1029 	/* Start all of the ports on this controller */
1030 	for (index = 0; index < ihost->logical_port_entries; index++) {
1031 		struct isci_port *iport = &ihost->ports[index];
1032 
1033 		result = sci_port_start(iport);
1034 		if (result)
1035 			return result;
1036 	}
1037 
1038 	sci_controller_start_next_phy(ihost);
1039 
1040 	sci_mod_timer(&ihost->timer, timeout);
1041 
1042 	sci_change_state(&ihost->sm, SCIC_STARTING);
1043 
1044 	return SCI_SUCCESS;
1045 }
1046 
1047 void isci_host_start(struct Scsi_Host *shost)
1048 {
1049 	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050 	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1051 
1052 	set_bit(IHOST_START_PENDING, &ihost->flags);
1053 
1054 	spin_lock_irq(&ihost->scic_lock);
1055 	sci_controller_start(ihost, tmo);
1056 	sci_controller_enable_interrupts(ihost);
1057 	spin_unlock_irq(&ihost->scic_lock);
1058 }
1059 
1060 static void isci_host_stop_complete(struct isci_host *ihost)
1061 {
1062 	sci_controller_disable_interrupts(ihost);
1063 	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1064 	wake_up(&ihost->eventq);
1065 }
1066 
1067 static void sci_controller_completion_handler(struct isci_host *ihost)
1068 {
1069 	/* Empty out the completion queue */
1070 	if (sci_controller_completion_queue_has_entries(ihost))
1071 		sci_controller_process_completions(ihost);
1072 
1073 	/* Clear the interrupt and enable all interrupts again */
1074 	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1075 	/* Could we write the value of SMU_ISR_COMPLETION? */
1076 	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1077 	writel(0, &ihost->smu_registers->interrupt_mask);
1078 }
1079 
1080 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081 {
1082 	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1083 	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1084 		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1085 			/* Normal notification (task_done) */
1086 			dev_dbg(&ihost->pdev->dev,
1087 				"%s: Normal - ireq/task = %p/%p\n",
1088 				__func__, ireq, task);
1089 			task->lldd_task = NULL;
1090 			task->task_done(task);
1091 		} else {
1092 			dev_dbg(&ihost->pdev->dev,
1093 				"%s: Error - ireq/task = %p/%p\n",
1094 				__func__, ireq, task);
1095 			if (sas_protocol_ata(task->task_proto))
1096 				task->lldd_task = NULL;
1097 			sas_task_abort(task);
1098 		}
1099 	} else
1100 		task->lldd_task = NULL;
1101 
1102 	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1103 		wake_up_all(&ihost->eventq);
1104 
1105 	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1106 		isci_free_tag(ihost, ireq->io_tag);
1107 }
1108 /**
1109  * isci_host_completion_routine() - This function is the delayed service
1110  *    routine that calls the sci core library's completion handler. It's
1111  *    scheduled as a tasklet from the interrupt service routine when interrupts
1112  *    are in use, or set as the timeout function in polled mode.
1113  * @data: This parameter specifies the ISCI host object
1114  *
1115  */
1116 void isci_host_completion_routine(unsigned long data)
1117 {
1118 	struct isci_host *ihost = (struct isci_host *)data;
1119 	u16 active;
1120 
1121 	spin_lock_irq(&ihost->scic_lock);
1122 	sci_controller_completion_handler(ihost);
1123 	spin_unlock_irq(&ihost->scic_lock);
1124 
1125 	/*
1126 	 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
1127 	 * issued as a workaround for a hardware issue
1128 	 */
1129 	active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1130 
1131 	/*
1132 	 * the coalescence timeout doubles at each encoding step, so
1133 	 * update it based on the ilog2 value of the outstanding requests
1134 	 */
1135 	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1136 	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1137 	       &ihost->smu_registers->interrupt_coalesce_control);
1138 }
1139 
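/*
 * Worked example of the coalescence update above: with 32 requests
 * outstanding after subtracting the dummy TCs, ilog2(32) == 5, so the
 * register gets NUMBER == 32 and TIMER == ISCI_COALESCE_BASE + 5.
 * Each +1 step of the timer encode roughly doubles the coalesce
 * timeout (see the encoding table in
 * sci_controller_set_interrupt_coalescence() below), so interrupt
 * latency is allowed to grow with queue depth.
 */
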
1140 /**
1141  * sci_controller_stop() - This method will stop an individual controller
1142  *    object.  This method will invoke the associated user callback upon
1143  *    completion.  The completion callback is called when the following
1144  *    conditions are met: (1) the method return status is SCI_SUCCESS; (2) the
1145  *    controller has been quiesced. This method will ensure that all IO
1146  *    requests are quiesced, phys are stopped, and all additional operation by
1147  *    the hardware is halted.
1148  * @ihost: the handle to the controller object to stop.
1149  * @timeout: This parameter specifies the number of milliseconds in which the
1150  *    stop operation should complete.
1151  *
1152  * The controller must be in the STARTED or STOPPED state. Indicate if the
1153  * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1154  * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1155  * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1156  * controller is not either in the STARTED or STOPPED states.
1157  */
1158 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1159 {
1160 	if (ihost->sm.current_state_id != SCIC_READY) {
1161 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1162 			 __func__, ihost->sm.current_state_id);
1163 		return SCI_FAILURE_INVALID_STATE;
1164 	}
1165 
1166 	sci_mod_timer(&ihost->timer, timeout);
1167 	sci_change_state(&ihost->sm, SCIC_STOPPING);
1168 	return SCI_SUCCESS;
1169 }
1170 
1171 /**
1172  * sci_controller_reset() - This method will reset the supplied core
1173  *    controller regardless of the state of said controller.  This operation is
1174  *    considered destructive.  In other words, all current operations are wiped
1175  *    out.  No IO completions for outstanding devices occur.  Outstanding IO
1176  *    requests are not aborted or completed at the actual remote device.
1177  * @ihost: the handle to the controller object to reset.
1178  *
1179  * Indicate if the controller reset method succeeded or failed in some way.
1180  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1181  * the controller reset operation is unable to complete.
1182  */
1183 static enum sci_status sci_controller_reset(struct isci_host *ihost)
1184 {
1185 	switch (ihost->sm.current_state_id) {
1186 	case SCIC_RESET:
1187 	case SCIC_READY:
1188 	case SCIC_STOPPING:
1189 	case SCIC_FAILED:
1190 		/*
1191 		 * The reset operation is not a graceful cleanup, just
1192 		 * perform the state transition.
1193 		 */
1194 		sci_change_state(&ihost->sm, SCIC_RESETTING);
1195 		return SCI_SUCCESS;
1196 	default:
1197 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1198 			 __func__, ihost->sm.current_state_id);
1199 		return SCI_FAILURE_INVALID_STATE;
1200 	}
1201 }
1202 
1203 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1204 {
1205 	u32 index;
1206 	enum sci_status status;
1207 	enum sci_status phy_status;
1208 
1209 	status = SCI_SUCCESS;
1210 
1211 	for (index = 0; index < SCI_MAX_PHYS; index++) {
1212 		phy_status = sci_phy_stop(&ihost->phys[index]);
1213 
1214 		if (phy_status != SCI_SUCCESS &&
1215 		    phy_status != SCI_FAILURE_INVALID_STATE) {
1216 			status = SCI_FAILURE;
1217 
1218 			dev_warn(&ihost->pdev->dev,
1219 				 "%s: Controller stop operation failed to stop "
1220 				 "phy %d because of status %d.\n",
1221 				 __func__,
1222 				 ihost->phys[index].phy_index, phy_status);
1223 		}
1224 	}
1225 
1226 	return status;
1227 }
1228 
1229 
1230 /**
1231  * isci_host_deinit - shutdown frame reception and dma
1232  * @ihost: host to take down
1233  *
1234  * This is called in either the driver shutdown or the suspend path.  In
1235  * the shutdown case libsas went through port teardown and normal device
1236  * removal (i.e. physical links stayed up to service scsi_device removal
1237  * commands).  In the suspend case we disable the hardware without
1238  * notifying libsas of the link down events since we want libsas to
1239  * remember the domain across the suspend/resume cycle
1240  */
1241 void isci_host_deinit(struct isci_host *ihost)
1242 {
1243 	int i;
1244 
1245 	/* disable output data selects */
1246 	for (i = 0; i < isci_gpio_count(ihost); i++)
1247 		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1248 
1249 	set_bit(IHOST_STOP_PENDING, &ihost->flags);
1250 
1251 	spin_lock_irq(&ihost->scic_lock);
1252 	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1253 	spin_unlock_irq(&ihost->scic_lock);
1254 
1255 	wait_for_stop(ihost);
1256 
1257 	/* phy stop is after controller stop to allow port and device to
1258 	 * go idle before shutting down the phys, but the expectation is
1259 	 * that i/o has been shut off well before we reach this
1260 	 * function.
1261 	 */
1262 	sci_controller_stop_phys(ihost);
1263 
1264 	/* disable sgpio: where the above wait should give time for the
1265 	 * enclosure to sample the gpios going inactive
1266 	 */
1267 	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1268 
1269 	spin_lock_irq(&ihost->scic_lock);
1270 	sci_controller_reset(ihost);
1271 	spin_unlock_irq(&ihost->scic_lock);
1272 
1273 	/* Cancel any/all outstanding port timers */
1274 	for (i = 0; i < ihost->logical_port_entries; i++) {
1275 		struct isci_port *iport = &ihost->ports[i];
1276 		del_timer_sync(&iport->timer.timer);
1277 	}
1278 
1279 	/* Cancel any/all outstanding phy timers */
1280 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1281 		struct isci_phy *iphy = &ihost->phys[i];
1282 		del_timer_sync(&iphy->sata_timer.timer);
1283 	}
1284 
1285 	del_timer_sync(&ihost->port_agent.timer.timer);
1286 
1287 	del_timer_sync(&ihost->power_control.timer.timer);
1288 
1289 	del_timer_sync(&ihost->timer.timer);
1290 
1291 	del_timer_sync(&ihost->phy_timer.timer);
1292 }
1293 
1294 static void __iomem *scu_base(struct isci_host *isci_host)
1295 {
1296 	struct pci_dev *pdev = isci_host->pdev;
1297 	int id = isci_host->id;
1298 
1299 	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1300 }
1301 
1302 static void __iomem *smu_base(struct isci_host *isci_host)
1303 {
1304 	struct pci_dev *pdev = isci_host->pdev;
1305 	int id = isci_host->id;
1306 
1307 	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1308 }
1309 
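/*
 * Both controllers of a dual-controller device share the same PCI BARs
 * at a fixed per-controller stride; e.g. for isci_host->id == 1 the
 * SMU window would be:
 *
 *	pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE
 *
 * The "* 2" steps over iomap table slots, presumably because the
 * 64-bit BARs occupy two BAR registers each.
 */
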
1310 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1311 {
1312 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1313 
1314 	sci_change_state(&ihost->sm, SCIC_RESET);
1315 }
1316 
1317 static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1318 {
1319 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1320 
1321 	sci_del_timer(&ihost->timer);
1322 }
1323 
1324 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1325 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1326 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
1327 #define INTERRUPT_COALESCE_NUMBER_MAX                        256
1328 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
1329 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
1330 
1331 /**
1332  * sci_controller_set_interrupt_coalescence() - This method allows the user to
1333  *    configure the interrupt coalescence.
1334  * @ihost: This parameter represents the handle to the controller object
1335  *    whose interrupt coalesce register is overridden.
1336  * @coalesce_number: Used to control the number of entries in the Completion
1337  *    Queue before an interrupt is generated. If the number of entries exceed
1338  *    this number, an interrupt will be generated. The valid range of the input
1339  *    is [0, 256]. A setting of 0 results in coalescing being disabled.
1340  * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1341  *    input is [0, 2700000] . A setting of 0 is allowed and results in no
1342  *    interrupt coalescing timeout.
1343  *
1344  * Indicate if the user successfully set the interrupt coalesce parameters.
1345  * SCI_SUCCESS The user successfully updated the interrupt coalescence.
1346  * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1347  */
1348 static enum sci_status
1349 sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1350 					 u32 coalesce_number,
1351 					 u32 coalesce_timeout)
1352 {
1353 	u8 timeout_encode = 0;
1354 	u32 min = 0;
1355 	u32 max = 0;
1356 
1357 	/* Check if the input parameters fall in the range. */
1358 	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1359 		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1360 
1361 	/*
1362 	 *  Defined encoding for interrupt coalescing timeout:
1363 	 *              Value   Min      Max     Units
1364 	 *              -----   ---      ---     -----
1365 	 *              0       -        -       Disabled
1366 	 *              1       13.3     20.0    ns
1367 	 *              2       26.7     40.0
1368 	 *              3       53.3     80.0
1369 	 *              4       106.7    160.0
1370 	 *              5       213.3    320.0
1371 	 *              6       426.7    640.0
1372 	 *              7       853.3    1280.0
1373 	 *              8       1.7      2.6     us
1374 	 *              9       3.4      5.1
1375 	 *              10      6.8      10.2
1376 	 *              11      13.7     20.5
1377 	 *              12      27.3     41.0
1378 	 *              13      54.6     81.9
1379 	 *              14      109.2    163.8
1380 	 *              15      218.5    327.7
1381 	 *              16      436.9    655.4
1382 	 *              17      873.8    1310.7
1383 	 *              18      1.7      2.6     ms
1384 	 *              19      3.5      5.2
1385 	 *              20      7.0      10.5
1386 	 *              21      14.0     21.0
1387 	 *              22      28.0     41.9
1388 	 *              23      55.9     83.9
1389 	 *              24      111.8    167.8
1390 	 *              25      223.7    335.5
1391 	 *              26      447.4    671.1
1392 	 *              27      894.8    1342.2
1393 	 *              28      1.8      2.7     s
1394 	 *              Others Undefined */
1395 
1396 	/*
1397 	 * Use the table above to decide the encode of interrupt coalescing timeout
1398 	 * value for register writing. */
1399 	if (coalesce_timeout == 0)
1400 		timeout_encode = 0;
1401 	else {
1402 		/* make the timeout value in unit of (10 ns). */
1403 		/* Convert the timeout value into units of 10 ns. */
1404 		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1405 		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1406 
1407 		/* get the encode of timeout for register writing. */
1408 		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1409 		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1410 		      timeout_encode++) {
1411 			if (min <= coalesce_timeout &&  max > coalesce_timeout)
1412 				break;
1413 			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1414 				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1415 				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1416 					break;
1417 				else {
1418 					timeout_encode++;
1419 					break;
1420 				}
1421 			} else {
1422 				max = max * 2;
1423 				min = min * 2;
1424 			}
1425 		}
1426 
1427 		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1428 			/* the value is out of range. */
1429 			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1430 	}
1431 
1432 	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1433 	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1434 	       &ihost->smu_registers->interrupt_coalesce_control);
1435 
1436 
1437 	ihost->interrupt_coalesce_number = (u16)coalesce_number;
1438 	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1439 
1440 	return SCI_SUCCESS;
1441 }
1442 
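/*
 * Worked example of the encode search above: coalesce_timeout == 30 us
 * becomes 3000 ten-ns units.  The search starts at encode 7 with
 * min == 85 and max == 128 (853 ns and 1280 ns in ten-ns units) and
 * doubles both bounds each iteration:
 *
 *	encode 7:  [85, 128)    encode 10: [680, 1024)
 *	encode 8:  [170, 256)   encode 11: [1360, 2048)
 *	encode 9:  [340, 512)   encode 12: [2720, 4096)  <- 3000 fits
 *
 * so encode 12 is written, matching the 27.3 - 41.0 us row of the
 * table above.
 */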
1443 
1444 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1445 {
1446 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1447 	u32 val;
1448 
1449 	/* enable clock gating for power control of the scu unit */
1450 	val = readl(&ihost->smu_registers->clock_gating_control);
1451 	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1452 		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1453 		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1454 	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1455 	writel(val, &ihost->smu_registers->clock_gating_control);
1456 
1457 	/* set the default interrupt coalescence number and timeout value. */
1458 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1459 }
1460 
1461 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1462 {
1463 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1464 
1465 	/* disable interrupt coalescence. */
1466 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1467 }
1468 
1469 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1470 {
1471 	u32 index;
1472 	enum sci_status port_status;
1473 	enum sci_status status = SCI_SUCCESS;
1474 
1475 	for (index = 0; index < ihost->logical_port_entries; index++) {
1476 		struct isci_port *iport = &ihost->ports[index];
1477 
1478 		port_status = sci_port_stop(iport);
1479 
1480 		if ((port_status != SCI_SUCCESS) &&
1481 		    (port_status != SCI_FAILURE_INVALID_STATE)) {
1482 			status = SCI_FAILURE;
1483 
1484 			dev_warn(&ihost->pdev->dev,
1485 				 "%s: Controller stop operation failed to "
1486 				 "stop port %d because of status %d.\n",
1487 				 __func__,
1488 				 iport->logical_port_index,
1489 				 port_status);
1490 		}
1491 	}
1492 
1493 	return status;
1494 }
1495 
1496 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1497 {
1498 	u32 index;
1499 	enum sci_status status;
1500 	enum sci_status device_status;
1501 
1502 	status = SCI_SUCCESS;
1503 
1504 	for (index = 0; index < ihost->remote_node_entries; index++) {
1505 		if (ihost->device_table[index] != NULL) {
1506 			/* @todo What timeout value do we want to provide to this request? */
1507 			device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1508 
1509 			if ((device_status != SCI_SUCCESS) &&
1510 			    (device_status != SCI_FAILURE_INVALID_STATE)) {
1511 				dev_warn(&ihost->pdev->dev,
1512 					 "%s: Controller stop operation failed "
1513 					 "to stop device 0x%p because of "
1514 					 "status %d.\n",
1515 					 __func__,
1516 					 ihost->device_table[index], device_status);
1517 			}
1518 		}
1519 	}
1520 
1521 	return status;
1522 }
1523 
1524 static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1525 {
1526 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1527 
1528 	sci_controller_stop_devices(ihost);
1529 	sci_controller_stop_ports(ihost);
1530 
1531 	if (!sci_controller_has_remote_devices_stopping(ihost))
1532 		isci_host_stop_complete(ihost);
1533 }
1534 
1535 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1536 {
1537 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1538 
1539 	sci_del_timer(&ihost->timer);
1540 }
1541 
1542 static void sci_controller_reset_hardware(struct isci_host *ihost)
1543 {
1544 	/* Disable interrupts so we don't take any spurious interrupts */
1545 	sci_controller_disable_interrupts(ihost);
1546 
1547 	/* Reset the SCU */
1548 	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1549 
1550 	/* Delay for 1ms before clearing the CQP and UFQPR. */
1551 	udelay(1000);
1552 
1553 	/* The write to the CQGR clears the CQP */
1554 	writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1555 
1556 	/* The write to the UFQGP clears the UFQPR */
1557 	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1558 
1559 	/* clear all interrupts */
1560 	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1561 }
1562 
1563 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1564 {
1565 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1566 
1567 	sci_controller_reset_hardware(ihost);
1568 	sci_change_state(&ihost->sm, SCIC_RESET);
1569 }
1570 
1571 static const struct sci_base_state sci_controller_state_table[] = {
1572 	[SCIC_INITIAL] = {
1573 		.enter_state = sci_controller_initial_state_enter,
1574 	},
1575 	[SCIC_RESET] = {},
1576 	[SCIC_INITIALIZING] = {},
1577 	[SCIC_INITIALIZED] = {},
1578 	[SCIC_STARTING] = {
1579 		.exit_state  = sci_controller_starting_state_exit,
1580 	},
1581 	[SCIC_READY] = {
1582 		.enter_state = sci_controller_ready_state_enter,
1583 		.exit_state  = sci_controller_ready_state_exit,
1584 	},
1585 	[SCIC_RESETTING] = {
1586 		.enter_state = sci_controller_resetting_state_enter,
1587 	},
1588 	[SCIC_STOPPING] = {
1589 		.enter_state = sci_controller_stopping_state_enter,
1590 		.exit_state = sci_controller_stopping_state_exit,
1591 	},
1592 	[SCIC_FAILED] = {}
1593 };
1594 
1595 static void controller_timeout(unsigned long data)
1596 {
1597 	struct sci_timer *tmr = (struct sci_timer *)data;
1598 	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1599 	struct sci_base_state_machine *sm = &ihost->sm;
1600 	unsigned long flags;
1601 
1602 	spin_lock_irqsave(&ihost->scic_lock, flags);
1603 
1604 	if (tmr->cancel)
1605 		goto done;
1606 
1607 	if (sm->current_state_id == SCIC_STARTING)
1608 		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1609 	else if (sm->current_state_id == SCIC_STOPPING) {
1610 		sci_change_state(sm, SCIC_FAILED);
1611 		isci_host_stop_complete(ihost);
1612 	} else	/* @todo Now what do we want to do in this case? */
1613 		dev_err(&ihost->pdev->dev,
1614 			"%s: Controller timer fired when controller was not "
1615 			"in a state being timed.\n",
1616 			__func__);
1617 
1618 done:
1619 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620 }
1621 
1622 static enum sci_status sci_controller_construct(struct isci_host *ihost,
1623 						void __iomem *scu_base,
1624 						void __iomem *smu_base)
1625 {
1626 	u8 i;
1627 
1628 	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1629 
1630 	ihost->scu_registers = scu_base;
1631 	ihost->smu_registers = smu_base;
1632 
1633 	sci_port_configuration_agent_construct(&ihost->port_agent);
1634 
1635 	/* Construct the ports for this controller */
1636 	for (i = 0; i < SCI_MAX_PORTS; i++)
1637 		sci_port_construct(&ihost->ports[i], i, ihost);
1638 	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1639 
1640 	/* Construct the phys for this controller */
1641 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1642 		/* Add all the PHYs to the dummy port */
1643 		sci_phy_construct(&ihost->phys[i],
1644 				  &ihost->ports[SCI_MAX_PORTS], i);
1645 	}
1646 
1647 	ihost->invalid_phy_mask = 0;
1648 
1649 	sci_init_timer(&ihost->timer, controller_timeout);
1650 
1651 	return sci_controller_reset(ihost);
1652 }
1653 
1654 int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1655 {
1656 	int i;
1657 
1658 	for (i = 0; i < SCI_MAX_PORTS; i++)
1659 		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1660 			return -EINVAL;
1661 
1662 	for (i = 0; i < SCI_MAX_PHYS; i++)
1663 		if (oem->phys[i].sas_address.high == 0 &&
1664 		    oem->phys[i].sas_address.low == 0)
1665 			return -EINVAL;
1666 
1667 	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1668 		for (i = 0; i < SCI_MAX_PHYS; i++)
1669 			if (oem->ports[i].phy_mask != 0)
1670 				return -EINVAL;
1671 	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1672 		u8 phy_mask = 0;
1673 
1674 		for (i = 0; i < SCI_MAX_PHYS; i++)
1675 			phy_mask |= oem->ports[i].phy_mask;
1676 
1677 		if (phy_mask == 0)
1678 			return -EINVAL;
1679 	} else
1680 		return -EINVAL;
1681 
1682 	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1683 	    oem->controller.max_concurr_spin_up < 1)
1684 		return -EINVAL;
1685 
1686 	if (oem->controller.do_enable_ssc) {
1687 		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1688 			return -EINVAL;
1689 
1690 		if (version >= ISCI_ROM_VER_1_1) {
1691 			u8 test = oem->controller.ssc_sata_tx_spread_level;
1692 
1693 			switch (test) {
1694 			case 0:
1695 			case 2:
1696 			case 3:
1697 			case 6:
1698 			case 7:
1699 				break;
1700 			default:
1701 				return -EINVAL;
1702 			}
1703 
1704 			test = oem->controller.ssc_sas_tx_spread_level;
1705 			if (oem->controller.ssc_sas_tx_type == 0) {
1706 				switch (test) {
1707 				case 0:
1708 				case 2:
1709 				case 3:
1710 					break;
1711 				default:
1712 					return -EINVAL;
1713 				}
1714 			} else if (oem->controller.ssc_sas_tx_type == 1) {
1715 				switch (test) {
1716 				case 0:
1717 				case 3:
1718 				case 6:
1719 					break;
1720 				default:
1721 					return -EINVAL;
1722 				}
1723 			}
1724 		}
1725 	}
1726 
1727 	return 0;
1728 }
1729 
1730 static u8 max_spin_up(struct isci_host *ihost)
1731 {
1732 	if (ihost->user_parameters.max_concurr_spinup)
1733 		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1734 			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1735 	else
1736 		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1737 			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1738 }
1739 
1740 static void power_control_timeout(unsigned long data)
1741 {
1742 	struct sci_timer *tmr = (struct sci_timer *)data;
1743 	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1744 	struct isci_phy *iphy;
1745 	unsigned long flags;
1746 	u8 i;
1747 
1748 	spin_lock_irqsave(&ihost->scic_lock, flags);
1749 
1750 	if (tmr->cancel)
1751 		goto done;
1752 
1753 	ihost->power_control.phys_granted_power = 0;
1754 
1755 	if (ihost->power_control.phys_waiting == 0) {
1756 		ihost->power_control.timer_started = false;
1757 		goto done;
1758 	}
1759 
1760 	for (i = 0; i < SCI_MAX_PHYS; i++) {
1761 
1762 		if (ihost->power_control.phys_waiting == 0)
1763 			break;
1764 
1765 		iphy = ihost->power_control.requesters[i];
1766 		if (iphy == NULL)
1767 			continue;
1768 
1769 		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1770 			break;
1771 
1772 		ihost->power_control.requesters[i] = NULL;
1773 		ihost->power_control.phys_waiting--;
1774 		ihost->power_control.phys_granted_power++;
1775 		sci_phy_consume_power_handler(iphy);
1776 
1777 		if (iphy->protocol == SAS_PROTOCOL_SSP) {
1778 			u8 j;
1779 
1780 			for (j = 0; j < SCI_MAX_PHYS; j++) {
1781 				struct isci_phy *requester = ihost->power_control.requesters[j];
1782 
1783 				/*
1784 				 * Search the power_control queue to see if there are other phys
1785 				 * attached to the same remote device. If found, take all of
1786 				 * them out of await_sas_power state.
1787 				 */
1788 				if (requester != NULL && requester != iphy) {
1789 					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1790 							  iphy->frame_rcvd.iaf.sas_addr,
1791 							  sizeof(requester->frame_rcvd.iaf.sas_addr));
1792 
1793 					if (other == 0) {
1794 						ihost->power_control.requesters[j] = NULL;
1795 						ihost->power_control.phys_waiting--;
1796 						sci_phy_consume_power_handler(requester);
1797 					}
1798 				}
1799 			}
1800 		}
1801 	}
1802 
1803 	/*
1804 	 * Even if the power list is empty, we need to restart the timer in
1805 	 * case another phy becomes ready.
1806 	 */
1807 	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1808 	ihost->power_control.timer_started = true;
1809 
1810 done:
1811 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
1812 }
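/*
 * The timer handler above and the queue insert/remove helpers below
 * implement staggered spin-up: at most max_spin_up() phys are granted
 * power within each SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL window,
 * and the remainder wait in power_control.requesters[] until the timer
 * fires again.  SSP phys attached to the same remote device (matching
 * SAS address) are granted together rather than queued separately.
 */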
1813 
1814 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1815 					       struct isci_phy *iphy)
1816 {
1817 	BUG_ON(iphy == NULL);
1818 
1819 	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1820 		ihost->power_control.phys_granted_power++;
1821 		sci_phy_consume_power_handler(iphy);
1822 
1823 		/*
1824 		 * Stop and restart the power_control timer. When the timer fires,
1825 		 * phys_granted_power will be reset to 0.
1826 		 */
1827 		if (ihost->power_control.timer_started)
1828 			sci_del_timer(&ihost->power_control.timer);
1829 
1830 		sci_mod_timer(&ihost->power_control.timer,
1831 				 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1832 		ihost->power_control.timer_started = true;
1833 
1834 	} else {
1835 		/*
1836 		 * If another phy attached to the same SAS address as this phy is
1837 		 * already in the READY state, then this phy does not need to wait.
1838 		 */
1839 		u8 i;
1840 		struct isci_phy *current_phy;
1841 
1842 		for (i = 0; i < SCI_MAX_PHYS; i++) {
1843 			u8 other;
1844 			current_phy = &ihost->phys[i];
1845 
1846 			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1847 				       iphy->frame_rcvd.iaf.sas_addr,
1848 				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1849 
1850 			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1851 			    current_phy->protocol == SAS_PROTOCOL_SSP &&
1852 			    other == 0) {
1853 				sci_phy_consume_power_handler(iphy);
1854 				break;
1855 			}
1856 		}
1857 
1858 		if (i == SCI_MAX_PHYS) {
1859 			/* Add the phy in the waiting list */
1860 			ihost->power_control.requesters[iphy->phy_index] = iphy;
1861 			ihost->power_control.phys_waiting++;
1862 		}
1863 	}
1864 }
1865 
1866 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1867 					       struct isci_phy *iphy)
1868 {
1869 	BUG_ON(iphy == NULL);
1870 
1871 	if (ihost->power_control.requesters[iphy->phy_index])
1872 		ihost->power_control.phys_waiting--;
1873 
1874 	ihost->power_control.requesters[iphy->phy_index] = NULL;
1875 }
1876 
1877 static int is_long_cable(int phy, unsigned char selection_byte)
1878 {
1879 	return !!(selection_byte & (1 << phy));
1880 }
1881 
1882 static int is_medium_cable(int phy, unsigned char selection_byte)
1883 {
1884 	return !!(selection_byte & (1 << (phy + 4)));
1885 }
1886 
1887 static enum cable_selections decode_selection_byte(
1888 	int phy,
1889 	unsigned char selection_byte)
1890 {
1891 	return ((selection_byte & (1 << phy)) ? 1 : 0)
1892 		+ (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1893 }
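/*
 * The cable selection for a phy is packed two bits per phy into one
 * byte: bit 'phy' (low nibble) selects a long cable and bit 'phy + 4'
 * (high nibble) selects a medium cable, so decode_selection_byte()
 * yields enum cable_selections values 0 = short, 1 = long, 2 = medium
 * and 3 = undefined (both bits set, assumed long below).
 */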
1894 
1895 static unsigned char *to_cable_select(struct isci_host *ihost)
1896 {
1897 	if (is_cable_select_overridden())
1898 		return ((unsigned char *)&cable_selection_override)
1899 			+ ihost->id;
1900 	else
1901 		return &ihost->oem_parameters.controller.cable_selection_mask;
1902 }
1903 
1904 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1905 {
1906 	return decode_selection_byte(phy, *to_cable_select(ihost));
1907 }
1908 
1909 char *lookup_cable_names(enum cable_selections selection)
1910 {
1911 	static char *cable_names[] = {
1912 		[short_cable]     = "short",
1913 		[long_cable]      = "long",
1914 		[medium_cable]    = "medium",
1915 		[undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
1916 	};
1917 	return (selection <= undefined_cable) ? cable_names[selection]
1918 					      : cable_names[undefined_cable];
1919 }
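/*
 * A minimal usage sketch (hypothetical helper, not part of this file)
 * tying the two lookups together to report each phy's cable type:
 *
 *	static void report_cable_selections(struct isci_host *ihost)
 *	{
 *		int phy;
 *
 *		for (phy = 0; phy < SCI_MAX_PHYS; phy++)
 *			dev_dbg(&ihost->pdev->dev, "phy%d: %s cable\n", phy,
 *				lookup_cable_names(decode_cable_selection(ihost, phy)));
 *	}
 */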
1920 
1921 #define AFE_REGISTER_WRITE_DELAY 10
1922 
1923 static void sci_controller_afe_initialization(struct isci_host *ihost)
1924 {
1925 	struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1926 	const struct sci_oem_params *oem = &ihost->oem_parameters;
1927 	struct pci_dev *pdev = ihost->pdev;
1928 	u32 afe_status;
1929 	u32 phy_id;
1930 	unsigned char cable_selection_mask = *to_cable_select(ihost);
1931 
1932 	/* Clear DFX Status registers */
1933 	writel(0x0081000f, &afe->afe_dfx_master_control0);
1934 	udelay(AFE_REGISTER_WRITE_DELAY);
1935 
1936 	if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1937 		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1938 		 * Timer, PM Stagger Timer
1939 		 */
1940 		writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1941 		udelay(AFE_REGISTER_WRITE_DELAY);
1942 	}
1943 
1944 	/* Configure bias currents to normal */
1945 	if (is_a2(pdev))
1946 		writel(0x00005A00, &afe->afe_bias_control);
1947 	else if (is_b0(pdev) || is_c0(pdev))
1948 		writel(0x00005F00, &afe->afe_bias_control);
1949 	else if (is_c1(pdev))
1950 		writel(0x00005500, &afe->afe_bias_control);
1951 
1952 	udelay(AFE_REGISTER_WRITE_DELAY);
1953 
1954 	/* Enable PLL */
1955 	if (is_a2(pdev))
1956 		writel(0x80040908, &afe->afe_pll_control0);
1957 	else if (is_b0(pdev) || is_c0(pdev))
1958 		writel(0x80040A08, &afe->afe_pll_control0);
1959 	else if (is_c1(pdev)) {
1960 		writel(0x80000B08, &afe->afe_pll_control0);
1961 		udelay(AFE_REGISTER_WRITE_DELAY);
1962 		writel(0x00000B08, &afe->afe_pll_control0);
1963 		udelay(AFE_REGISTER_WRITE_DELAY);
1964 		writel(0x80000B08, &afe->afe_pll_control0);
1965 	}
1966 
1967 	udelay(AFE_REGISTER_WRITE_DELAY);
1968 
1969 	/* Wait for the PLL to lock */
1970 	do {
1971 		afe_status = readl(&afe->afe_common_block_status);
1972 		udelay(AFE_REGISTER_WRITE_DELAY);
1973 	} while ((afe_status & 0x00001000) == 0);
1974 
1975 	if (is_a2(pdev)) {
1976 		/* Shorten SAS SNW lock time (RxLock timer value from 76
1977 		 * us to 50 us)
1978 		 */
1979 		writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1980 		udelay(AFE_REGISTER_WRITE_DELAY);
1981 	}
1982 
1983 	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1984 		struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1985 		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1986 		int cable_length_long =
1987 			is_long_cable(phy_id, cable_selection_mask);
1988 		int cable_length_medium =
1989 			is_medium_cable(phy_id, cable_selection_mask);
1990 
1991 		if (is_a2(pdev)) {
1992 			/* All defaults, except the Receive Word
1993 			 * Alignment/Comma Detect Enable....(0xe800)
1994 			 */
1995 			writel(0x00004512, &xcvr->afe_xcvr_control0);
1996 			udelay(AFE_REGISTER_WRITE_DELAY);
1997 
1998 			writel(0x0050100F, &xcvr->afe_xcvr_control1);
1999 			udelay(AFE_REGISTER_WRITE_DELAY);
2000 		} else if (is_b0(pdev)) {
2001 			/* Configure transmitter SSC parameters */
2002 			writel(0x00030000, &xcvr->afe_tx_ssc_control);
2003 			udelay(AFE_REGISTER_WRITE_DELAY);
2004 		} else if (is_c0(pdev)) {
2005 			/* Configure transmitter SSC parameters */
2006 			writel(0x00010202, &xcvr->afe_tx_ssc_control);
2007 			udelay(AFE_REGISTER_WRITE_DELAY);
2008 
2009 			/* All defaults, except the Receive Word
2010 			 * Alignment/Comma Detect Enable....(0xe800)
2011 			 */
2012 			writel(0x00014500, &xcvr->afe_xcvr_control0);
2013 			udelay(AFE_REGISTER_WRITE_DELAY);
2014 		} else if (is_c1(pdev)) {
2015 			/* Configure transmitter SSC parameters */
2016 			writel(0x00010202, &xcvr->afe_tx_ssc_control);
2017 			udelay(AFE_REGISTER_WRITE_DELAY);
2018 
2019 			/* All defaults, except the Receive Word
2020 			 * Alignment/Comma Detect Enable....(0xe800)
2021 			 */
2022 			writel(0x0001C500, &xcvr->afe_xcvr_control0);
2023 			udelay(AFE_REGISTER_WRITE_DELAY);
2024 		}
2025 
2026 		/* Power up TX and RX out from power down (PWRDNTX and
2027 		 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
2028 		 */
2029 		if (is_a2(pdev))
2030 			writel(0x000003F0, &xcvr->afe_channel_control);
2031 		else if (is_b0(pdev)) {
2032 			writel(0x000003D7, &xcvr->afe_channel_control);
2033 			udelay(AFE_REGISTER_WRITE_DELAY);
2034 
2035 			writel(0x000003D4, &xcvr->afe_channel_control);
2036 		} else if (is_c0(pdev)) {
2037 			writel(0x000001E7, &xcvr->afe_channel_control);
2038 			udelay(AFE_REGISTER_WRITE_DELAY);
2039 
2040 			writel(0x000001E4, &xcvr->afe_channel_control);
2041 		} else if (is_c1(pdev)) {
2042 			writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2043 			       &xcvr->afe_channel_control);
2044 			udelay(AFE_REGISTER_WRITE_DELAY);
2045 
2046 			writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2047 			       &xcvr->afe_channel_control);
2048 		}
2049 		udelay(AFE_REGISTER_WRITE_DELAY);
2050 
2051 		if (is_a2(pdev)) {
2052 			/* Enable TX equalization (0xe824) */
2053 			writel(0x00040000, &xcvr->afe_tx_control);
2054 			udelay(AFE_REGISTER_WRITE_DELAY);
2055 		}
2056 
2057 		if (is_a2(pdev) || is_b0(pdev))
2058 			/* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2059 			 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2060 			 * Enabled) ....(0xe800)
2061 			 */
2062 			writel(0x00004100, &xcvr->afe_xcvr_control0);
2063 		else if (is_c0(pdev))
2064 			writel(0x00014100, &xcvr->afe_xcvr_control0);
2065 		else if (is_c1(pdev))
2066 			writel(0x0001C100, &xcvr->afe_xcvr_control0);
2067 		udelay(AFE_REGISTER_WRITE_DELAY);
2068 
2069 		/* Leave DFE/FFE on */
2070 		if (is_a2(pdev))
2071 			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2072 		else if (is_b0(pdev)) {
2073 			writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2074 			udelay(AFE_REGISTER_WRITE_DELAY);
2075 			/* Enable TX equalization (0xe824) */
2076 			writel(0x00040000, &xcvr->afe_tx_control);
2077 		} else if (is_c0(pdev)) {
2078 			writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2079 			udelay(AFE_REGISTER_WRITE_DELAY);
2080 
2081 			writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2082 			udelay(AFE_REGISTER_WRITE_DELAY);
2083 
2084 			/* Enable TX equalization (0xe824) */
2085 			writel(0x00040000, &xcvr->afe_tx_control);
2086 		} else if (is_c1(pdev)) {
2087 			writel(cable_length_long ? 0x01500C0C :
2088 			       cable_length_medium ? 0x01400C0D : 0x02400C0D,
2089 			       &xcvr->afe_xcvr_control1);
2090 			udelay(AFE_REGISTER_WRITE_DELAY);
2091 
2092 			writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2093 			udelay(AFE_REGISTER_WRITE_DELAY);
2094 
2095 			writel(cable_length_long ? 0x33091C1F :
2096 			       cable_length_medium ? 0x3315181F : 0x2B17161F,
2097 			       &xcvr->afe_rx_ssc_control0);
2098 			udelay(AFE_REGISTER_WRITE_DELAY);
2099 
2100 			/* Enable TX equalization (0xe824) */
2101 			writel(0x00040000, &xcvr->afe_tx_control);
2102 		}
2103 
2104 		udelay(AFE_REGISTER_WRITE_DELAY);
2105 
2106 		writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2107 		udelay(AFE_REGISTER_WRITE_DELAY);
2108 
2109 		writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2110 		udelay(AFE_REGISTER_WRITE_DELAY);
2111 
2112 		writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2113 		udelay(AFE_REGISTER_WRITE_DELAY);
2114 
2115 		writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2116 		udelay(AFE_REGISTER_WRITE_DELAY);
2117 	}
2118 
2119 	/* Transfer control to the PEs */
2120 	writel(0x00010f00, &afe->afe_dfx_master_control0);
2121 	udelay(AFE_REGISTER_WRITE_DELAY);
2122 }
2123 
2124 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2125 {
2126 	sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2127 
2128 	memset(ihost->power_control.requesters, 0,
2129 	       sizeof(ihost->power_control.requesters));
2130 
2131 	ihost->power_control.phys_waiting = 0;
2132 	ihost->power_control.phys_granted_power = 0;
2133 }
2134 
2135 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2136 {
2137 	struct sci_base_state_machine *sm = &ihost->sm;
2138 	enum sci_status result = SCI_FAILURE;
2139 	unsigned long i, state, val;
2140 
2141 	if (ihost->sm.current_state_id != SCIC_RESET) {
2142 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2143 			 __func__, ihost->sm.current_state_id);
2144 		return SCI_FAILURE_INVALID_STATE;
2145 	}
2146 
2147 	sci_change_state(sm, SCIC_INITIALIZING);
2148 
2149 	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2150 
2151 	ihost->next_phy_to_start = 0;
2152 	ihost->phy_startup_timer_pending = false;
2153 
2154 	sci_controller_initialize_power_control(ihost);
2155 
2156 	/*
2157 	 * There is nothing to do here for B0 since we do not have to
2158 	 * program the AFE registers.
2159 	 * @todo The AFE settings are supposed to be correct for B0 but
2160 	 *       presently they seem to be wrong. */
2161 	sci_controller_afe_initialization(ihost);
2162 
2163 
2164 	/* Take the hardware out of reset */
2165 	writel(0, &ihost->smu_registers->soft_reset_control);
2166 
2167 	/*
2168 	 * @todo Provide a meaningful error code for hardware failure
2169 	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2170 	for (i = 100; i >= 1; i--) {
2171 		u32 status;
2172 
2173 		/* Loop until the hardware reports success */
2174 		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2175 		status = readl(&ihost->smu_registers->control_status);
2176 
2177 		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2178 			break;
2179 	}
2180 	if (i == 0)
2181 		goto out;
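	/*
	 * The countdown loop above is the open-coded poll-with-timeout
	 * idiom.  On kernels providing <linux/iopoll.h> an equivalent
	 * sketch (not used here; the atomic variant is needed because
	 * this function runs under scic_lock) would be:
	 *
	 *	u32 status;
	 *
	 *	if (readl_poll_timeout_atomic(
	 *			&ihost->smu_registers->control_status,
	 *			status, status & SCU_RAM_INIT_COMPLETED,
	 *			SCU_CONTEXT_RAM_INIT_STALL_TIME,
	 *			100 * SCU_CONTEXT_RAM_INIT_STALL_TIME))
	 *		goto out;
	 */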
2182 
2183 	/*
2184 	 * Determine the actual device capacities that the
2185 	 * hardware will support */
2186 	val = readl(&ihost->smu_registers->device_context_capacity);
2187 
2188 	/* Record the smaller of the two capacity values */
2189 	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2190 	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2191 	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2192 
2193 	/*
2194 	 * Make all PEs that are unassigned match up with the
2195 	 * logical ports
2196 	 */
2197 	for (i = 0; i < ihost->logical_port_entries; i++) {
2198 		struct scu_port_task_scheduler_group_registers __iomem
2199 			*ptsg = &ihost->scu_registers->peg0.ptsg;
2200 
2201 		writel(i, &ptsg->protocol_engine[i]);
2202 	}
2203 
2204 	/* Initialize hardware PCI Relaxed ordering in DMA engines */
2205 	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2206 	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2207 	writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2208 
2209 	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2210 	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2211 	writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2212 
2213 	/*
2214 	 * Initialize the PHYs before the PORTs because the PHY registers
2215 	 * are accessed during the port initialization.
2216 	 */
2217 	for (i = 0; i < SCI_MAX_PHYS; i++) {
2218 		result = sci_phy_initialize(&ihost->phys[i],
2219 					    &ihost->scu_registers->peg0.pe[i].tl,
2220 					    &ihost->scu_registers->peg0.pe[i].ll);
2221 		if (result != SCI_SUCCESS)
2222 			goto out;
2223 	}
2224 
2225 	for (i = 0; i < ihost->logical_port_entries; i++) {
2226 		struct isci_port *iport = &ihost->ports[i];
2227 
2228 		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2229 		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2230 		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2231 	}
2232 
2233 	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2234 
2235  out:
2236 	/* Advance the controller state machine */
2237 	if (result == SCI_SUCCESS)
2238 		state = SCIC_INITIALIZED;
2239 	else
2240 		state = SCIC_FAILED;
2241 	sci_change_state(sm, state);
2242 
2243 	return result;
2244 }
2245 
2246 static int sci_controller_dma_alloc(struct isci_host *ihost)
2247 {
2248 	struct device *dev = &ihost->pdev->dev;
2249 	size_t size;
2250 	int i;
2251 
2252 	/* detect re-initialization */
2253 	if (ihost->completion_queue)
2254 		return 0;
2255 
2256 	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2257 	ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2258 						      GFP_KERNEL);
2259 	if (!ihost->completion_queue)
2260 		return -ENOMEM;
2261 
2262 	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2263 	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2264 							       GFP_KERNEL);
2265 
2266 	if (!ihost->remote_node_context_table)
2267 		return -ENOMEM;
2268 
2269 	size = ihost->task_context_entries * sizeof(struct scu_task_context);
2270 	ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2271 							GFP_KERNEL);
2272 	if (!ihost->task_context_table)
2273 		return -ENOMEM;
2274 
2275 	size = SCI_UFI_TOTAL_SIZE;
2276 	ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2277 	if (!ihost->ufi_buf)
2278 		return -ENOMEM;
2279 
2280 	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2281 		struct isci_request *ireq;
2282 		dma_addr_t dma;
2283 
2284 		ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2285 		if (!ireq)
2286 			return -ENOMEM;
2287 
2288 		ireq->tc = &ihost->task_context_table[i];
2289 		ireq->owning_controller = ihost;
2290 		ireq->request_daddr = dma;
2291 		ireq->isci_host = ihost;
2292 		ihost->reqs[i] = ireq;
2293 	}
2294 
2295 	return 0;
2296 }
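/*
 * All of the buffers above come from dmam_alloc_coherent(), the
 * device-managed variant of dma_alloc_coherent(), so they are released
 * automatically when the PCI device is unbound; that is why there is no
 * matching free path here and why a re-initialization can simply return
 * early once completion_queue is seen to be non-NULL.
 */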
2297 
2298 static int sci_controller_mem_init(struct isci_host *ihost)
2299 {
2300 	int err = sci_controller_dma_alloc(ihost);
2301 
2302 	if (err)
2303 		return err;
2304 
2305 	writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2306 	writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2307 
2308 	writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2309 	writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2310 
2311 	writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2312 	writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2313 
2314 	sci_unsolicited_frame_control_construct(ihost);
2315 
2316 	/*
2317 	 * Inform the silicon as to the location of the UF headers and
2318 	 * address table.
2319 	 */
2320 	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2321 		&ihost->scu_registers->sdma.uf_header_base_address_lower);
2322 	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2323 		&ihost->scu_registers->sdma.uf_header_base_address_upper);
2324 
2325 	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2326 		&ihost->scu_registers->sdma.uf_address_table_lower);
2327 	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2328 		&ihost->scu_registers->sdma.uf_address_table_upper);
2329 
2330 	return 0;
2331 }
2332 
2333 /**
2334  * isci_host_init - (re-)initialize hardware and internal (private) state
2335  * @ihost: host to init
2336  *
2337  * Any public facing objects (like asd_sas_port and asd_sas_phys), or
2338  * one-time initialization objects like locks and waitqueues, are
2339  * not touched (they are initialized in isci_host_alloc).
2340  */
2341 int isci_host_init(struct isci_host *ihost)
2342 {
2343 	int i, err;
2344 	enum sci_status status;
2345 
2346 	spin_lock_irq(&ihost->scic_lock);
2347 	status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2348 	spin_unlock_irq(&ihost->scic_lock);
2349 	if (status != SCI_SUCCESS) {
2350 		dev_err(&ihost->pdev->dev,
2351 			"%s: sci_controller_construct failed - status = %x\n",
2352 			__func__,
2353 			status);
2354 		return -ENODEV;
2355 	}
2356 
2357 	spin_lock_irq(&ihost->scic_lock);
2358 	status = sci_controller_initialize(ihost);
2359 	spin_unlock_irq(&ihost->scic_lock);
2360 	if (status != SCI_SUCCESS) {
2361 		dev_warn(&ihost->pdev->dev,
2362 			 "%s: sci_controller_initialize failed -"
2363 			 " status = 0x%x\n",
2364 			 __func__, status);
2365 		return -ENODEV;
2366 	}
2367 
2368 	err = sci_controller_mem_init(ihost);
2369 	if (err)
2370 		return err;
2371 
2372 	/* enable sgpio */
2373 	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2374 	for (i = 0; i < isci_gpio_count(ihost); i++)
2375 		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2376 	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2377 
2378 	return 0;
2379 }
2380 
2381 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2382 			    struct isci_phy *iphy)
2383 {
2384 	switch (ihost->sm.current_state_id) {
2385 	case SCIC_STARTING:
2386 		sci_del_timer(&ihost->phy_timer);
2387 		ihost->phy_startup_timer_pending = false;
2388 		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2389 						  iport, iphy);
2390 		sci_controller_start_next_phy(ihost);
2391 		break;
2392 	case SCIC_READY:
2393 		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2394 						  iport, iphy);
2395 		break;
2396 	default:
2397 		dev_dbg(&ihost->pdev->dev,
2398 			"%s: SCIC Controller linkup event from phy %d in "
2399 			"unexpected state %d\n", __func__, iphy->phy_index,
2400 			ihost->sm.current_state_id);
2401 	}
2402 }
2403 
2404 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2405 			      struct isci_phy *iphy)
2406 {
2407 	switch (ihost->sm.current_state_id) {
2408 	case SCIC_STARTING:
2409 	case SCIC_READY:
2410 		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2411 						   iport, iphy);
2412 		break;
2413 	default:
2414 		dev_dbg(&ihost->pdev->dev,
2415 			"%s: SCIC Controller linkdown event from phy %d in "
2416 			"unexpected state %d\n",
2417 			__func__,
2418 			iphy->phy_index,
2419 			ihost->sm.current_state_id);
2420 	}
2421 }
2422 
2423 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2424 {
2425 	u32 index;
2426 
2427 	for (index = 0; index < ihost->remote_node_entries; index++) {
2428 		if ((ihost->device_table[index] != NULL) &&
2429 		   (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2430 			return true;
2431 	}
2432 
2433 	return false;
2434 }
2435 
2436 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2437 					  struct isci_remote_device *idev)
2438 {
2439 	if (ihost->sm.current_state_id != SCIC_STOPPING) {
2440 		dev_dbg(&ihost->pdev->dev,
2441 			"SCIC Controller 0x%p remote device stopped event "
2442 			"from device 0x%p in unexpected state %d\n",
2443 			ihost, idev,
2444 			ihost->sm.current_state_id);
2445 		return;
2446 	}
2447 
2448 	if (!sci_controller_has_remote_devices_stopping(ihost))
2449 		isci_host_stop_complete(ihost);
2450 }
2451 
2452 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2453 {
2454 	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2455 		__func__, ihost->id, request);
2456 
2457 	writel(request, &ihost->smu_registers->post_context_port);
2458 }
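/*
 * The 32-bit value written to post_context_port is a pre-built command:
 * callers pass ireq->post_context as-is, or OR in a sub-type such as
 * SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT (see
 * sci_controller_terminate_request() below).
 */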
2459 
2460 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2461 {
2462 	u16 task_index;
2463 	u16 task_sequence;
2464 
2465 	task_index = ISCI_TAG_TCI(io_tag);
2466 
2467 	if (task_index < ihost->task_context_entries) {
2468 		struct isci_request *ireq = ihost->reqs[task_index];
2469 
2470 		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2471 			task_sequence = ISCI_TAG_SEQ(io_tag);
2472 
2473 			if (task_sequence == ihost->io_request_sequence[task_index])
2474 				return ireq;
2475 		}
2476 	}
2477 
2478 	return NULL;
2479 }
2480 
2481 /**
2482  * sci_controller_allocate_remote_node_context() - allocate a remote node
2483  *    index and reserve the remote node context space for use.  This method
2484  *    can fail if there is no remote node index available.
2485  * @ihost: This is the controller object which contains the set of
2486  *    free remote node ids
2487  * @idev: This is the device object which is requesting a remote node
2488  *    id
2489  * @node_id: This is the remote node id that is assigned to the device if one
2490  *    is available
2491  *
2492  * Return: SCI_SUCCESS, or SCI_FAILURE_INSUFFICIENT_RESOURCES if no
2493  * remote node index is available.
2494  */
2495 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2496 							    struct isci_remote_device *idev,
2497 							    u16 *node_id)
2498 {
2499 	u16 node_index;
2500 	u32 remote_node_count = sci_remote_device_node_count(idev);
2501 
2502 	node_index = sci_remote_node_table_allocate_remote_node(
2503 		&ihost->available_remote_nodes, remote_node_count
2504 		);
2505 
2506 	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2507 		ihost->device_table[node_index] = idev;
2508 
2509 		*node_id = node_index;
2510 
2511 		return SCI_SUCCESS;
2512 	}
2513 
2514 	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2515 }
2516 
2517 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2518 					     struct isci_remote_device *idev,
2519 					     u16 node_id)
2520 {
2521 	u32 remote_node_count = sci_remote_device_node_count(idev);
2522 
2523 	if (ihost->device_table[node_id] == idev) {
2524 		ihost->device_table[node_id] = NULL;
2525 
2526 		sci_remote_node_table_release_remote_node_index(
2527 			&ihost->available_remote_nodes, remote_node_count, node_id
2528 			);
2529 	}
2530 }
2531 
2532 void sci_controller_copy_sata_response(void *response_buffer,
2533 				       void *frame_header,
2534 				       void *frame_buffer)
2535 {
2536 	/* XXX type safety? */
2537 	memcpy(response_buffer, frame_header, sizeof(u32));
2538 
2539 	memcpy(response_buffer + sizeof(u32),
2540 	       frame_buffer,
2541 	       sizeof(struct dev_to_host_fis) - sizeof(u32));
2542 }
2543 
2544 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2545 {
2546 	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2547 		writel(ihost->uf_control.get,
2548 			&ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2549 }
2550 
2551 void isci_tci_free(struct isci_host *ihost, u16 tci)
2552 {
2553 	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2554 
2555 	ihost->tci_pool[tail] = tci;
2556 	ihost->tci_tail = tail + 1;
2557 }
2558 
2559 static u16 isci_tci_alloc(struct isci_host *ihost)
2560 {
2561 	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2562 	u16 tci = ihost->tci_pool[head];
2563 
2564 	ihost->tci_head = head + 1;
2565 	return tci;
2566 }
2567 
2568 static u16 isci_tci_space(struct isci_host *ihost)
2569 {
2570 	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2571 }
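/*
 * The TCI (task context index) pool is a circular buffer of free
 * indices: tci_head and tci_tail are free-running counters masked with
 * SCI_MAX_IO_REQUESTS - 1 on access (which assumes SCI_MAX_IO_REQUESTS
 * is a power of two), and CIRC_SPACE() from <linux/circ_buf.h> reports
 * how many TCIs remain available.
 */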
2572 
2573 u16 isci_alloc_tag(struct isci_host *ihost)
2574 {
2575 	if (isci_tci_space(ihost)) {
2576 		u16 tci = isci_tci_alloc(ihost);
2577 		u8 seq = ihost->io_request_sequence[tci];
2578 
2579 		return ISCI_TAG(seq, tci);
2580 	}
2581 
2582 	return SCI_CONTROLLER_INVALID_IO_TAG;
2583 }
2584 
2585 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2586 {
2587 	u16 tci = ISCI_TAG_TCI(io_tag);
2588 	u16 seq = ISCI_TAG_SEQ(io_tag);
2589 
2590 	/* prevent tail from passing head */
2591 	if (isci_tci_active(ihost) == 0)
2592 		return SCI_FAILURE_INVALID_IO_TAG;
2593 
2594 	if (seq == ihost->io_request_sequence[tci]) {
2595 		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2596 
2597 		isci_tci_free(ihost, tci);
2598 
2599 		return SCI_SUCCESS;
2600 	}
2601 	return SCI_FAILURE_INVALID_IO_TAG;
2602 }
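/*
 * A minimal tag life-cycle sketch (hypothetical caller; assumes
 * ihost->scic_lock is held, as for the other tag operations here):
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *
 *	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *		return -EBUSY;
 *	...
 *	isci_free_tag(ihost, tag);
 *
 * The sequence number embedded in the tag (ISCI_TAG_SEQ()) is bumped on
 * every free, which lets isci_free_tag() and sci_request_by_tag()
 * reject a stale tag whose TCI has since been reallocated.
 */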
2603 
2604 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2605 					struct isci_remote_device *idev,
2606 					struct isci_request *ireq)
2607 {
2608 	enum sci_status status;
2609 
2610 	if (ihost->sm.current_state_id != SCIC_READY) {
2611 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2612 			 __func__, ihost->sm.current_state_id);
2613 		return SCI_FAILURE_INVALID_STATE;
2614 	}
2615 
2616 	status = sci_remote_device_start_io(ihost, idev, ireq);
2617 	if (status != SCI_SUCCESS)
2618 		return status;
2619 
2620 	set_bit(IREQ_ACTIVE, &ireq->flags);
2621 	sci_controller_post_request(ihost, ireq->post_context);
2622 	return SCI_SUCCESS;
2623 }
2624 
2625 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2626 						 struct isci_remote_device *idev,
2627 						 struct isci_request *ireq)
2628 {
2629 	/* terminate an ongoing (i.e. started) core IO request.  This does not
2630 	 * abort the IO request at the target, but rather removes the IO
2631 	 * request from the host controller.
2632 	 */
2633 	enum sci_status status;
2634 
2635 	if (ihost->sm.current_state_id != SCIC_READY) {
2636 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2637 			 __func__, ihost->sm.current_state_id);
2638 		return SCI_FAILURE_INVALID_STATE;
2639 	}
2640 	status = sci_io_request_terminate(ireq);
2641 
2642 	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2643 		__func__, status, ireq, ireq->flags);
2644 
2645 	if ((status == SCI_SUCCESS) &&
2646 	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2647 	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
2648 		/* Utilize the original post context command and OR in the
2649 		 * POST_TC_ABORT request sub-type.
2650 		 */
2651 		sci_controller_post_request(
2652 			ihost, ireq->post_context |
2653 				SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2654 	}
2655 	return status;
2656 }
2657 
2658 /**
2659  * sci_controller_complete_io() - This method will perform core specific
2660  *    completion operations for an IO request.  After this method is invoked,
2661  *    the user should consider the IO request as invalid until it is properly
2662  *    reused (i.e. re-constructed).
2663  * @ihost: The handle to the controller object for which to complete the
2664  *    IO request.
2665  * @idev: The handle to the remote device object for which to complete
2666  *    the IO request.
2667  * @ireq: the handle to the io request object to complete.
2668  */
2669 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2670 					   struct isci_remote_device *idev,
2671 					   struct isci_request *ireq)
2672 {
2673 	enum sci_status status;
2674 	u16 index;
2675 
2676 	switch (ihost->sm.current_state_id) {
2677 	case SCIC_STOPPING:
2678 		/* XXX: Implement this function */
2679 		return SCI_FAILURE;
2680 	case SCIC_READY:
2681 		status = sci_remote_device_complete_io(ihost, idev, ireq);
2682 		if (status != SCI_SUCCESS)
2683 			return status;
2684 
2685 		index = ISCI_TAG_TCI(ireq->io_tag);
2686 		clear_bit(IREQ_ACTIVE, &ireq->flags);
2687 		return SCI_SUCCESS;
2688 	default:
2689 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2690 			 __func__, ihost->sm.current_state_id);
2691 		return SCI_FAILURE_INVALID_STATE;
2692 	}
2693 
2694 }
2695 
2696 enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2697 {
2698 	struct isci_host *ihost = ireq->owning_controller;
2699 
2700 	if (ihost->sm.current_state_id != SCIC_READY) {
2701 		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2702 			 __func__, ihost->sm.current_state_id);
2703 		return SCI_FAILURE_INVALID_STATE;
2704 	}
2705 
2706 	set_bit(IREQ_ACTIVE, &ireq->flags);
2707 	sci_controller_post_request(ihost, ireq->post_context);
2708 	return SCI_SUCCESS;
2709 }
2710 
2711 /**
2712  * sci_controller_start_task() - This method is called by the SCIC user to
2713  *    send/start a framework task management request.
2714  * @ihost: the handle to the controller object for which to start the task
2715  *    management request.
2716  * @idev: the handle to the remote device object for which to start
2717  *    the task management request.
2718  * @ireq: the handle to the task request object to start.
2719  */
2720 enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2721 					       struct isci_remote_device *idev,
2722 					       struct isci_request *ireq)
2723 {
2724 	enum sci_status status;
2725 
2726 	if (ihost->sm.current_state_id != SCIC_READY) {
2727 		dev_warn(&ihost->pdev->dev,
2728 			 "%s: SCIC Controller starting task from invalid "
2729 			 "state\n",
2730 			 __func__);
2731 		return SCI_TASK_FAILURE_INVALID_STATE;
2732 	}
2733 
2734 	status = sci_remote_device_start_task(ihost, idev, ireq);
2735 	switch (status) {
2736 	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2737 		set_bit(IREQ_ACTIVE, &ireq->flags);
2738 
2739 		/*
2740 		 * We will let the framework know this task request started
2741 		 * successfully, although the core is still working on starting the
2742 		 * request (to post the TC when the RNC is resumed).
2743 		 */
2744 		return SCI_SUCCESS;
2745 	case SCI_SUCCESS:
2746 		set_bit(IREQ_ACTIVE, &ireq->flags);
2747 		sci_controller_post_request(ihost, ireq->post_context);
2748 		break;
2749 	default:
2750 		break;
2751 	}
2752 
2753 	return status;
2754 }
2755 
2756 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2757 {
2758 	int d;
2759 
2760 	/* no support for TX_GP_CFG */
2761 	if (reg_index == 0)
2762 		return -EINVAL;
2763 
2764 	for (d = 0; d < isci_gpio_count(ihost); d++) {
2765 		u32 val = 0x444; /* all ODx.n clear */
2766 		int i;
2767 
2768 		for (i = 0; i < 3; i++) {
2769 			int bit;
2770 
2771 			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
2772 						       write_data, reg_index,
2773 						       reg_count);
2774 			if (bit < 0)
2775 				break;
2776 
2777 			/* if od is set, clear the 'invert' bit */
2778 			val &= ~(bit << ((i << 2) + 2));
2779 		}
2780 
2781 		if (i < 3)
2782 			break;
2783 		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2784 	}
2785 
2786 	/* unless reg_index is > 1, we should always be able to write at
2787 	 * least one register
2788 	 */
2789 	return d > 0;
2790 }
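/*
 * In sci_write_gpio_tx_gp() above, each output_data_select register
 * packs three OD signals, one per nibble; 0x444 sets bit 2 of every
 * nibble ("all ODx.n clear"), and that bit is cleared again for each OD
 * signal whose GPIO bit is set in write_data, so a set bit drives the
 * signal.
 */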
2791 
2792 int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
2793 		    u8 reg_count, u8 *write_data)
2794 {
2795 	struct isci_host *ihost = sas_ha->lldd_ha;
2796 	int written;
2797 
2798 	switch (reg_type) {
2799 	case SAS_GPIO_REG_TX_GP:
2800 		written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
2801 		break;
2802 	default:
2803 		written = -EINVAL;
2804 	}
2805 
2806 	return written;
2807 }
2808