/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}
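
/*
 * Example (illustrative note, not from the original sources): the first
 * two SGL element pairs are embedded in the task context itself, and
 * only pair indices two and above spill into the external sg_table:
 *
 *	to_sgl_element_pair(ireq, 0) == &ireq->tc->sgl_pair_ab
 *	to_sgl_element_pair(ireq, 1) == &ireq->tc->sgl_pair_cd
 *	to_sgl_element_pair(ireq, 2) == &ireq->sg_table[0]
 *	to_sgl_element_pair(ireq, 3) == &ireq->sg_table[1]
 */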

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg   = NULL;
	struct scu_sgl_element_pair *prev_sg  = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
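
/*
 * Worked example (illustrative, assuming a five-entry mapped
 * scatterlist): sci_request_build_sgl() packs two entries per SCU pair
 * and chains each pair to the next by its DMA address:
 *
 *	pair 0 (tc->sgl_pair_ab): A = sg0, B = sg1, next -> pair 1
 *	pair 1 (tc->sgl_pair_cd): A = sg2, B = sg3, next -> pair 2
 *	pair 2 (sg_table[0]):     A = sg4, B = zeroed, next = 0
 *
 * The last pair written always gets next_pair_upper/lower cleared so
 * the hardware sees a terminated chain.
 */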

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
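
/*
 * Sketch of the CDB copy above (an assumption about sci_swab32_cpy(),
 * not a quote of its implementation): it is used here as a word-wise
 * byte-swapping copy, roughly equivalent to
 *
 *	u32 *dst = (u32 *)&cmd_iu->cdb;
 *	u32 *src = (u32 *)task->ssp_task.cdb;
 *	size_t i;
 *
 *	for (i = 0; i < sizeof(task->ssp_task.cdb) / sizeof(u32); i++)
 *		dst[i] = swab32(src[i]);
 *
 * which yields the byte ordering the SSP command frame expects.
 */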

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * scu_ssp_request_construct_task_context() - fill in the SCU Task Context
 *    for any type of SSP request.
 * @ireq: the request whose task context is being constructed.
 * @task_context: the buffer for the SCU task context being constructed.
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
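
/*
 * Example (illustrative): post_context is a bit-packed "post task
 * context" command.  For logical port 1 and a tag whose TCi is 42 it
 * is composed as
 *
 *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
 *	    | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT)
 *	    | (1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)
 *	    | 42
 *
 * sci_request_start() later ORs the TCi in again before the request is
 * posted, which is harmless since the same bits are already set.
 */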

/**
 * scu_ssp_io_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP IO request.
 * @ireq: the IO request whose task context is being constructed.
 * @dir: the direction of the data transfer.
 * @len: the length of the data transfer in bytes.
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);
}

/**
 * scu_ssp_task_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP Task request.
 * @ireq: the task request object being constructed.
 *
 * The following important settings are utilized:
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other tasks destined for the same
 *      Remote Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  A raw frame is utilized
 *      to perform the task management request.
 *   -# control_frame == 1.  This ensures that the proper endianness is
 *      set so that the bytes are transmitted in the right order for a
 *      task frame.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame                = 1;
	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes        = 0;
	task_context->type.ssp.frame_type          = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context() - fill in the SCU Task Context
 *    for any type of SATA request.  This is called from the various SATA
 *    constructors.
 * @ireq: the general IO request object to be used in constructing the SCU
 *    task context.
 * @task_context: the buffer pointer for the SCU task context being
 *    constructed.
 *
 * Revisit task context construction to determine what is common for
 * SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context. We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
						((char *) &ireq->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame         = 0;
	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type     = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
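
/*
 * Layout note (illustrative): a host-to-device register FIS is five
 * dwords.  The first dword (FIS type, flags, command, features) rides
 * inside the task context via type.words[0], so the raw frame only
 * DMAs the remaining sizeof(struct host_to_dev_fis) - sizeof(u32)
 * bytes; that is why both the command IU address and the raw-frame
 * transfer length above are offset by sizeof(u32).
 */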

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
							  bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/**
 * sci_stp_optimized_request_construct() - perform request construction
 *    common to all types of STP requests that are optimized by the silicon
 *    (i.e. UDMA, NCQ).
 * @ireq: the request to be constructed as an optimized request.
 * @optimized_task_type: whether the request is to be a UDMA request or an
 *    NCQ request. - A value of 0 indicates UDMA. - A value of 1 indicates
 *    NCQ.
 * @len: the length of the data transfer in bytes.
 * @dir: the direction of the data transfer.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values is consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}
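
/*
 * Example (illustrative, assuming the task-type encodings keep the
 * read/write pairs at a constant distance, as the comment above
 * states): for an NCQ write,
 *
 *	task_type = SCU_TASK_TYPE_FPDMAQ_READ
 *		    + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN);
 *
 * lands on the corresponding FPDMA WRITE task type, mirroring the
 * DMA IN/OUT pair.
 */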

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited d2h fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
			return SCI_SUCCESS;
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return sci_stp_pio_request_construct(ireq, copy);
}
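
/*
 * Dispatch summary (informal restatement of the checks above):
 *
 *	SRST high/low TMF     -> raw H2D register frame
 *	ATAPI PACKET command  -> sci_atapi_construct()
 *	data_dir == DMA_NONE  -> raw H2D register frame (non-data)
 *	NCQ                   -> optimized, SCU_TASK_TYPE_FPDMAQ_READ base
 *	DMA                   -> optimized, SCU_TASK_TYPE_DMA_IN base
 *	otherwise             -> PIO via sci_stp_pio_request_construct()
 */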

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = sci_io_request_construct_sata(ireq,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status != SCI_SUCCESS)
		return status;
	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
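
/*
 * Example (illustrative): for an io_tag whose TCi is 3, the read above
 * decodes to
 *
 *	scu_reg_base + 0x200000
 *		+ offsetof(struct scu_task_context, type.ssp.data_offset)
 *		+ 3 * sizeof(struct scu_task_context)
 *
 * i.e. the data_offset field of task context 3 in SCU SRAM, which
 * reflects how many bytes the hardware actually transmitted.
 */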

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			"%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* @todo When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add the io tag value to the post_context */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good; go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		/* If a request has a termination requested twice, return
		 * a failure indication, since HW confirmation of the first
		 * abort is still outstanding.
		 */
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
						  ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has signaled R_ERR
		 * for the data frame.  Go back to waiting for the D2H
		 * Register FIS.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
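
/*
 * Note (illustrative): response_data_len is carried big-endian in the
 * response IU, hence the be32_to_cpu() above.  Clamping to
 * SSP_RESP_IU_MAX_SIZE guards against a malformed length from the
 * target: e.g. a reported length of 0x18 copies 24 bytes, while
 * anything above the maximum is truncated to SSP_RESP_IU_MAX_SIZE.
 */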

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine the SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp devices get suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SCIC_STP_PROTOCOL) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp devices get suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to time out if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For this type of error,
		 * we ask the ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete.  If a NAK
		 * was received, then it is up to the user to retry the request
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}
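
/*
 * Example (illustrative): starting at pair 0 / element A, repeated
 * pio_sgl_next() calls advance through the chain built by
 * sci_request_build_sgl() as
 *
 *	(0, A) -> (0, B) -> (1, A) -> (1, B) -> (2, A) -> ...
 *
 * and return NULL when element B is zeroed or the next-pair address is
 * zero, i.e. at the chain terminator.
 */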

static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* transmit a DATA_FIS from (current sgl + offset) for the input
 * parameter length. The current sgl and offset are already stored in the IO request
 */
static enum sci_status sci_stp_request_pio_data_out_transmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_transmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_transmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}
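
/*
 * Worked example (illustrative numbers): with an SGL element of length
 * 8192, sgl.offset == 0 and pio_len == 6144, the pio_len < len branch
 * sends a 6144-byte DATA frame, advances address_lower and the saved
 * offset by 6144 and drops pio_len to 0.  A pio_len of 8192 or more
 * would instead take the pio_len >= len branch, consume the whole
 * element and step to the next one via pio_sgl_next().
 */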

/**
 * sci_stp_request_pio_data_in_copy_data_buffer() - copy the data from the
 *    buffer for the length specified to the IO request SGL specified data
 *    region.
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page, KM_IRQ0);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr, KM_IRQ0);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}

/**
 * sci_stp_request_pio_data_in_copy_data() - copy the data buffer to the io
 *    request data region.
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}
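
/*
 * Example (illustrative): a 2600-byte PIO data-in transfer arrives in
 * frames of at most SCU_MAX_FRAME_BUFFER_SIZE (1K) of payload, so
 * successive calls here see pio_len step 2600 -> 1576 -> 552 -> 0,
 * copying 1024, 1024 and finally 552 bytes into the request's buffers.
 */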

static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred  = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP FIS or a D2H Reg FIS. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							       frame_index,
							       (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						       frame_header,
						       frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* The frame has been decoded; return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_IO_FAILURE_RESPONSE_VALID;
	} else {
		status = SCI_IO_FAILURE_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h ufi is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}

static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
	struct scu_task_context *task_context = ireq->tc;

	/* fill in the SCU Task Context for a DATA fis containing the CDB in
	 * Raw Frame type.  The TC for the previous Packet fis was already
	 * there; we only need to change the H2D fis content.
	 */
	memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
	memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
	memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
	task_context->type.stp.fis_type = FIS_DATA;
	task_context->transfer_length_bytes = dev->cdb_len;
}

static void scu_atapi_construct_task_context(struct isci_request *ireq)
{
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
	struct sas_task *task = isci_request_access_task(ireq);
	struct scu_task_context *task_context = ireq->tc;
	int cdb_len = dev->cdb_len;

	/* reference: SSTL 1.13.4.2
	 * task_type, sata_direction
	 */
	if (task->data_dir == DMA_TO_DEVICE) {
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
		task_context->sata_direction = 0;
	} else {
		/* todo: for NO_DATA command, we need to send out raw frame. */
		task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
		task_context->sata_direction = 1;
	}

	memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
	task_context->type.stp.fis_type = FIS_DATA;

	memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
	task_context->ssp_command_iu_length = cdb_len / sizeof(u32);

	/* task phase is set to TX_CMD */
	task_context->task_phase = 0x1;

	/* retry counter */
	task_context->stp_retry_count = 0;

	/* data transfer size. */
	task_context->transfer_length_bytes = task->total_xfer_len;

	/* setup sgl */
	sci_request_build_sgl(ireq);
}
1581 
1582 enum sci_status
1583 sci_io_request_frame_handler(struct isci_request *ireq,
1584 				  u32 frame_index)
1585 {
1586 	struct isci_host *ihost = ireq->owning_controller;
1587 	struct isci_stp_request *stp_req = &ireq->stp.req;
1588 	enum sci_base_request_states state;
1589 	enum sci_status status;
1590 	ssize_t word_cnt;
1591 
1592 	state = ireq->sm.current_state_id;
1593 	switch (state)  {
1594 	case SCI_REQ_STARTED: {
1595 		struct ssp_frame_hdr ssp_hdr;
1596 		void *frame_header;
1597 
1598 		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1599 							      frame_index,
1600 							      &frame_header);
1601 
1602 		word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1603 		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1604 
1605 		if (ssp_hdr.frame_type == SSP_RESPONSE) {
1606 			struct ssp_response_iu *resp_iu;
1607 			ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1608 
1609 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1610 								      frame_index,
1611 								      (void **)&resp_iu);
1612 
1613 			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1614 
1615 			resp_iu = &ireq->ssp.rsp;
1616 
1617 			if (resp_iu->datapres == 0x01 ||
1618 			    resp_iu->datapres == 0x02) {
1619 				ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1620 				ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1621 			} else {
1622 				ireq->scu_status = SCU_TASK_DONE_GOOD;
1623 				ireq->sci_status = SCI_SUCCESS;
1624 			}
1625 		} else {
1626 			/* not a response frame, why did it get forwarded? */
1627 			dev_err(&ihost->pdev->dev,
1628 				"%s: SCIC IO Request 0x%p received unexpected "
1629 				"frame %d type 0x%02x\n", __func__, ireq,
1630 				frame_index, ssp_hdr.frame_type);
1631 		}
1632 
1633 		/*
1634 		 * In any case we are done with this frame buffer; return it
1635 		 * to the controller.
1636 		 */
1637 		sci_controller_release_frame(ihost, frame_index);
1638 
1639 		return SCI_SUCCESS;
1640 	}
1641 
1642 	case SCI_REQ_TASK_WAIT_TC_RESP:
1643 		sci_io_request_copy_response(ireq);
1644 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1645 		sci_controller_release_frame(ihost, frame_index);
1646 		return SCI_SUCCESS;
1647 
1648 	case SCI_REQ_SMP_WAIT_RESP: {
1649 		struct sas_task *task = isci_request_access_task(ireq);
1650 		struct scatterlist *sg = &task->smp_task.smp_resp;
1651 		void *frame_header, *kaddr;
1652 		u8 *rsp;
1653 
1654 		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1655 							 frame_index,
1656 							 &frame_header);
1657 		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
1658 		rsp = kaddr + sg->offset;
1659 		sci_swab32_cpy(rsp, frame_header, 1);
1660 
1661 		if (rsp[0] == SMP_RESPONSE) {
1662 			void *smp_resp;
1663 
1664 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1665 								 frame_index,
1666 								 &smp_resp);
1667 
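			/* the first dword (the SMP header) was already copied
			 * above; copy at most length/4 - 1 further dwords,
			 * clamped to the unsolicited frame buffer size
			 */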
1668 			word_cnt = (sg->length/4)-1;
1669 			if (word_cnt > 0)
1670 				word_cnt = min_t(unsigned int, word_cnt,
1671 						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1672 			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1673 
1674 			ireq->scu_status = SCU_TASK_DONE_GOOD;
1675 			ireq->sci_status = SCI_SUCCESS;
1676 			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1677 		} else {
1678 			/*
1679 			 * This was not a response frame; why did it get
1680 			 * forwarded?
1681 			 */
1682 			dev_err(&ihost->pdev->dev,
1683 				"%s: SCIC SMP Request 0x%p received unexpected "
1684 				"frame %d type 0x%02x\n",
1685 				__func__,
1686 				ireq,
1687 				frame_index,
1688 				rsp[0]);
1689 
1690 			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1691 			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1692 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 		}
1694 		kunmap_atomic(kaddr, KM_IRQ0);
1695 
1696 		sci_controller_release_frame(ihost, frame_index);
1697 
1698 		return SCI_SUCCESS;
1699 	}
1700 
1701 	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1702 		return sci_stp_request_udma_general_frame_handler(ireq,
1703 								       frame_index);
1704 
1705 	case SCI_REQ_STP_UDMA_WAIT_D2H:
1706 		/* Use the general frame handler to copy the response data */
1707 		status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1708 
1709 		if (status != SCI_SUCCESS)
1710 			return status;
1711 
1712 		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1713 		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1714 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1715 		return SCI_SUCCESS;
1716 
1717 	case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1718 		struct dev_to_host_fis *frame_header;
1719 		u32 *frame_buffer;
1720 
1721 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1722 								       frame_index,
1723 								       (void **)&frame_header);
1724 
1725 		if (status != SCI_SUCCESS) {
1726 			dev_err(&ihost->pdev->dev,
1727 				"%s: SCIC IO Request 0x%p could not get frame "
1728 				"header for frame index %d, status %x\n",
1729 				__func__,
1730 				stp_req,
1731 				frame_index,
1732 				status);
1733 
1734 			return status;
1735 		}
1736 
1737 		switch (frame_header->fis_type) {
1738 		case FIS_REGD2H:
1739 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1740 								      frame_index,
1741 								      (void **)&frame_buffer);
1742 
1743 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1744 							       frame_header,
1745 							       frame_buffer);
1746 
1747 			/* The command has completed with error */
1748 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1749 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1750 			break;
1751 
1752 		default:
1753 			dev_warn(&ihost->pdev->dev,
1754 				 "%s: IO Request:0x%p Frame Id:%d protocol "
1755 				  "violation occurred\n", __func__, stp_req,
1756 				  frame_index);
1757 
1758 			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1759 			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1760 			break;
1761 		}
1762 
1763 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1764 
1765 		/* Frame has been decoded; return it to the controller */
1766 		sci_controller_release_frame(ihost, frame_index);
1767 
1768 		return status;
1769 	}
1770 
1771 	case SCI_REQ_STP_PIO_WAIT_FRAME: {
1772 		struct sas_task *task = isci_request_access_task(ireq);
1773 		struct dev_to_host_fis *frame_header;
1774 		u32 *frame_buffer;
1775 
1776 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1777 								       frame_index,
1778 								       (void **)&frame_header);
1779 
1780 		if (status != SCI_SUCCESS) {
1781 			dev_err(&ihost->pdev->dev,
1782 				"%s: SCIC IO Request 0x%p could not get frame "
1783 				"header for frame index %d, status %x\n",
1784 				__func__, stp_req, frame_index, status);
1785 			return status;
1786 		}
1787 
1788 		switch (frame_header->fis_type) {
1789 		case FIS_PIO_SETUP:
1790 			/* Get from the frame buffer the PIO Setup Data */
1791 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1792 								      frame_index,
1793 								      (void **)&frame_buffer);
1794 
1795 			/* Get the data from the PIO Setup.  The SCU hardware
1796 			 * returns the first word in the frame_header; the rest
1797 			 * of the data is in the frame buffer, so we need to
1798 			 * back up one dword.
1799 			 */
1800 
1801 			/* transfer_count: first 16bits in the 4th dword */
1802 			stp_req->pio_len = frame_buffer[3] & 0xffff;
1803 
1804 			/* status: 4th byte in the 3rd dword */
1805 			stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1806 
1807 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1808 							       frame_header,
1809 							       frame_buffer);
1810 
1811 			ireq->stp.rsp.status = stp_req->status;
1812 
1813 			/* The next state is dependent on whether the
1814 			 * request was PIO Data-in or Data out
1815 			 */
1816 			if (task->data_dir == DMA_FROM_DEVICE) {
1817 				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1818 			} else if (task->data_dir == DMA_TO_DEVICE) {
1819 				/* Transmit data */
1820 				status = sci_stp_request_pio_data_out_transmit_data(ireq);
1821 				if (status != SCI_SUCCESS)
1822 					break;
1823 				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1824 			}
1825 			break;
1826 
1827 		case FIS_SETDEVBITS:
1828 			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1829 			break;
1830 
1831 		case FIS_REGD2H:
1832 			if (frame_header->status & ATA_BUSY) {
1833 				/*
1834 				 * Now why is the drive sending a D2H Register
1835 				 * FIS when it is still busy?  Do nothing since
1836 				 * we are still in the right state.
1837 				 */
1838 				dev_dbg(&ihost->pdev->dev,
1839 					"%s: SCIC PIO Request 0x%p received "
1840 					"D2H Register FIS with BSY status "
1841 					"0x%x\n",
1842 					__func__,
1843 					stp_req,
1844 					frame_header->status);
1845 				break;
1846 			}
1847 
1848 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1849 								      frame_index,
1850 								      (void **)&frame_buffer);
1851 
1852 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1853 							       frame_header,
1854 							       frame_buffer);
1855 
1856 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1857 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1858 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1859 			break;
1860 
1861 		default:
1862 			/* FIXME: what do we do here? */
1863 			break;
1864 		}
1865 
1866 		/* Frame is decoded; return it to the controller */
1867 		sci_controller_release_frame(ihost, frame_index);
1868 
1869 		return status;
1870 	}
1871 
1872 	case SCI_REQ_STP_PIO_DATA_IN: {
1873 		struct dev_to_host_fis *frame_header;
1874 		struct sata_fis_data *frame_buffer;
1875 
1876 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1877 								       frame_index,
1878 								       (void **)&frame_header);
1879 
1880 		if (status != SCI_SUCCESS) {
1881 			dev_err(&ihost->pdev->dev,
1882 				"%s: SCIC IO Request 0x%p could not get frame "
1883 				"header for frame index %d, status %x\n",
1884 				__func__,
1885 				stp_req,
1886 				frame_index,
1887 				status);
1888 			return status;
1889 		}
1890 
1891 		if (frame_header->fis_type != FIS_DATA) {
1892 			dev_err(&ihost->pdev->dev,
1893 				"%s: SCIC PIO Request 0x%p received frame %d "
1894 				"with fis type 0x%02x when expecting a data "
1895 				"fis.\n",
1896 				__func__,
1897 				stp_req,
1898 				frame_index,
1899 				frame_header->fis_type);
1900 
1901 			ireq->scu_status = SCU_TASK_DONE_GOOD;
1902 			ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1903 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1904 
1905 			/* Frame is decoded; return it to the controller */
1906 			sci_controller_release_frame(ihost, frame_index);
1907 			return status;
1908 		}
1909 
1910 		if (stp_req->sgl.index < 0) {
1911 			ireq->saved_rx_frame_index = frame_index;
1912 			stp_req->pio_len = 0;
1913 		} else {
1914 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1915 								      frame_index,
1916 								      (void **)&frame_buffer);
1917 
1918 			status = sci_stp_request_pio_data_in_copy_data(stp_req,
1919 									    (u8 *)frame_buffer);
1920 
1921 			/* Frame is decoded; return it to the controller */
1922 			sci_controller_release_frame(ihost, frame_index);
1923 		}
1924 
1925 		/* Check for the end of the transfer: are there more
1926 		 * bytes remaining for this data transfer?
1927 		 */
1928 		if (status != SCI_SUCCESS || stp_req->pio_len != 0)
1929 			return status;
1930 
1931 		if ((stp_req->status & ATA_BUSY) == 0) {
1932 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1933 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1934 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1935 		} else {
1936 			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1937 		}
1938 		return status;
1939 	}
1940 
1941 	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1942 		struct dev_to_host_fis *frame_header;
1943 		u32 *frame_buffer;
1944 
1945 		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1946 								       frame_index,
1947 								       (void **)&frame_header);
1948 		if (status != SCI_SUCCESS) {
1949 			dev_err(&ihost->pdev->dev,
1950 				"%s: SCIC IO Request 0x%p could not get frame "
1951 				"header for frame index %d, status %x\n",
1952 				__func__,
1953 				stp_req,
1954 				frame_index,
1955 				status);
1956 			return status;
1957 		}
1958 
1959 		switch (frame_header->fis_type) {
1960 		case FIS_REGD2H:
1961 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1962 								      frame_index,
1963 								      (void **)&frame_buffer);
1964 
1965 			sci_controller_copy_sata_response(&ireq->stp.rsp,
1966 							       frame_header,
1967 							       frame_buffer);
1968 
1969 			/* The command has completed with error */
1970 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1971 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1972 			break;
1973 
1974 		default:
1975 			dev_warn(&ihost->pdev->dev,
1976 				 "%s: IO Request:0x%p Frame Id:%d protocol "
1977 				 "violation occurred\n",
1978 				 __func__,
1979 				 stp_req,
1980 				 frame_index);
1981 
1982 			ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1983 			ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1984 			break;
1985 		}
1986 
1987 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1988 
1989 		/* Frame has been decoded; return it to the controller */
1990 		sci_controller_release_frame(ihost, frame_index);
1991 
1992 		return status;
1993 	}
1994 	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
1995 		struct sas_task *task = isci_request_access_task(ireq);
1996 
1997 		sci_controller_release_frame(ihost, frame_index);
1998 		ireq->target_device->working_request = ireq;
1999 		if (task->data_dir == DMA_NONE) {
2000 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2001 			scu_atapi_reconstruct_raw_frame_task_context(ireq);
2002 		} else {
2003 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2004 			scu_atapi_construct_task_context(ireq);
2005 		}
2006 
2007 		sci_controller_continue_io(ireq);
2008 		return SCI_SUCCESS;
2009 	}
2010 	case SCI_REQ_ATAPI_WAIT_D2H:
2011 		return atapi_d2h_reg_frame_handler(ireq, frame_index);
2012 	case SCI_REQ_ABORTING:
2013 		/*
2014 		 * TODO: Is it even possible to get an unsolicited frame in the
2015 		 * aborting state?
2016 		 */
2017 		sci_controller_release_frame(ihost, frame_index);
2018 		return SCI_SUCCESS;
2019 
2020 	default:
2021 		dev_warn(&ihost->pdev->dev,
2022 			 "%s: SCIC IO Request given unexpected frame %x while "
2023 			 "in state %d\n",
2024 			 __func__,
2025 			 frame_index,
2026 			 state);
2027 
2028 		sci_controller_release_frame(ihost, frame_index);
2029 		return SCI_FAILURE_INVALID_STATE;
2030 	}
2031 }
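
/*
 * Sketch (an assumption, mirroring the helper declared in isci.h):
 * sci_swab32_cpy(), used above for the SSP header, the response IU and the
 * SMP payload, is a dword-wise byte-swapping copy along the lines of:
 */
#if 0
static void swab32_cpy_sketch(void *_dest, void *_src, ssize_t word_cnt)
{
	u32 *dest = _dest, *src = _src;

	while (--word_cnt >= 0)
		dest[word_cnt] = swab32(src[word_cnt]);
}
#endif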
2032 
2033 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2034 						       u32 completion_code)
2035 {
2036 	enum sci_status status = SCI_SUCCESS;
2037 
2038 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2039 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2040 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2041 		ireq->sci_status = SCI_SUCCESS;
2042 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2043 		break;
2044 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2045 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2046 		/* We must check the response buffer to see if the D2H
2047 		 * Register FIS was received before we got the TC
2048 		 * completion.
2049 		 */
2050 		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2051 			sci_remote_device_suspend(ireq->target_device,
2052 				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2053 
2054 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2055 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2056 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2057 		} else {
2058 			/* If we have an error completion status for the
2059 			 * TC then we can expect a D2H register FIS from
2060 			 * the device so we must change state to wait
2061 			 * for it
2062 			 */
2063 			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2064 		}
2065 		break;
2066 
2067 	/* TODO Check to see if any of these completion status need to
2068 	 * wait for the device to host register fis.
2069 	 */
2070 	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2071 	 * - this comes only for B0
2072 	 */
2073 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2074 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2075 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2076 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2077 		sci_remote_device_suspend(ireq->target_device,
2078 			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2079 		/* Fall through to the default case */
2080 	default:
2081 		/* All other completion statuses cause the IO to be complete. */
2082 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2083 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2084 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2085 		break;
2086 	}
2087 
2088 	return status;
2089 }
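
/*
 * Note: SCU_GET_COMPLETION_TL_STATUS() isolates the transport layer status
 * field of a TC completion, and SCU_MAKE_COMPLETION_STATUS() builds the
 * constant it is compared against; judging by the open-coded
 * (code << SCU_COMPLETION_TL_STATUS_SHIFT) cases in
 * atapi_data_tc_completion_handler() below, a success test reduces to:
 */
#if 0
static bool tc_status_is_good(u32 completion_code)
{
	return SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
	       (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT);
}
#endif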
2090 
2091 static enum sci_status
2092 stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2093 						   u32 completion_code)
2094 {
2095 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2096 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2097 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2098 		ireq->sci_status = SCI_SUCCESS;
2099 		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2100 		break;
2101 
2102 	default:
2103 		/*
2104 		 * All other completion statuses cause the IO to be complete.
2105 		 * If a NAK was received, then it is up to the user to retry
2106 		 * the request.
2107 		 */
2108 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2109 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2110 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2111 		break;
2112 	}
2113 
2114 	return SCI_SUCCESS;
2115 }
2116 
2117 static enum sci_status
2118 stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2119 						     u32 completion_code)
2120 {
2121 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2122 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2123 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2124 		ireq->sci_status = SCI_SUCCESS;
2125 		sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2126 		break;
2127 
2128 	default:
2129 		/* All other completion statuses cause the IO to be complete.  If
2130 		 * a NAK was received, then it is up to the user to retry the
2131 		 * request.
2132 		 */
2133 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2134 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2135 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2136 		break;
2137 	}
2138 
2139 	return SCI_SUCCESS;
2140 }
2141 
2142 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2143 						  enum sci_base_request_states next)
2144 {
2145 	enum sci_status status = SCI_SUCCESS;
2146 
2147 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2148 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2149 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2150 		ireq->sci_status = SCI_SUCCESS;
2151 		sci_change_state(&ireq->sm, next);
2152 		break;
2153 	default:
2154 		/* All other completion statuses cause the IO to be complete.
2155 		 * If a NAK was received, then it is up to the user to retry
2156 		 * the request.
2157 		 */
2158 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2159 		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2160 
2161 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2162 		break;
2163 	}
2164 
2165 	return status;
2166 }
2167 
2168 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2169 							u32 completion_code)
2170 {
2171 	struct isci_remote_device *idev = ireq->target_device;
2172 	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2173 	enum sci_status status = SCI_SUCCESS;
2174 
2175 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2176 	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2177 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2178 		break;
2179 
2180 	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2181 		u16 len = sci_req_tx_bytes(ireq);
2182 
2183 		/* likely a non-error data underrun; work around the missing
2184 		 * d2h frame from the controller
2185 		 */
2186 		if (d2h->fis_type != FIS_REGD2H) {
2187 			d2h->fis_type = FIS_REGD2H;
2188 			d2h->flags = (1 << 6);		/* 'I' (interrupt) bit */
2189 			d2h->status = 0x50;		/* ATA_DRDY | ATA_DSC */
2190 			d2h->error = 0;
2191 			d2h->lbal = 0;
2192 			d2h->byte_count_low = len & 0xff;
2193 			d2h->byte_count_high = len >> 8;
2194 			d2h->device = 0xa0;		/* obsolete device bits */
2195 			d2h->lbal_exp = 0;
2196 			d2h->lbam_exp = 0;
2197 			d2h->lbah_exp = 0;
2198 			d2h->_r_a = 0;
2199 			d2h->sector_count = 0x3;	/* ireason: I/O | C/D */
2200 			d2h->sector_count_exp = 0;
2201 			d2h->_r_b = 0;
2202 			d2h->_r_c = 0;
2203 			d2h->_r_d = 0;
2204 		}
2205 
2206 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2207 		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2208 		status = ireq->sci_status;
2209 
2210 		/* the hw will have suspended the rnc, so complete the
2211 		 * request upon pending resume
2212 		 */
2213 		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2214 		break;
2215 	}
2216 	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2217 		/* In this case, there is no UF coming after;
2218 		 * complete the IO now.
2219 		 */
2220 		ireq->scu_status = SCU_TASK_DONE_GOOD;
2221 		ireq->sci_status = SCI_SUCCESS;
2222 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2223 		break;
2224 
2225 	default:
2226 		if (d2h->fis_type == FIS_REGD2H) {
2227 			/* UF received; change the device state to ATAPI_ERROR */
2228 			status = ireq->sci_status;
2229 			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2230 		} else {
2231 			/* If we receive any non-success TC status with no UF
2232 			 * received yet, then a UF for the status fis
2233 			 * is coming after (XXX: suspect this is
2234 			 * actually a protocol error or a bug like the
2235 			 * DONE_UNEXP_FIS case)
2236 			 */
2237 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2238 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2239 
2240 			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2241 		}
2242 		break;
2243 	}
2244 
2245 	return status;
2246 }
2247 
2248 enum sci_status
2249 sci_io_request_tc_completion(struct isci_request *ireq,
2250 				  u32 completion_code)
2251 {
2252 	enum sci_base_request_states state;
2253 	struct isci_host *ihost = ireq->owning_controller;
2254 
2255 	state = ireq->sm.current_state_id;
2256 
2257 	switch (state) {
2258 	case SCI_REQ_STARTED:
2259 		return request_started_state_tc_event(ireq, completion_code);
2260 
2261 	case SCI_REQ_TASK_WAIT_TC_COMP:
2262 		return ssp_task_request_await_tc_event(ireq,
2263 						       completion_code);
2264 
2265 	case SCI_REQ_SMP_WAIT_RESP:
2266 		return smp_request_await_response_tc_event(ireq,
2267 							   completion_code);
2268 
2269 	case SCI_REQ_SMP_WAIT_TC_COMP:
2270 		return smp_request_await_tc_event(ireq, completion_code);
2271 
2272 	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
2273 		return stp_request_udma_await_tc_event(ireq,
2274 						       completion_code);
2275 
2276 	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
2277 		return stp_request_non_data_await_h2d_tc_event(ireq,
2278 							       completion_code);
2279 
2280 	case SCI_REQ_STP_PIO_WAIT_H2D:
2281 		return stp_request_pio_await_h2d_completion_tc_event(ireq,
2282 								     completion_code);
2283 
2284 	case SCI_REQ_STP_PIO_DATA_OUT:
2285 		return pio_data_out_tx_done_tc_event(ireq, completion_code);
2286 
2287 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2288 		return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2289 									  completion_code);
2290 
2291 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2292 		return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2293 									    completion_code);
2294 
2295 	case SCI_REQ_ABORTING:
2296 		return request_aborting_state_tc_event(ireq,
2297 						       completion_code);
2298 
2299 	case SCI_REQ_ATAPI_WAIT_H2D:
2300 		return atapi_raw_completion(ireq, completion_code,
2301 					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2302 
2303 	case SCI_REQ_ATAPI_WAIT_TC_COMP:
2304 		return atapi_raw_completion(ireq, completion_code,
2305 					    SCI_REQ_ATAPI_WAIT_D2H);
2306 
2307 	case SCI_REQ_ATAPI_WAIT_D2H:
2308 		return atapi_data_tc_completion_handler(ireq, completion_code);
2309 
2310 	default:
2311 		dev_warn(&ihost->pdev->dev,
2312 			 "%s: SCIC IO Request given task completion "
2313 			 "notification %x while in wrong state %d\n",
2314 			 __func__,
2315 			 completion_code,
2316 			 state);
2317 		return SCI_FAILURE_INVALID_STATE;
2318 	}
2319 }
2320 
2321 /**
2322  * isci_request_process_response_iu() - This function sets the status and
2323  *    response iu, in the task struct, from the request object for the upper
2324  *    layer driver.
2325  * @task: This parameter is the task struct from the upper layer driver.
2326  * @resp_iu: This parameter points to the response iu of the completed request.
2327  * @dev: This parameter specifies the linux device struct.
2328  *
2329  * none.
2330  */
2331 static void isci_request_process_response_iu(
2332 	struct sas_task *task,
2333 	struct ssp_response_iu *resp_iu,
2334 	struct device *dev)
2335 {
2336 	dev_dbg(dev,
2337 		"%s: resp_iu = %p "
2338 		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2339 		"resp_iu->response_data_len = %x, "
2340 		"resp_iu->sense_data_len = %x\nrepsonse data: ",
2341 		__func__,
2342 		resp_iu,
2343 		resp_iu->status,
2344 		resp_iu->datapres,
2345 		resp_iu->response_data_len,
2346 		resp_iu->sense_data_len);
2347 
2348 	task->task_status.stat = resp_iu->status;
2349 
2350 	/* libsas updates the task status fields based on the response iu. */
2351 	sas_ssp_task_response(dev, task, resp_iu);
2352 }
2353 
2354 /**
2355  * isci_request_set_open_reject_status() - This function prepares the I/O
2356  *    completion for OPEN_REJECT conditions.
2357  * @request: This parameter is the completed isci_request object.
2358  * @response_ptr: This parameter specifies the service response for the I/O.
2359  * @status_ptr: This parameter specifies the exec status for the I/O.
2360  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2361  *    the LLDD with respect to completing this request or forcing an abort
2362  *    condition on the I/O.
2363  * @open_rej_reason: This parameter specifies the encoded reason for the
2364  *    abandon-class reject.
2365  *
2366  * none.
2367  */
2368 static void isci_request_set_open_reject_status(
2369 	struct isci_request *request,
2370 	struct sas_task *task,
2371 	enum service_response *response_ptr,
2372 	enum exec_status *status_ptr,
2373 	enum isci_completion_selection *complete_to_host_ptr,
2374 	enum sas_open_rej_reason open_rej_reason)
2375 {
2376 	/* Task in the target is done. */
2377 	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2378 	*response_ptr                     = SAS_TASK_UNDELIVERED;
2379 	*status_ptr                       = SAS_OPEN_REJECT;
2380 	*complete_to_host_ptr             = isci_perform_normal_io_completion;
2381 	task->task_status.open_rej_reason = open_rej_reason;
2382 }
2383 
2384 /**
2385  * isci_request_handle_controller_specific_errors() - This function decodes
2386  *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device for the completed request.
2387  * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task being completed.
2388  * @response_ptr: This parameter specifies the service response for the I/O.
2389  * @status_ptr: This parameter specifies the exec status for the I/O.
2390  * @complete_to_host_ptr: This parameter specifies the action to be taken by
2391  *    the LLDD with respect to completing this request or forcing an abort
2392  *    condition on the I/O.
2393  *
2394  * none.
2395  */
2396 static void isci_request_handle_controller_specific_errors(
2397 	struct isci_remote_device *idev,
2398 	struct isci_request *request,
2399 	struct sas_task *task,
2400 	enum service_response *response_ptr,
2401 	enum exec_status *status_ptr,
2402 	enum isci_completion_selection *complete_to_host_ptr)
2403 {
2404 	unsigned int cstatus;
2405 
2406 	cstatus = request->scu_status;
2407 
2408 	dev_dbg(&request->isci_host->pdev->dev,
2409 		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2410 		"- controller status = 0x%x\n",
2411 		__func__, request, cstatus);
2412 
2413 	/* Decode the controller-specific errors; most
2414 	 * important is to recognize those conditions in which
2415 	 * the target may still have a task outstanding that
2416 	 * must be aborted.
2417 	 *
2418 	 * Note that there are SCU completion codes being
2419 	 * named in the decode below for which SCIC has already
2420 	 * done work to handle them in a way other than as
2421 	 * a controller-specific completion code; these are left
2422 	 * in the decode below for completeness sake.
2423 	 */
2424 	switch (cstatus) {
2425 	case SCU_TASK_DONE_DMASETUP_DIRERR:
2426 	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2427 	case SCU_TASK_DONE_XFERCNT_ERR:
2428 		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2429 		if (task->task_proto == SAS_PROTOCOL_SMP) {
2430 			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2431 			*response_ptr = SAS_TASK_COMPLETE;
2432 
2433 			/* See if the device has been/is being stopped. Note
2434 			 * that we ignore the quiesce state, since we are
2435 			 * concerned about the actual device state.
2436 			 */
2437 			if (!idev)
2438 				*status_ptr = SAS_DEVICE_UNKNOWN;
2439 			else
2440 				*status_ptr = SAS_ABORTED_TASK;
2441 
2442 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2443 
2444 			*complete_to_host_ptr =
2445 				isci_perform_normal_io_completion;
2446 		} else {
2447 			/* Task in the target is not done. */
2448 			*response_ptr = SAS_TASK_UNDELIVERED;
2449 
2450 			if (!idev)
2451 				*status_ptr = SAS_DEVICE_UNKNOWN;
2452 			else
2453 				*status_ptr = SAM_STAT_TASK_ABORTED;
2454 
2455 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2456 
2457 			*complete_to_host_ptr =
2458 				isci_perform_error_io_completion;
2459 		}
2460 
2461 		break;
2462 
2463 	case SCU_TASK_DONE_CRC_ERR:
2464 	case SCU_TASK_DONE_NAK_CMD_ERR:
2465 	case SCU_TASK_DONE_EXCESS_DATA:
2466 	case SCU_TASK_DONE_UNEXP_FIS:
2467 	/* Also SCU_TASK_DONE_UNEXP_RESP: */
2468 	case SCU_TASK_DONE_VIIT_ENTRY_NV:       /* TODO - conditions? */
2469 	case SCU_TASK_DONE_IIT_ENTRY_NV:        /* TODO - conditions? */
2470 	case SCU_TASK_DONE_RNCNV_OUTBOUND:      /* TODO - conditions? */
2471 		/* These are conditions in which the target
2472 		 * has completed the task, so that no cleanup
2473 		 * is necessary.
2474 		 */
2475 		*response_ptr = SAS_TASK_COMPLETE;
2476 
2477 		/* See if the device has been/is being stopped. Note
2478 		 * that we ignore the quiesce state, since we are
2479 		 * concerned about the actual device state.
2480 		 */
2481 		if (!idev)
2482 			*status_ptr = SAS_DEVICE_UNKNOWN;
2483 		else
2484 			*status_ptr = SAS_ABORTED_TASK;
2485 
2486 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2487 
2488 		*complete_to_host_ptr = isci_perform_normal_io_completion;
2489 		break;
2490 
2491 
2492 	/* Note that the only open reject completion codes seen here will be
2493 	 * abandon-class codes; all others are automatically retried in the SCU.
2494 	 */
2495 	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2496 
2497 		isci_request_set_open_reject_status(
2498 			request, task, response_ptr, status_ptr,
2499 			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2500 		break;
2501 
2502 	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2503 
2504 		/* Note - the return of AB0 will change when
2505 		 * libsas implements detection of zone violations.
2506 		 */
2507 		isci_request_set_open_reject_status(
2508 			request, task, response_ptr, status_ptr,
2509 			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2510 		break;
2511 
2512 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2513 
2514 		isci_request_set_open_reject_status(
2515 			request, task, response_ptr, status_ptr,
2516 			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2517 		break;
2518 
2519 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2520 
2521 		isci_request_set_open_reject_status(
2522 			request, task, response_ptr, status_ptr,
2523 			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2524 		break;
2525 
2526 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2527 
2528 		isci_request_set_open_reject_status(
2529 			request, task, response_ptr, status_ptr,
2530 			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2531 		break;
2532 
2533 	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2534 
2535 		isci_request_set_open_reject_status(
2536 			request, task, response_ptr, status_ptr,
2537 			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2538 		break;
2539 
2540 	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2541 
2542 		isci_request_set_open_reject_status(
2543 			request, task, response_ptr, status_ptr,
2544 			complete_to_host_ptr, SAS_OREJ_STP_NORES);
2545 		break;
2546 
2547 	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2548 
2549 		isci_request_set_open_reject_status(
2550 			request, task, response_ptr, status_ptr,
2551 			complete_to_host_ptr, SAS_OREJ_EPROTO);
2552 		break;
2553 
2554 	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2555 
2556 		isci_request_set_open_reject_status(
2557 			request, task, response_ptr, status_ptr,
2558 			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2559 		break;
2560 
2561 	case SCU_TASK_DONE_LL_R_ERR:
2562 	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
2563 	case SCU_TASK_DONE_LL_PERR:
2564 	case SCU_TASK_DONE_LL_SY_TERM:
2565 	/* Also SCU_TASK_DONE_NAK_ERR:*/
2566 	case SCU_TASK_DONE_LL_LF_TERM:
2567 	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2568 	case SCU_TASK_DONE_LL_ABORT_ERR:
2569 	case SCU_TASK_DONE_SEQ_INV_TYPE:
2570 	/* Also SCU_TASK_DONE_UNEXP_XR: */
2571 	case SCU_TASK_DONE_XR_IU_LEN_ERR:
2572 	case SCU_TASK_DONE_INV_FIS_LEN:
2573 	/* Also SCU_TASK_DONE_XR_WD_LEN: */
2574 	case SCU_TASK_DONE_SDMA_ERR:
2575 	case SCU_TASK_DONE_OFFSET_ERR:
2576 	case SCU_TASK_DONE_MAX_PLD_ERR:
2577 	case SCU_TASK_DONE_LF_ERR:
2578 	case SCU_TASK_DONE_SMP_RESP_TO_ERR:  /* Escalate to dev reset? */
2579 	case SCU_TASK_DONE_SMP_LL_RX_ERR:
2580 	case SCU_TASK_DONE_UNEXP_DATA:
2581 	case SCU_TASK_DONE_UNEXP_SDBFIS:
2582 	case SCU_TASK_DONE_REG_ERR:
2583 	case SCU_TASK_DONE_SDB_ERR:
2584 	case SCU_TASK_DONE_TASK_ABORT:
2585 	default:
2586 		/* Task in the target is not done. */
2587 		*response_ptr = SAS_TASK_UNDELIVERED;
2588 		*status_ptr = SAM_STAT_TASK_ABORTED;
2589 
2590 		if (task->task_proto == SAS_PROTOCOL_SMP) {
2591 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2592 
2593 			*complete_to_host_ptr = isci_perform_normal_io_completion;
2594 		} else {
2595 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2596 
2597 			*complete_to_host_ptr = isci_perform_error_io_completion;
2598 		}
2599 		break;
2600 	}
2601 }
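
/*
 * A rough summary of the decode above:
 *
 *   task completed in target   -> SAS_TASK_COMPLETE / SAS_ABORTED_TASK,
 *                                 normal completion
 *   abandon-class OPEN_REJECT  -> SAS_TASK_UNDELIVERED / SAS_OPEN_REJECT,
 *                                 normal completion
 *   everything else            -> SAS_TASK_UNDELIVERED / SAM_STAT_TASK_ABORTED,
 *                                 error completion (sas_task_abort path),
 *                                 except SMP which completes normally
 */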
2602 
2603 /**
2604  * isci_task_save_for_upper_layer_completion() - This function saves the
2605  *    request for later completion to the upper layer driver.
2606  * @host: This parameter is a pointer to the host on which the the request
2607  *    should be queued (either as an error or success).
2608  * @request: This parameter is the completed request.
2609  * @response: This parameter is the response code for the completed task.
2610  * @status: This parameter is the status code for the completed task.
 * @task_notification_selection: This parameter specifies how the completion
 *    is to be delivered (normal, aborted, or error path).
2611  *
2612  * none.
2613  */
2614 static void isci_task_save_for_upper_layer_completion(
2615 	struct isci_host *host,
2616 	struct isci_request *request,
2617 	enum service_response response,
2618 	enum exec_status status,
2619 	enum isci_completion_selection task_notification_selection)
2620 {
2621 	struct sas_task *task = isci_request_access_task(request);
2622 
2623 	task_notification_selection
2624 		= isci_task_set_completion_status(task, response, status,
2625 						  task_notification_selection);
2626 
2627 	/* Tasks aborted specifically by a call to the lldd_abort_task
2628 	 * function should not be completed to the host in the regular path.
2629 	 */
2630 	switch (task_notification_selection) {
2631 
2632 	case isci_perform_normal_io_completion:
2633 
2634 		/* Normal notification (task_done) */
2635 		dev_dbg(&host->pdev->dev,
2636 			"%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
2637 			__func__,
2638 			task,
2639 			task->task_status.resp, response,
2640 			task->task_status.stat, status);
2641 		/* Add to the completed list. */
2642 		list_add(&request->completed_node,
2643 			 &host->requests_to_complete);
2644 
2645 		/* Take the request off the device's pending request list. */
2646 		list_del_init(&request->dev_node);
2647 		break;
2648 
2649 	case isci_perform_aborted_io_completion:
2650 		/* No notification to libsas because this request is
2651 		 * already in the abort path.
2652 		 */
2653 		dev_dbg(&host->pdev->dev,
2654 			 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
2655 			 __func__,
2656 			 task,
2657 			 task->task_status.resp, response,
2658 			 task->task_status.stat, status);
2659 
2660 		/* Wake up whatever process was waiting for this
2661 		 * request to complete.
2662 		 */
2663 		WARN_ON(request->io_request_completion == NULL);
2664 
2665 		if (request->io_request_completion != NULL) {
2666 
2667 			/* Signal whoever is waiting that this
2668 			 * request is complete.
2669 			 */
2670 			complete(request->io_request_completion);
2671 		}
2672 		break;
2673 
2674 	case isci_perform_error_io_completion:
2675 		/* Use sas_task_abort */
2676 		dev_dbg(&host->pdev->dev,
2677 			 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
2678 			 __func__,
2679 			 task,
2680 			 task->task_status.resp, response,
2681 			 task->task_status.stat, status);
2682 		/* Add to the aborted list. */
2683 		list_add(&request->completed_node,
2684 			 &host->requests_to_errorback);
2685 		break;
2686 
2687 	default:
2688 		dev_dbg(&host->pdev->dev,
2689 			 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
2690 			 __func__,
2691 			 task,
2692 			 task->task_status.resp, response,
2693 			 task->task_status.stat, status);
2694 
2695 		/* Add to the error to libsas list. */
2696 		list_add(&request->completed_node,
2697 			 &host->requests_to_errorback);
2698 		break;
2699 	}
2700 }
2701 
2702 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2703 {
2704 	struct task_status_struct *ts = &task->task_status;
2705 	struct ata_task_resp *resp = (void *)&ts->buf[0];
2706 
2707 	resp->frame_len = sizeof(*fis);
2708 	memcpy(resp->ending_fis, fis, sizeof(*fis));
2709 	ts->buf_valid_size = sizeof(*resp);
2710 
2711 	/* If the device fault bit is set in the status register, then
2712 	 * report the fis as a protocol response.
2713 	 */
2714 	if (fis->status & ATA_DF)
2715 		ts->stat = SAS_PROTO_RESPONSE;
2716 	else if (fis->status & ATA_ERR)
2717 		ts->stat = SAM_STAT_CHECK_CONDITION;
2718 	else
2719 		ts->stat = SAM_STAT_GOOD;
2720 
2721 	ts->resp = SAS_TASK_COMPLETE;
2722 }
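
/*
 * Note: ATA_DF (0x20) is the device-fault bit and ATA_ERR (0x01) the error
 * bit of the fis status register; SAS_PROTO_RESPONSE tells libsas to look
 * at the ending fis saved above rather than at a sense buffer.
 */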
2723 
2724 static void isci_request_io_request_complete(struct isci_host *ihost,
2725 					     struct isci_request *request,
2726 					     enum sci_io_status completion_status)
2727 {
2728 	struct sas_task *task = isci_request_access_task(request);
2729 	struct ssp_response_iu *resp_iu;
2730 	unsigned long task_flags;
2731 	struct isci_remote_device *idev = isci_lookup_device(task->dev);
2732 	enum service_response response       = SAS_TASK_UNDELIVERED;
2733 	enum exec_status status         = SAS_ABORTED_TASK;
2734 	enum isci_request_status request_status;
2735 	enum isci_completion_selection complete_to_host
2736 		= isci_perform_normal_io_completion;
2737 
2738 	dev_dbg(&ihost->pdev->dev,
2739 		"%s: request = %p, task = %p,\n"
2740 		"task->data_dir = %d completion_status = 0x%x\n",
2741 		__func__,
2742 		request,
2743 		task,
2744 		task->data_dir,
2745 		completion_status);
2746 
2747 	spin_lock(&request->state_lock);
2748 	request_status = request->status;
2749 
2750 	/* Decode the request status.  Note that if the request has been
2751 	 * aborted by a task management function, we don't care
2752 	 * what the status is.
2753 	 */
2754 	switch (request_status) {
2755 
2756 	case aborted:
2757 		/* "aborted" indicates that the request was aborted by a task
2758 		 * management function, since once a task management request is
2759 		 * performed by the device, the request only completes because
2760 		 * of the subsequent driver terminate.
2761 		 *
2762 		 * Aborted also means an external thread is explicitly managing
2763 		 * this request, so that we do not complete it up the stack.
2764 		 *
2765 		 * The target is still there (since the TMF was successful).
2766 		 */
2767 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2768 		response = SAS_TASK_COMPLETE;
2769 
2770 		/* See if the device has been/is being stopped. Note
2771 		 * that we ignore the quiesce state, since we are
2772 		 * concerned about the actual device state.
2773 		 */
2774 		if (!idev)
2775 			status = SAS_DEVICE_UNKNOWN;
2776 		else
2777 			status = SAS_ABORTED_TASK;
2778 
2779 		complete_to_host = isci_perform_aborted_io_completion;
2780 		/* This was an aborted request. */
2781 
2782 		spin_unlock(&request->state_lock);
2783 		break;
2784 
2785 	case aborting:
2786 		/* aborting means that the task management function tried and
2787 		 * failed to abort the request. We need to note the request
2788 		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2789 		 * target as down.
2790 		 *
2791 		 * Aborting also means an external thread is explicitly managing
2792 		 * this request, so that we do not complete it up the stack.
2793 		 */
2794 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2795 		response = SAS_TASK_UNDELIVERED;
2796 
2797 		if (!idev)
2798 			/* The device has been /is being stopped. Note that
2799 			 * we ignore the quiesce state, since we are
2800 			 * concerned about the actual device state.
2801 			 */
2802 			status = SAS_DEVICE_UNKNOWN;
2803 		else
2804 			status = SAS_PHY_DOWN;
2805 
2806 		complete_to_host = isci_perform_aborted_io_completion;
2807 
2808 		/* This was an aborted request. */
2809 
2810 		spin_unlock(&request->state_lock);
2811 		break;
2812 
2813 	case terminating:
2814 
2815 		/* This was a terminated request.  This happens when
2816 		 * the I/O is being terminated because of an action on
2817 		 * the device (reset, tear down, etc.), and the I/O needs
2818 		 * to be completed up the stack.
2819 		 */
2820 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2821 		response = SAS_TASK_UNDELIVERED;
2822 
2823 		/* See if the device has been/is being stopped. Note
2824 		 * that we ignore the quiesce state, since we are
2825 		 * concerned about the actual device state.
2826 		 */
2827 		if (!idev)
2828 			status = SAS_DEVICE_UNKNOWN;
2829 		else
2830 			status = SAS_ABORTED_TASK;
2831 
2832 		complete_to_host = isci_perform_aborted_io_completion;
2833 
2834 		/* This was a terminated request. */
2835 
2836 		spin_unlock(&request->state_lock);
2837 		break;
2838 
2839 	case dead:
2840 		/* This was a terminated request that timed-out during the
2841 		 * termination process.  There is no task to complete to
2842 		 * libsas.
2843 		 */
2844 		complete_to_host = isci_perform_normal_io_completion;
2845 		spin_unlock(&request->state_lock);
2846 		break;
2847 
2848 	default:
2849 
2850 		/* The request is done from an SCU HW perspective. */
2851 		request->status = completed;
2852 
2853 		spin_unlock(&request->state_lock);
2854 
2855 		/* This is an active request being completed from the core. */
2856 		switch (completion_status) {
2857 
2858 		case SCI_IO_FAILURE_RESPONSE_VALID:
2859 			dev_dbg(&ihost->pdev->dev,
2860 				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2861 				__func__,
2862 				request,
2863 				task);
2864 
2865 			if (sas_protocol_ata(task->task_proto)) {
2866 				isci_process_stp_response(task, &request->stp.rsp);
2867 			} else if (SAS_PROTOCOL_SSP == task->task_proto) {
2868 
2869 				/* crack the iu response buffer. */
2870 				resp_iu = &request->ssp.rsp;
2871 				isci_request_process_response_iu(task, resp_iu,
2872 								 &ihost->pdev->dev);
2873 
2874 			} else if (SAS_PROTOCOL_SMP == task->task_proto) {
2875 
2876 				dev_err(&ihost->pdev->dev,
2877 					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2878 					"SAS_PROTOCOL_SMP protocol\n",
2879 					__func__);
2880 
2881 			} else
2882 				dev_err(&ihost->pdev->dev,
2883 					"%s: unknown protocol\n", __func__);
2884 
2885 			/* use the task status set in the task struct by the
2886 			 * isci_request_process_response_iu call.
2887 			 */
2888 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2889 			response = task->task_status.resp;
2890 			status = task->task_status.stat;
2891 			break;
2892 
2893 		case SCI_IO_SUCCESS:
2894 		case SCI_IO_SUCCESS_IO_DONE_EARLY:
2895 
2896 			response = SAS_TASK_COMPLETE;
2897 			status   = SAM_STAT_GOOD;
2898 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2899 
2900 			if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2901 
2902 				/* This was an SSP / STP / SATA transfer.
2903 				 * There is a possibility that less data than
2904 				 * the maximum was transferred.
2905 				 */
2906 				u32 transferred_length = sci_req_tx_bytes(request);
2907 
2908 				task->task_status.residual
2909 					= task->total_xfer_len - transferred_length;
2910 
2911 				/* If there were residual bytes, call this an
2912 				 * underrun.
2913 				 */
2914 				if (task->task_status.residual != 0)
2915 					status = SAS_DATA_UNDERRUN;
2916 
2917 				dev_dbg(&ihost->pdev->dev,
2918 					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2919 					__func__,
2920 					status);
2921 
2922 			} else
2923 				dev_dbg(&ihost->pdev->dev,
2924 					"%s: SCI_IO_SUCCESS\n",
2925 					__func__);
2926 
2927 			break;
2928 
2929 		case SCI_IO_FAILURE_TERMINATED:
2930 			dev_dbg(&ihost->pdev->dev,
2931 				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2932 				__func__,
2933 				request,
2934 				task);
2935 
2936 			/* The request was terminated explicitly.  No handling
2937 			 * is needed in the SCSI error handler path.
2938 			 */
2939 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2940 			response = SAS_TASK_UNDELIVERED;
2941 
2942 			/* See if the device has been/is being stopped. Note
2943 			 * that we ignore the quiesce state, since we are
2944 			 * concerned about the actual device state.
2945 			 */
2946 			if (!idev)
2947 				status = SAS_DEVICE_UNKNOWN;
2948 			else
2949 				status = SAS_ABORTED_TASK;
2950 
2951 			complete_to_host = isci_perform_normal_io_completion;
2952 			break;
2953 
2954 		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2955 
2956 			isci_request_handle_controller_specific_errors(
2957 				idev, request, task, &response, &status,
2958 				&complete_to_host);
2959 
2960 			break;
2961 
2962 		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2963 			/* This is a special case, in that the I/O completion
2964 			 * is telling us that the device needs a reset.
2965 			 * In order for the device reset condition to be
2966 			 * noticed, the I/O has to be handled in the error
2967 			 * handler.  Set the reset flag and cause the
2968 			 * SCSI error thread to be scheduled.
2969 			 */
2970 			spin_lock_irqsave(&task->task_state_lock, task_flags);
2971 			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2972 			spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2973 
2974 			/* Fail the I/O. */
2975 			response = SAS_TASK_UNDELIVERED;
2976 			status = SAM_STAT_TASK_ABORTED;
2977 
2978 			complete_to_host = isci_perform_error_io_completion;
2979 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2980 			break;
2981 
2982 		case SCI_FAILURE_RETRY_REQUIRED:
2983 
2984 			/* Fail the I/O so it can be retried. */
2985 			response = SAS_TASK_UNDELIVERED;
2986 			if (!idev)
2987 				status = SAS_DEVICE_UNKNOWN;
2988 			else
2989 				status = SAS_ABORTED_TASK;
2990 
2991 			complete_to_host = isci_perform_normal_io_completion;
2992 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2993 			break;
2994 
2995 
2996 		default:
2997 			/* Catch any otherwise unhandled error codes here. */
2998 			dev_dbg(&ihost->pdev->dev,
2999 				 "%s: invalid completion code: 0x%x - "
3000 				 "isci_request = %p\n",
3001 				 __func__, completion_status, request);
3002 
3003 			response = SAS_TASK_UNDELIVERED;
3004 
3005 			/* See if the device has been/is being stopped. Note
3006 			 * that we ignore the quiesce state, since we are
3007 			 * concerned about the actual device state.
3008 			 */
3009 			if (!idev)
3010 				status = SAS_DEVICE_UNKNOWN;
3011 			else
3012 				status = SAS_ABORTED_TASK;
3013 
3014 			if (SAS_PROTOCOL_SMP == task->task_proto) {
3015 				set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3016 				complete_to_host = isci_perform_normal_io_completion;
3017 			} else {
3018 				clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3019 				complete_to_host = isci_perform_error_io_completion;
3020 			}
3021 			break;
3022 		}
3023 		break;
3024 	}
3025 
3026 	switch (task->task_proto) {
3027 	case SAS_PROTOCOL_SSP:
3028 		if (task->data_dir == DMA_NONE)
3029 			break;
3030 		if (task->num_scatter == 0)
3031 			/* 0 indicates a single dma address */
3032 			dma_unmap_single(&ihost->pdev->dev,
3033 					 request->zero_scatter_daddr,
3034 					 task->total_xfer_len, task->data_dir);
3035 		else  /* unmap the sgl dma addresses */
3036 			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
3037 				     request->num_sg_entries, task->data_dir);
3038 		break;
3039 	case SAS_PROTOCOL_SMP: {
3040 		struct scatterlist *sg = &task->smp_task.smp_req;
3041 		struct smp_req *smp_req;
3042 		void *kaddr;
3043 
3044 		dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3045 
3046 		/* need to swab it back in case the command buffer is re-used */
3047 		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3048 		smp_req = kaddr + sg->offset;
3049 		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3050 		kunmap_atomic(kaddr, KM_IRQ0);
3051 		break;
3052 	}
3053 	default:
3054 		break;
3055 	}
3056 
3057 	/* Put the completed request on the correct list */
3058 	isci_task_save_for_upper_layer_completion(ihost, request, response,
3059 						  status, complete_to_host);
3061 
3062 	/* complete the io request to the core. */
3063 	sci_controller_complete_io(ihost, request->target_device, request);
3064 	isci_put_device(idev);
3065 
3066 	/* set terminated handle so it cannot be completed or
3067 	 * terminated again, and to cause any calls into abort
3068 	 * task to recognize the already completed case.
3069 	 */
3070 	set_bit(IREQ_TERMINATED, &request->flags);
3071 }
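
/*
 * Summary of isci_request_io_request_complete(): decode the request and
 * completion status into a (response, status, completion-path) triple,
 * unmap any SSP/SMP DMA mappings, queue the task on the appropriate host
 * completion list, then release the request to the core and mark it
 * IREQ_TERMINATED.
 */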
3072 
3073 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3074 {
3075 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3076 	struct domain_device *dev = ireq->target_device->domain_dev;
3077 	enum sci_base_request_states state;
3078 	struct sas_task *task;
3079 
3080 	/* XXX as hch said always creating an internal sas_task for tmf
3081 	 * requests would simplify the driver
3082 	 */
3083 	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
3084 
3085 	/* all unaccelerated request types (non ssp or ncq) handled with
3086 	 * substates
3087 	 */
3088 	if (!task && dev->dev_type == SAS_END_DEV) {
3089 		state = SCI_REQ_TASK_WAIT_TC_COMP;
3090 	} else if (!task &&
3091 		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3092 		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3093 		state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
3094 	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3095 		state = SCI_REQ_SMP_WAIT_RESP;
3096 	} else if (task && sas_protocol_ata(task->task_proto) &&
3097 		   !task->ata_task.use_ncq) {
3098 		if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
3099 			task->ata_task.fis.command == ATA_CMD_PACKET) {
3100 			state = SCI_REQ_ATAPI_WAIT_H2D;
3101 		} else if (task->data_dir == DMA_NONE) {
3102 			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
3103 		} else if (task->ata_task.dma_xfer) {
3104 			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
3105 		} else /* PIO */ {
3106 			state = SCI_REQ_STP_PIO_WAIT_H2D;
3107 		}
3108 	} else {
3109 		/* SSP or NCQ are fully accelerated, no substates */
3110 		return;
3111 	}
3112 	sci_change_state(sm, state);
3113 }
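
/*
 * Substate selection above, in table form:
 *
 *   TMF to a SAS_END_DEV      -> SCI_REQ_TASK_WAIT_TC_COMP
 *   SATA soft-reset TMF       -> SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED
 *   SMP task                  -> SCI_REQ_SMP_WAIT_RESP
 *   ATAPI packet command      -> SCI_REQ_ATAPI_WAIT_H2D
 *   STP non-data              -> SCI_REQ_STP_NON_DATA_WAIT_H2D
 *   STP DMA (non-NCQ)         -> SCI_REQ_STP_UDMA_WAIT_TC_COMP
 *   STP PIO                   -> SCI_REQ_STP_PIO_WAIT_H2D
 *   SSP I/O or NCQ            -> fully accelerated, no substate
 */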
3114 
3115 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3116 {
3117 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3118 	struct isci_host *ihost = ireq->owning_controller;
3119 
3120 	/* Tell the SCI_USER that the IO request is complete */
3121 	if (!test_bit(IREQ_TMF, &ireq->flags))
3122 		isci_request_io_request_complete(ihost, ireq,
3123 						 ireq->sci_status);
3124 	else
3125 		isci_task_request_complete(ihost, ireq, ireq->sci_status);
3126 }
3127 
3128 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3129 {
3130 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3131 
3132 	/* Setting the abort bit in the Task Context is required by the silicon. */
3133 	ireq->tc->abort = 1;
3134 }
3135 
3136 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3137 {
3138 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3139 
3140 	ireq->target_device->working_request = ireq;
3141 }
3142 
3143 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3144 {
3145 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3146 
3147 	ireq->target_device->working_request = ireq;
3148 }
3149 
3150 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3151 {
3152 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3153 
3154 	ireq->target_device->working_request = ireq;
3155 }
3156 
3157 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3158 {
3159 	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3160 	struct scu_task_context *tc = ireq->tc;
3161 	struct host_to_dev_fis *h2d_fis;
3162 	enum sci_status status;
3163 
3164 	/* Clear the SRST bit */
3165 	h2d_fis = &ireq->stp.cmd;
3166 	h2d_fis->control = 0;
3167 
3168 	/* Clear the TC control bit */
3169 	tc->control_frame = 0;
3170 
3171 	status = sci_controller_continue_io(ireq);
3172 	WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3173 }
3174 
3175 static const struct sci_base_state sci_request_state_table[] = {
3176 	[SCI_REQ_INIT] = { },
3177 	[SCI_REQ_CONSTRUCTED] = { },
3178 	[SCI_REQ_STARTED] = {
3179 		.enter_state = sci_request_started_state_enter,
3180 	},
3181 	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3182 		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3183 	},
3184 	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3185 	[SCI_REQ_STP_PIO_WAIT_H2D] = {
3186 		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3187 	},
3188 	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3189 	[SCI_REQ_STP_PIO_DATA_IN] = { },
3190 	[SCI_REQ_STP_PIO_DATA_OUT] = { },
3191 	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3192 	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3193 	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3194 		.enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3195 	},
3196 	[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3197 		.enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3198 	},
3199 	[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3200 	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
3201 	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
3202 	[SCI_REQ_SMP_WAIT_RESP] = { },
3203 	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
3204 	[SCI_REQ_ATAPI_WAIT_H2D] = { },
3205 	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3206 	[SCI_REQ_ATAPI_WAIT_D2H] = { },
3207 	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3208 	[SCI_REQ_COMPLETED] = {
3209 		.enter_state = sci_request_completed_state_enter,
3210 	},
3211 	[SCI_REQ_ABORTING] = {
3212 		.enter_state = sci_request_aborting_state_enter,
3213 	},
3214 	[SCI_REQ_FINAL] = { },
3215 };
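
/*
 * Sketch (an assumption about the sci_base_state_machine contract):
 * sci_change_state() runs the .enter_state hook of the destination state,
 * so reaching SCI_REQ_COMPLETED fires sci_request_completed_state_enter()
 * and completes the request back to the host:
 */
#if 0
	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
	/* ... request is constructed, started and runs ... */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);	/* fires enter hook */
#endif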
3216 
3217 static void
3218 sci_general_request_construct(struct isci_host *ihost,
3219 				   struct isci_remote_device *idev,
3220 				   struct isci_request *ireq)
3221 {
3222 	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3223 
3224 	ireq->target_device = idev;
3225 	ireq->protocol = SCIC_NO_PROTOCOL;
3226 	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3227 
3228 	ireq->sci_status   = SCI_SUCCESS;
3229 	ireq->scu_status   = 0;
3230 	ireq->post_context = 0xFFFFFFFF;
3231 }
3232 
3233 static enum sci_status
3234 sci_io_request_construct(struct isci_host *ihost,
3235 			  struct isci_remote_device *idev,
3236 			  struct isci_request *ireq)
3237 {
3238 	struct domain_device *dev = idev->domain_dev;
3239 	enum sci_status status = SCI_SUCCESS;
3240 
3241 	/* Build the common part of the request */
3242 	sci_general_request_construct(ihost, idev, ireq);
3243 
3244 	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3245 		return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3246 
3247 	if (dev->dev_type == SAS_END_DEV)
3248 		/* pass */;
3249 	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
3250 		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3251 	else if (dev_is_expander(dev))
3252 		/* pass */;
3253 	else
3254 		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3255 
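	/* Zero the task context up to, but not including, the embedded SGL pair. */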
3256 	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3257 
3258 	return status;
3259 }
3260 
3261 enum sci_status sci_task_request_construct(struct isci_host *ihost,
3262 					    struct isci_remote_device *idev,
3263 					    u16 io_tag, struct isci_request *ireq)
3264 {
3265 	struct domain_device *dev = idev->domain_dev;
3266 	enum sci_status status = SCI_SUCCESS;
3267 
3268 	/* Build the common part of the request */
3269 	sci_general_request_construct(ihost, idev, ireq);
3270 
3271 	if (dev->dev_type == SAS_END_DEV ||
3272 	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
3273 		set_bit(IREQ_TMF, &ireq->flags);
3274 		memset(ireq->tc, 0, sizeof(struct scu_task_context));
3275 	} else
3276 		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3277 
3278 	return status;
3279 }
3280 
3281 static enum sci_status isci_request_ssp_request_construct(
3282 	struct isci_request *request)
3283 {
3284 	enum sci_status status;
3285 
3286 	dev_dbg(&request->isci_host->pdev->dev,
3287 		"%s: request = %p\n",
3288 		__func__,
3289 		request);
3290 	status = sci_io_request_construct_basic_ssp(request);
3291 	return status;
3292 }
3293 
3294 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3295 {
3296 	struct sas_task *task = isci_request_access_task(ireq);
3297 	struct host_to_dev_fis *fis = &ireq->stp.cmd;
3298 	struct ata_queued_cmd *qc = task->uldd_task;
3299 	enum sci_status status;
3300 
3301 	dev_dbg(&ireq->isci_host->pdev->dev,
3302 		"%s: ireq = %p\n",
3303 		__func__,
3304 		ireq);
3305 
3306 	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
3307 	if (!task->ata_task.device_control_reg_update)
3308 		fis->flags |= 0x80;	/* set the C bit: this is a command, not a control, update */
3309 	fis->flags &= 0xF0;	/* clear the PM port field */
3310 
3311 	status = sci_io_request_construct_basic_sata(ireq);
3312 
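	/*
	 * For NCQ (first-party DMA) commands the queue tag is carried in
	 * bits 7:3 of the FIS sector count field, hence the shift by 3;
	 * e.g. tag 5 is encoded as a sector_count of 0x28.
	 */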
3313 	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3314 		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
3315 		fis->sector_count = qc->tag << 3;
3316 		ireq->tc->type.stp.ncq_tag = qc->tag;
3317 	}
3318 
3319 	return status;
3320 }
3321 
3322 static enum sci_status
3323 sci_io_request_construct_smp(struct device *dev,
3324 			      struct isci_request *ireq,
3325 			      struct sas_task *task)
3326 {
3327 	struct scatterlist *sg = &task->smp_task.smp_req;
3328 	struct isci_remote_device *idev;
3329 	struct scu_task_context *task_context;
3330 	struct isci_port *iport;
3331 	struct smp_req *smp_req;
3332 	void *kaddr;
3333 	u8 req_len;
3334 	u32 cmd;
3335 
3336 	kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
3337 	smp_req = kaddr + sg->offset;
3338 	/*
3339 	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3340 	 * functions under SAS 2.0, a zero request length really indicates
3341 	 * a non-zero default length.
3342 	 */
3343 	if (smp_req->req_len == 0) {
3344 		switch (smp_req->func) {
3345 		case SMP_DISCOVER:
3346 		case SMP_REPORT_PHY_ERR_LOG:
3347 		case SMP_REPORT_PHY_SATA:
3348 		case SMP_REPORT_ROUTE_INFO:
3349 			smp_req->req_len = 2;
3350 			break;
3351 		case SMP_CONF_ROUTE_INFO:
3352 		case SMP_PHY_CONTROL:
3353 		case SMP_PHY_TEST_FUNCTION:
3354 			smp_req->req_len = 9;
3355 			break;
3356 			/* Default - zero is a valid request length for SAS 2.0. */
3357 		}
3358 	}
3359 	req_len = smp_req->req_len;
3360 	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3361 	cmd = *(u32 *) smp_req;
3362 	kunmap_atomic(kaddr, KM_IRQ0);
3363 
3364 	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3365 		return SCI_FAILURE;
3366 
3367 	ireq->protocol = SCIC_SMP_PROTOCOL;
3368 
3369 	/* The request dwords were byte swapped above, before the DMA mapping. */
3370 
3371 	task_context = ireq->tc;
3372 
3373 	idev = ireq->target_device;
3374 	iport = idev->owning_port;
3375 
3376 	/*
3377 	 * Fill in the TC with its required data
3378 	 * 00h
3379 	 */
3380 	task_context->priority = 0;
3381 	task_context->initiator_request = 1;
3382 	task_context->connection_rate = idev->connection_rate;
3383 	task_context->protocol_engine_index = ISCI_PEG;
3384 	task_context->logical_port_index = iport->physical_port_index;
3385 	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3386 	task_context->abort = 0;
3387 	task_context->valid = SCU_TASK_CONTEXT_VALID;
3388 	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3389 
3390 	/* 04h */
3391 	task_context->remote_node_index = idev->rnc.remote_node_index;
3392 	task_context->command_code = 0;
3393 	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3394 
3395 	/* 08h */
3396 	task_context->link_layer_control = 0;
3397 	task_context->do_not_dma_ssp_good_response = 1;
3398 	task_context->strict_ordering = 0;
3399 	task_context->control_frame = 1;
3400 	task_context->timeout_enable = 0;
3401 	task_context->block_guard_enable = 0;
3402 
3403 	/* 0ch */
3404 	task_context->address_modifier = 0;
3405 
3406 	/* 10h */
3407 	task_context->ssp_command_iu_length = req_len;
3408 
3409 	/* 14h */
3410 	task_context->transfer_length_bytes = 0;
3411 
3412 	/*
3413 	 * 18h ~ 30h, protocol specific
3414 	 * since the command IU has been built by the framework at this point,
3415 	 * we just copy the first DWord from the command IU to this location. */
3416 	memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3417 
3418 	/*
3419 	 * 40h
3420 	 * "For SMP you could program it to zero. We would prefer that way
3421 	 * so that done code will be consistent." - Venki
3422 	 */
3423 	task_context->task_phase = 0;
3424 
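	/*
	 * Pack the post context: request type, protocol engine group,
	 * logical port and the task context index from the I/O tag are
	 * OR'd into the single u32 later written to the SCU post queue.
	 */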
3425 	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3426 			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3427 			       (iport->physical_port_index <<
3428 				SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3429 			      ISCI_TAG_TCI(ireq->io_tag));
3430 	/*
3431 	 * Copy the physical address of the command buffer to the SCU Task
3432 	 * Context; the buffer should not contain the command header.
3433 	 */
3434 	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg) + sizeof(u32));
3435 	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3436 
3437 	/* SMP response comes as UF, so no need to set response IU address. */
3438 	task_context->response_iu_upper = 0;
3439 	task_context->response_iu_lower = 0;
3440 
3441 	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3442 
3443 	return SCI_SUCCESS;
3444 }
3445 
3446 /**
3447  * isci_smp_request_build() - This function builds the smp request.
3448  * @ireq: This parameter points to the isci_request allocated in the
3449  *    request construct function.
3450  *
3451  * SCI_SUCCESS on successful completion, or a specific failure code.
3452  */
3453 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3454 {
3455 	struct sas_task *task = isci_request_access_task(ireq);
3456 	struct device *dev = &ireq->isci_host->pdev->dev;
3457 	enum sci_status status;
3458 
3459 	status = sci_io_request_construct_smp(dev, ireq, task);
3460 	if (status != SCI_SUCCESS)
3461 		dev_dbg(&ireq->isci_host->pdev->dev,
3462 			 "%s: failed with status = %d\n",
3463 			 __func__,
3464 			 status);
3465 
3466 	return status;
3467 }
3468 
3469 /**
3470  * isci_io_request_build() - This function builds the io request object.
3471  * @ihost: This parameter specifies the ISCI host object
3472  * @request: This parameter points to the isci_request object allocated in the
3473  *    request construct function.
3474  * @idev: This parameter is the handle for the sci core's remote device
3475  *    object that is the destination for this request.
3476  *
3477  * SCI_SUCCESS on successful completion, or a specific failure code.
3478  */
3479 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3480 					     struct isci_request *request,
3481 					     struct isci_remote_device *idev)
3482 {
3483 	enum sci_status status = SCI_SUCCESS;
3484 	struct sas_task *task = isci_request_access_task(request);
3485 
3486 	dev_dbg(&ihost->pdev->dev,
3487 		"%s: idev = 0x%p; request = %p, "
3488 		"num_scatter = %d\n",
3489 		__func__,
3490 		idev,
3491 		request,
3492 		task->num_scatter);
3493 
3494 	/* map the sgl addresses, if present.
3495 	 * libata does the mapping for sata devices
3496 	 * before we get the request.
3497 	 */
3498 	if (task->num_scatter &&
3499 	    !sas_protocol_ata(task->task_proto) &&
3500 	    !(SAS_PROTOCOL_SMP & task->task_proto)) {
3501 
3502 		request->num_sg_entries = dma_map_sg(
3503 			&ihost->pdev->dev,
3504 			task->scatter,
3505 			task->num_scatter,
3506 			task->data_dir
3507 			);
3508 
3509 		if (request->num_sg_entries == 0)
3510 			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3511 	}
3512 
3513 	status = sci_io_request_construct(ihost, idev, request);
3514 
3515 	if (status != SCI_SUCCESS) {
3516 		dev_dbg(&ihost->pdev->dev,
3517 			 "%s: failed request construct\n",
3518 			 __func__);
3519 		return SCI_FAILURE;
3520 	}
3521 
3522 	switch (task->task_proto) {
3523 	case SAS_PROTOCOL_SMP:
3524 		status = isci_smp_request_build(request);
3525 		break;
3526 	case SAS_PROTOCOL_SSP:
3527 		status = isci_request_ssp_request_construct(request);
3528 		break;
3529 	case SAS_PROTOCOL_SATA:
3530 	case SAS_PROTOCOL_STP:
3531 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3532 		status = isci_request_stp_request_construct(request);
3533 		break;
3534 	default:
3535 		dev_dbg(&ihost->pdev->dev,
3536 			 "%s: unknown protocol\n", __func__);
3537 		return SCI_FAILURE;
3538 	}
3539 
3540 	return status;
3541 }
3542 
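/*
 * Note that requests are never allocated here: every isci_request lives
 * in the host's pre-allocated reqs[] array and is simply looked up by
 * the task context index (TCI) encoded in the tag.
 */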
3543 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3544 {
3545 	struct isci_request *ireq;
3546 
3547 	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3548 	ireq->io_tag = tag;
3549 	ireq->io_request_completion = NULL;
3550 	ireq->flags = 0;
3551 	ireq->num_sg_entries = 0;
3552 	INIT_LIST_HEAD(&ireq->completed_node);
3553 	INIT_LIST_HEAD(&ireq->dev_node);
3554 	isci_request_change_state(ireq, allocated);
3555 
3556 	return ireq;
3557 }
3558 
3559 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3560 						     struct sas_task *task,
3561 						     u16 tag)
3562 {
3563 	struct isci_request *ireq;
3564 
3565 	ireq = isci_request_from_tag(ihost, tag);
3566 	ireq->ttype_ptr.io_task_ptr = task;
3567 	ireq->ttype = io_task;
3568 	task->lldd_task = ireq;
3569 
3570 	return ireq;
3571 }
3572 
3573 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3574 					       struct isci_tmf *isci_tmf,
3575 					       u16 tag)
3576 {
3577 	struct isci_request *ireq;
3578 
3579 	ireq = isci_request_from_tag(ihost, tag);
3580 	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3581 	ireq->ttype = tmf_task;
3582 
3583 	return ireq;
3584 }
3585 
3586 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3587 			 struct sas_task *task, u16 tag)
3588 {
3589 	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3590 	struct isci_request *ireq;
3591 	unsigned long flags;
3592 	int ret = 0;
3593 
3594 	/* do common allocation and init of request object. */
3595 	ireq = isci_io_request_from_tag(ihost, task, tag);
3596 
3597 	status = isci_io_request_build(ihost, ireq, idev);
3598 	if (status != SCI_SUCCESS) {
3599 		dev_dbg(&ihost->pdev->dev,
3600 			 "%s: request_construct failed - status = 0x%x\n",
3601 			 __func__,
3602 			 status);
3603 		return status;
3604 	}
3605 
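	/*
	 * ihost->scic_lock covers both starting the request in the core and
	 * queueing it on the remote device, keeping the two consistent with
	 * respect to completions and terminations.
	 */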
3606 	spin_lock_irqsave(&ihost->scic_lock, flags);
3607 
3608 	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3609 
3610 		if (isci_task_is_ncq_recovery(task)) {
3611 
3612 			/* The device is in an NCQ recovery state.  Issue the
3613 			 * request on the task side.  Note that it will
3614 			 * complete on the I/O request side because the
3615 			 * request was built that way (i.e. the
3616 			 * IREQ_TMF flag is not set).
3617 			 */
3618 			status = sci_controller_start_task(ihost,
3619 							    idev,
3620 							    ireq);
3621 		} else {
3622 			status = SCI_FAILURE;
3623 		}
3624 	} else {
3625 		/* send the request; the I/O tag was already allocated by the caller. */
3626 		status = sci_controller_start_io(ihost, idev,
3627 						  ireq);
3628 	}
3629 
3630 	if (status != SCI_SUCCESS &&
3631 	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3632 		dev_dbg(&ihost->pdev->dev,
3633 			 "%s: failed request start (0x%x)\n",
3634 			 __func__, status);
3635 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
3636 		return status;
3637 	}
3638 
3639 	/* Either I/O started OK, or the core has signaled that
3640 	 * the device needs a target reset.
3641 	 *
3642 	 * In either case, hold onto the I/O for later.
3643 	 *
3644 	 * Update its status and add it to the list in the
3645 	 * remote device object.
3646 	 */
3647 	list_add(&ireq->dev_node, &idev->reqs_in_process);
3648 
3649 	if (status == SCI_SUCCESS) {
3650 		isci_request_change_state(ireq, started);
3651 	} else {
3652 		/* The request did not really start in the
3653 		 * hardware, so clear the request handle
3654 		 * here so no terminations will be done.
3655 		 */
3656 		set_bit(IREQ_TERMINATED, &ireq->flags);
3657 		isci_request_change_state(ireq, completed);
3658 	}
3659 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
3660 
3661 	if (status ==
3662 	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3663 		/* Signal libsas that we need the SCSI error
3664 		 * handler thread to work on this I/O and that
3665 		 * we want a device reset.
3666 		 */
3667 		spin_lock_irqsave(&task->task_state_lock, flags);
3668 		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3669 		spin_unlock_irqrestore(&task->task_state_lock, flags);
3670 
3671 		/* Cause this task to be scheduled in the SCSI error
3672 		 * handler thread.
3673 		 */
3674 		isci_execpath_callback(ihost, task,
3675 				       sas_task_abort);
3676 
3677 		/* Change the status, since we are holding
3678 		 * the I/O until it is managed by the SCSI
3679 		 * error handler.
3680 		 */
3681 		status = SCI_SUCCESS;
3682 	}
3683 
3684 	return ret;
3685 }
3686