xref: /openbmc/linux/drivers/scsi/isci/request.h (revision f00e6ba4996a34f098fe50c78077f0568fd838ec)
1 /*
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * BSD LICENSE
25  *
26  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27  * All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions
31  * are met:
32  *
33  *   * Redistributions of source code must retain the above copyright
34  *     notice, this list of conditions and the following disclaimer.
35  *   * Redistributions in binary form must reproduce the above copyright
36  *     notice, this list of conditions and the following disclaimer in
37  *     the documentation and/or other materials provided with the
38  *     distribution.
39  *   * Neither the name of Intel Corporation nor the names of its
40  *     contributors may be used to endorse or promote products derived
41  *     from this software without specific prior written permission.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54  */
55 
56 #ifndef _ISCI_REQUEST_H_
57 #define _ISCI_REQUEST_H_
58 
59 #include "isci.h"
60 #include "host.h"
61 #include "scu_task_context.h"
62 
63 /**
64  * struct isci_request_status - This enum defines the possible states of an I/O
65  *    request.
66  *
67  *
68  */
69 enum isci_request_status {
70 	unallocated = 0x00,
71 	allocated   = 0x01,
72 	started     = 0x02,
73 	completed   = 0x03,
74 	aborting    = 0x04,
75 	aborted     = 0x05,
76 	terminating = 0x06,
77 	dead        = 0x07
78 };
79 
80 enum task_type {
81 	io_task  = 0,
82 	tmf_task = 1
83 };
84 
85 enum sci_request_protocol {
86 	SCIC_NO_PROTOCOL,
87 	SCIC_SMP_PROTOCOL,
88 	SCIC_SSP_PROTOCOL,
89 	SCIC_STP_PROTOCOL
90 }; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
91 
92 struct scic_sds_stp_request {
93 	union {
94 		u32 ncq;
95 
96 		u32 udma;
97 
98 		struct scic_sds_stp_pio_request {
99 			/**
100 			 * Total transfer for the entire PIO request recorded at request constuction
101 			 * time.
102 			 *
103 			 * @todo Should we just decrement this value for each byte of data transitted
104 			 *       or received to elemenate the current_transfer_bytes field?
105 			 */
106 			u32 total_transfer_bytes;
107 
108 			/**
109 			 * Total number of bytes received/transmitted in data frames since the start
110 			 * of the IO request.  At the end of the IO request this should equal the
111 			 * total_transfer_bytes.
112 			 */
113 			u32 current_transfer_bytes;
114 
115 			/**
116 			 * The number of bytes requested in the in the PIO setup.
117 			 */
118 			u32 pio_transfer_bytes;
119 
120 			/**
121 			 * PIO Setup ending status value to tell us if we need to wait for another FIS
122 			 * or if the transfer is complete. On the receipt of a D2H FIS this will be
123 			 * the status field of that FIS.
124 			 */
125 			u8 ending_status;
126 
127 			/**
128 			 * On receipt of a D2H FIS this will be the ending error field if the
129 			 * ending_status has the SATA_STATUS_ERR bit set.
130 			 */
131 			u8 ending_error;
132 
133 			struct scic_sds_request_pio_sgl {
134 				struct scu_sgl_element_pair *sgl_pair;
135 				u8 sgl_set;
136 				u32 sgl_offset;
137 			} request_current;
138 		} pio;
139 
140 		struct {
141 			/**
142 			 * The number of bytes requested in the PIO setup before CDB data frame.
143 			 */
144 			u32 device_preferred_cdb_length;
145 		} packet;
146 	} type;
147 };
148 
149 struct scic_sds_request {
150 	/**
151 	 * This field contains the information for the base request state machine.
152 	 */
153 	struct sci_base_state_machine state_machine;
154 
155 	/**
156 	 * This field simply points to the controller to which this IO request
157 	 * is associated.
158 	 */
159 	struct scic_sds_controller *owning_controller;
160 
161 	/**
162 	 * This field simply points to the remote device to which this IO request
163 	 * is associated.
164 	 */
165 	struct scic_sds_remote_device *target_device;
166 
167 	/**
168 	 * This field is utilized to determine if the SCI user is managing
169 	 * the IO tag for this request or if the core is managing it.
170 	 */
171 	bool was_tag_assigned_by_user;
172 
173 	/**
174 	 * This field indicates the IO tag for this request.  The IO tag is
175 	 * comprised of the task_index and a sequence count. The sequence count
176 	 * is utilized to help identify tasks from one life to another.
177 	 */
178 	u16 io_tag;
179 
180 	/**
181 	 * This field specifies the protocol being utilized for this
182 	 * IO request.
183 	 */
184 	enum sci_request_protocol protocol;
185 
186 	/**
187 	 * This field indicates the completion status taken from the SCUs
188 	 * completion code.  It indicates the completion result for the SCU hardware.
189 	 */
190 	u32 scu_status;
191 
192 	/**
193 	 * This field indicates the completion status returned to the SCI user.  It
194 	 * indicates the users view of the io request completion.
195 	 */
196 	u32 sci_status;
197 
198 	/**
199 	 * This field contains the value to be utilized when posting (e.g. Post_TC,
200 	 * Post_TC_Abort) this request to the silicon.
201 	 */
202 	u32 post_context;
203 
204 	struct scu_task_context *task_context_buffer;
205 	struct scu_task_context tc ____cacheline_aligned;
206 
207 	/* could be larger with sg chaining */
208 	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
209 	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
210 
211 	/**
212 	 * This field indicates if this request is a task management request or
213 	 * normal IO request.
214 	 */
215 	bool is_task_management_request;
216 
217 	/**
218 	 * This field is a pointer to the stored rx frame data.  It is used in STP
219 	 * internal requests and SMP response frames.  If this field is non-NULL the
220 	 * saved frame must be released on IO request completion.
221 	 *
222 	 * @todo In the future do we want to keep a list of RX frame buffers?
223 	 */
224 	u32 saved_rx_frame_index;
225 
226 	/**
227 	 * This field specifies the current state handlers in place for this
228 	 * IO Request object.  This field is updated each time the request
229 	 * changes state.
230 	 */
231 	const struct scic_sds_io_request_state_handler *state_handlers;
232 
233 	/**
234 	 * This field in the recorded device sequence for the io request.  This is
235 	 * recorded during the build operation and is compared in the start
236 	 * operation.  If the sequence is different then there was a change of
237 	 * devices from the build to start operations.
238 	 */
239 	u8 device_sequence;
240 
241 	union {
242 		struct {
243 			union {
244 				struct ssp_cmd_iu cmd;
245 				struct ssp_task_iu tmf;
246 			};
247 			union {
248 				struct ssp_response_iu rsp;
249 				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
250 			};
251 		} ssp;
252 
253 		struct {
254 			struct smp_req cmd;
255 			struct smp_resp rsp;
256 		} smp;
257 
258 		struct {
259 			struct scic_sds_stp_request req;
260 			struct host_to_dev_fis cmd;
261 			struct dev_to_host_fis rsp;
262 		} stp;
263 	};
264 
265 };
266 
267 static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
268 {
269 	struct scic_sds_request *sci_req;
270 
271 	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
272 	return sci_req;
273 }
274 
275 struct isci_request {
276 	enum isci_request_status status;
277 	enum task_type ttype;
278 	unsigned short io_tag;
279 	bool complete_in_target;
280 	bool terminated;
281 
282 	union ttype_ptr_union {
283 		struct sas_task *io_task_ptr;   /* When ttype==io_task  */
284 		struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
285 	} ttype_ptr;
286 	struct isci_host *isci_host;
287 	struct isci_remote_device *isci_device;
288 	/* For use in the requests_to_{complete|abort} lists: */
289 	struct list_head completed_node;
290 	/* For use in the reqs_in_process list: */
291 	struct list_head dev_node;
292 	spinlock_t state_lock;
293 	dma_addr_t request_daddr;
294 	dma_addr_t zero_scatter_daddr;
295 
296 	unsigned int num_sg_entries;                    /* returned by pci_alloc_sg */
297 
298 	/** Note: "io_request_completion" is completed in two different ways
299 	 * depending on whether this is a TMF or regular request.
300 	 * - TMF requests are completed in the thread that started them;
301 	 * - regular requests are completed in the request completion callback
302 	 *   function.
303 	 * This difference in operation allows the aborter of a TMF request
304 	 * to be sure that once the TMF request completes, the I/O that the
305 	 * TMF was aborting is guaranteed to have completed.
306 	 */
307 	struct completion *io_request_completion;
308 	struct scic_sds_request sci;
309 };
310 
311 static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
312 {
313 	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
314 
315 	return ireq;
316 }
317 
318 /**
319  * enum sci_base_request_states - This enumeration depicts all the states for
320  *    the common request state machine.
321  *
322  *
323  */
324 enum sci_base_request_states {
325 	/**
326 	 * Simply the initial state for the base request state machine.
327 	 */
328 	SCI_BASE_REQUEST_STATE_INITIAL,
329 
330 	/**
331 	 * This state indicates that the request has been constructed. This state
332 	 * is entered from the INITIAL state.
333 	 */
334 	SCI_BASE_REQUEST_STATE_CONSTRUCTED,
335 
336 	/**
337 	 * This state indicates that the request has been started. This state is
338 	 * entered from the CONSTRUCTED state.
339 	 */
340 	SCI_BASE_REQUEST_STATE_STARTED,
341 
342 	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
343 	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,
344 
345 	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
346 	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,
347 
348 	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
349 	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
350 	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,
351 
352 	/**
353 	 * While in this state the IO request object is waiting for the TC completion
354 	 * notification for the H2D Register FIS
355 	 */
356 	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,
357 
358 	/**
359 	 * While in this state the IO request object is waiting for either a PIO Setup
360 	 * FIS or a D2H register FIS.  The type of frame received is based on the
361 	 * result of the prior frame and line conditions.
362 	 */
363 	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,
364 
365 	/**
366 	 * While in this state the IO request object is waiting for a DATA frame from
367 	 * the device.
368 	 */
369 	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,
370 
371 	/**
372 	 * While in this state the IO request object is waiting to transmit the next data
373 	 * frame to the device.
374 	 */
375 	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,
376 
377 	/**
378 	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
379 	 * task management request is waiting for the transmission of the
380 	 * initial frame (i.e. command, task, etc.).
381 	 */
382 	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,
383 
384 	/**
385 	 * This sub-state indicates that the started task management request
386 	 * is waiting for the reception of an unsolicited frame
387 	 * (i.e. response IU).
388 	 */
389 	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,
390 
391 	/**
392 	 * This sub-state indicates that the started task management request
393 	 * is waiting for the reception of an unsolicited frame
394 	 * (i.e. response IU).
395 	 */
396 	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,
397 
398 	/**
399 	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP request is
400 	 * waiting for the transmission of the initial frame (i.e. command, task, etc.).
401 	 */
402 	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
403 
404 	/**
405 	 * This state indicates that the request has completed.
406 	 * This state is entered from the STARTED state. This state is entered from
407 	 * the ABORTING state.
408 	 */
409 	SCI_BASE_REQUEST_STATE_COMPLETED,
410 
411 	/**
412 	 * This state indicates that the request is in the process of being
413 	 * terminated/aborted.
414 	 * This state is entered from the CONSTRUCTED state.
415 	 * This state is entered from the STARTED state.
416 	 */
417 	SCI_BASE_REQUEST_STATE_ABORTING,
418 
419 	/**
420 	 * Simply the final state for the base request state machine.
421 	 */
422 	SCI_BASE_REQUEST_STATE_FINAL,
423 };
424 
425 typedef enum sci_status (*scic_sds_io_request_handler_t)
426 				(struct scic_sds_request *request);
427 typedef enum sci_status (*scic_sds_io_request_frame_handler_t)
428 				(struct scic_sds_request *req, u32 frame);
429 typedef enum sci_status (*scic_sds_io_request_event_handler_t)
430 				(struct scic_sds_request *req, u32 event);
431 typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t)
432 				(struct scic_sds_request *req, u32 completion_code);
433 
434 /**
435  * struct scic_sds_io_request_state_handler - This is the SDS core definition
436  *    of the state handlers.
437  *
438  *
439  */
440 struct scic_sds_io_request_state_handler {
441 	/**
442 	 * The start_handler specifies the method invoked when a user attempts to
443 	 * start a request.
444 	 */
445 	scic_sds_io_request_handler_t start_handler;
446 
447 	/**
448 	 * The complete_handler specifies the method invoked when a user attempts to
449 	 * complete a request.
450 	 */
451 	scic_sds_io_request_handler_t complete_handler;
452 
453 	scic_sds_io_request_task_completion_handler_t tc_completion_handler;
454 	scic_sds_io_request_event_handler_t event_handler;
455 	scic_sds_io_request_frame_handler_t frame_handler;
456 };
457 
458 /**
459  * scic_sds_request_get_controller() -
460  *
461  * This macro will return the controller for this io request object
462  */
463 #define scic_sds_request_get_controller(sci_req) \
464 	((sci_req)->owning_controller)
465 
466 /**
467  * scic_sds_request_get_device() -
468  *
469  * This macro will return the device for this io request object
470  */
471 #define scic_sds_request_get_device(sci_req) \
472 	((sci_req)->target_device)
473 
474 /**
475  * scic_sds_request_get_port() -
476  *
477  * This macro will return the port for this io request object
478  */
479 #define scic_sds_request_get_port(sci_req)	\
480 	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))
481 
482 /**
483  * scic_sds_request_get_post_context() -
484  *
485  * This macro returns the constructed post context result for the io request.
486  */
487 #define scic_sds_request_get_post_context(sci_req)	\
488 	((sci_req)->post_context)
489 
490 /**
491  * scic_sds_request_get_task_context() -
492  *
493  * This is a helper macro to return the os handle for this request object.
494  */
495 #define scic_sds_request_get_task_context(request) \
496 	((request)->task_context_buffer)
497 
498 /**
499  * scic_sds_request_set_status() -
500  *
501  * This macro will set the scu hardware status and sci request completion
502  * status for an io request.
503  */
504 #define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
505 	{ \
506 		(request)->scu_status = (scu_status_code); \
507 		(request)->sci_status = (sci_status_code); \
508 	}
509 
510 #define scic_sds_request_complete(a_request) \
511 	((a_request)->state_handlers->complete_handler(a_request))
512 
513 
514 extern enum sci_status
515 scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);
516 
517 /**
518  * SCU_SGL_ZERO() -
519  *
520  * This macro zeros the hardware SGL element data
521  */
522 #define SCU_SGL_ZERO(scu_sge) \
523 	{ \
524 		(scu_sge).length = 0; \
525 		(scu_sge).address_lower = 0; \
526 		(scu_sge).address_upper = 0; \
527 		(scu_sge).address_modifier = 0;	\
528 	}
529 
530 /**
531  * SCU_SGL_COPY() -
532  *
533  * This macro copys the SGL Element data from the host os to the hardware SGL
534  * elment data
535  */
536 #define SCU_SGL_COPY(scu_sge, os_sge) \
537 	{ \
538 		(scu_sge).length = sg_dma_len(sg); \
539 		(scu_sge).address_upper = \
540 			upper_32_bits(sg_dma_address(sg)); \
541 		(scu_sge).address_lower = \
542 			lower_32_bits(sg_dma_address(sg)); \
543 		(scu_sge).address_modifier = 0;	\
544 	}
545 
546 enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
547 enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
548 enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
549 						  u32 event_code);
550 enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
551 						  u32 frame_index);
552 enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
553 
554 /* XXX open code in caller */
555 static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
556 					       dma_addr_t phys_addr)
557 {
558 	struct isci_request *ireq = sci_req_to_ireq(sci_req);
559 	dma_addr_t offset;
560 
561 	BUG_ON(phys_addr < ireq->request_daddr);
562 
563 	offset = phys_addr - ireq->request_daddr;
564 
565 	BUG_ON(offset >= sizeof(*ireq));
566 
567 	return (char *)ireq + offset;
568 }
569 
570 /* XXX open code in caller */
571 static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
572 						      void *virt_addr)
573 {
574 	struct isci_request *ireq = sci_req_to_ireq(sci_req);
575 
576 	char *requested_addr = (char *)virt_addr;
577 	char *base_addr = (char *)ireq;
578 
579 	BUG_ON(requested_addr < base_addr);
580 	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
581 
582 	return ireq->request_daddr + (requested_addr - base_addr);
583 }
584 
585 /**
586  * This function gets the status of the request object.
587  * @request: This parameter points to the isci_request object
588  *
589  * status of the object as a isci_request_status enum.
590  */
591 static inline
592 enum isci_request_status isci_request_get_state(
593 	struct isci_request *isci_request)
594 {
595 	BUG_ON(isci_request == NULL);
596 
597 	/*probably a bad sign...	*/
598 	if (isci_request->status == unallocated)
599 		dev_warn(&isci_request->isci_host->pdev->dev,
600 			 "%s: isci_request->status == unallocated\n",
601 			 __func__);
602 
603 	return isci_request->status;
604 }
605 
606 
607 /**
608  * isci_request_change_state() - This function sets the status of the request
609  *    object.
610  * @request: This parameter points to the isci_request object
611  * @status: This Parameter is the new status of the object
612  *
613  */
614 static inline enum isci_request_status isci_request_change_state(
615 	struct isci_request *isci_request,
616 	enum isci_request_status status)
617 {
618 	enum isci_request_status old_state;
619 	unsigned long flags;
620 
621 	dev_dbg(&isci_request->isci_host->pdev->dev,
622 		"%s: isci_request = %p, state = 0x%x\n",
623 		__func__,
624 		isci_request,
625 		status);
626 
627 	BUG_ON(isci_request == NULL);
628 
629 	spin_lock_irqsave(&isci_request->state_lock, flags);
630 	old_state = isci_request->status;
631 	isci_request->status = status;
632 	spin_unlock_irqrestore(&isci_request->state_lock, flags);
633 
634 	return old_state;
635 }
636 
637 /**
638  * isci_request_change_started_to_newstate() - This function sets the status of
639  *    the request object.
640  * @request: This parameter points to the isci_request object
641  * @status: This Parameter is the new status of the object
642  *
643  * state previous to any change.
644  */
645 static inline enum isci_request_status isci_request_change_started_to_newstate(
646 	struct isci_request *isci_request,
647 	struct completion *completion_ptr,
648 	enum isci_request_status newstate)
649 {
650 	enum isci_request_status old_state;
651 	unsigned long flags;
652 
653 	spin_lock_irqsave(&isci_request->state_lock, flags);
654 
655 	old_state = isci_request->status;
656 
657 	if (old_state == started || old_state == aborting) {
658 		BUG_ON(isci_request->io_request_completion != NULL);
659 
660 		isci_request->io_request_completion = completion_ptr;
661 		isci_request->status = newstate;
662 	}
663 	spin_unlock_irqrestore(&isci_request->state_lock, flags);
664 
665 	dev_dbg(&isci_request->isci_host->pdev->dev,
666 		"%s: isci_request = %p, old_state = 0x%x\n",
667 		__func__,
668 		isci_request,
669 		old_state);
670 
671 	return old_state;
672 }
673 
674 /**
675  * isci_request_change_started_to_aborted() - This function sets the status of
676  *    the request object.
677  * @request: This parameter points to the isci_request object
678  * @completion_ptr: This parameter is saved as the kernel completion structure
679  *    signalled when the old request completes.
680  *
681  * state previous to any change.
682  */
683 static inline enum isci_request_status isci_request_change_started_to_aborted(
684 	struct isci_request *isci_request,
685 	struct completion *completion_ptr)
686 {
687 	return isci_request_change_started_to_newstate(
688 		       isci_request, completion_ptr, aborted
689 		       );
690 }
691 /**
692  * isci_request_free() - This function frees the request object.
693  * @isci_host: This parameter specifies the ISCI host object
694  * @isci_request: This parameter points to the isci_request object
695  *
696  */
697 static inline void isci_request_free(
698 	struct isci_host *isci_host,
699 	struct isci_request *isci_request)
700 {
701 	if (!isci_request)
702 		return;
703 
704 	/* release the dma memory if we fail. */
705 	dma_pool_free(isci_host->dma_pool, isci_request,
706 		      isci_request->request_daddr);
707 }
708 
709 
710 /* #define ISCI_REQUEST_VALIDATE_ACCESS
711  */
712 
713 #ifdef ISCI_REQUEST_VALIDATE_ACCESS
714 
715 static inline
716 struct sas_task *isci_request_access_task(struct isci_request *isci_request)
717 {
718 	BUG_ON(isci_request->ttype != io_task);
719 	return isci_request->ttype_ptr.io_task_ptr;
720 }
721 
722 static inline
723 struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
724 {
725 	BUG_ON(isci_request->ttype != tmf_task);
726 	return isci_request->ttype_ptr.tmf_task_ptr;
727 }
728 
729 #else  /* not ISCI_REQUEST_VALIDATE_ACCESS */
730 
731 #define isci_request_access_task(RequestPtr) \
732 	((RequestPtr)->ttype_ptr.io_task_ptr)
733 
734 #define isci_request_access_tmf(RequestPtr)  \
735 	((RequestPtr)->ttype_ptr.tmf_task_ptr)
736 
737 #endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
738 
739 
740 int isci_request_alloc_tmf(
741 	struct isci_host *isci_host,
742 	struct isci_tmf *isci_tmf,
743 	struct isci_request **isci_request,
744 	struct isci_remote_device *isci_device,
745 	gfp_t gfp_flags);
746 
747 
748 int isci_request_execute(
749 	struct isci_host *isci_host,
750 	struct sas_task *task,
751 	struct isci_request **request,
752 	gfp_t gfp_flags);
753 
754 /**
755  * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
756  *    sgl
757  * @request: This parameter points to the isci_request object
758  * @*pdev: This Parameter is the pci_device struct for the controller
759  *
760  */
761 static inline void isci_request_unmap_sgl(
762 	struct isci_request *request,
763 	struct pci_dev *pdev)
764 {
765 	struct sas_task *task = isci_request_access_task(request);
766 
767 	dev_dbg(&request->isci_host->pdev->dev,
768 		"%s: request = %p, task = %p,\n"
769 		"task->data_dir = %d, is_sata = %d\n ",
770 		__func__,
771 		request,
772 		task,
773 		task->data_dir,
774 		sas_protocol_ata(task->task_proto));
775 
776 	if ((task->data_dir != PCI_DMA_NONE) &&
777 	    !sas_protocol_ata(task->task_proto)) {
778 		if (task->num_scatter == 0)
779 			/* 0 indicates a single dma address */
780 			dma_unmap_single(
781 				&pdev->dev,
782 				request->zero_scatter_daddr,
783 				task->total_xfer_len,
784 				task->data_dir
785 				);
786 
787 		else  /* unmap the sgl dma addresses */
788 			dma_unmap_sg(
789 				&pdev->dev,
790 				task->scatter,
791 				request->num_sg_entries,
792 				task->data_dir
793 				);
794 	}
795 }
796 
797 /**
798  * isci_request_io_request_get_next_sge() - This function is called by the sci
799  *    core to retrieve the next sge for a given request.
800  * @request: This parameter is the isci_request object.
801  * @current_sge_address: This parameter is the last sge retrieved by the sci
802  *    core for this request.
803  *
804  * pointer to the next sge for specified request.
805  */
806 static inline void *isci_request_io_request_get_next_sge(
807 	struct isci_request *request,
808 	void *current_sge_address)
809 {
810 	struct sas_task *task = isci_request_access_task(request);
811 	void *ret = NULL;
812 
813 	dev_dbg(&request->isci_host->pdev->dev,
814 		"%s: request = %p, "
815 		"current_sge_address = %p, "
816 		"num_scatter = %d\n",
817 		__func__,
818 		request,
819 		current_sge_address,
820 		task->num_scatter);
821 
822 	if (!current_sge_address)	/* First time through.. */
823 		ret = task->scatter;    /* always task->scatter */
824 	else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
825 		ret = NULL;              /* there is only one element. */
826 	else
827 		ret = sg_next(current_sge_address);     /* sg_next returns NULL
828 							 * for the last element
829 							 */
830 
831 	dev_dbg(&request->isci_host->pdev->dev,
832 		"%s: next sge address = %p\n",
833 		__func__,
834 		ret);
835 
836 	return ret;
837 }
838 
839 void isci_terminate_pending_requests(struct isci_host *isci_host,
840 				     struct isci_remote_device *isci_device,
841 				     enum isci_request_status new_request_state);
842 enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
843 					    struct scic_sds_remote_device *sci_dev,
844 					    u16 io_tag,
845 					    struct scic_sds_request *sci_req);
846 enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
847 enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
848 enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
849 						    u32 transfer_length,
850 						    enum dma_data_direction dir);
851 void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
852 void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
853 #endif /* !defined(_ISCI_REQUEST_H_) */
854