/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "scic_sds_request.h"
/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};

enum task_type {
	io_task = 0,
	tmf_task = 1
};

struct isci_request {
	struct scic_sds_request *sci_request_handle;
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by dma_map_sg() */
	unsigned int request_alloc_size; /* size of block from dma_pool_alloc */

	/* Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci_req[0] ____cacheline_aligned;
};

/**
 * isci_request_get_state() - This function gets the current state of the
 *    request object.
 * @isci_request: This parameter points to the isci_request object
 *
 * Returns the state of the object as an isci_request_status enum.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}


/**
 * isci_request_change_state() - This function sets the state of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new state of the object
 *
 * Returns the state of the object prior to the change.
 */
static inline enum isci_request_status isci_request_change_state(
	struct isci_request *isci_request,
	enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	BUG_ON(isci_request == NULL);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}
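
/*
 * Usage sketch (illustrative only, not taken from the driver sources):
 * because isci_request_change_state() returns the previous state, a caller
 * can transition a request and detect a racing abort/terminate in a single
 * locked step.  "ireq" is a hypothetical struct isci_request pointer:
 *
 *	enum isci_request_status old_state;
 *
 *	old_state = isci_request_change_state(ireq, completed);
 *	if (old_state == aborting || old_state == terminating)
 *		...let the abort path do its own completion bookkeeping...
 */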

/**
 * isci_request_change_started_to_newstate() - This function sets the state of
 *    the request object to @newstate, but only if the request is currently in
 *    the "started" or "aborting" state; in that case @completion_ptr is also
 *    saved in the request.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 * @newstate: This parameter is the new state of the object
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the state of
 *    the request object to "aborted", but only if the request is currently
 *    "started" or "aborting".
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_aborted(
	struct isci_request *isci_request,
	struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(
		isci_request, completion_ptr, aborted
		);
}
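
/*
 * Abort-path sketch (illustrative only, not taken from the driver sources).
 * It shows how the completion saved by
 * isci_request_change_started_to_aborted() is meant to be used: the aborter
 * parks on it until the request finally completes (see the
 * io_request_completion note in struct isci_request above).  "ireq" is a
 * hypothetical struct isci_request pointer:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	enum isci_request_status old_state;
 *
 *	old_state = isci_request_change_started_to_aborted(ireq, &done);
 *	if (old_state == started || old_state == aborting)
 *		wait_for_completion(&done);
 *
 * Whichever context finally completes the request is then expected to signal
 * "done" through complete(ireq->io_request_completion).
 */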

/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 *
 */
static inline void isci_request_free(
	struct isci_host *isci_host,
	struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory if we fail. */
	dma_pool_free(isci_host->dma_pool, isci_request,
		      isci_request->request_daddr);
}


/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
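
/*
 * Accessor sketch (illustrative only, not taken from the driver sources):
 * the ttype field selects which member of the ttype_ptr union is valid, so
 * consumers are expected to check it before dereferencing.  "ireq" is a
 * hypothetical struct isci_request pointer:
 *
 *	if (ireq->ttype == io_task) {
 *		struct sas_task *task = isci_request_access_task(ireq);
 *		...
 *	} else {
 *		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 *		...
 *	}
 *
 * With ISCI_REQUEST_VALIDATE_ACCESS defined, the inline accessors above
 * BUG_ON() a mismatched access instead of silently returning the wrong
 * union member.
 */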


int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);


int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);

/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 *
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir
				);

		else	/* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir
				);
	}
}


void isci_request_io_request_complete(
	struct isci_host *isci_host,
	struct isci_request *request,
	enum sci_io_status completion_status);

/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request.
 *
 * Returns a pointer to the next sge for the specified request, or NULL once
 * the last element has been returned.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
		ret = NULL;		/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}


void isci_terminate_pending_requests(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device,
	enum isci_request_status new_request_state);


#endif /* !defined(_ISCI_REQUEST_H_) */