1 /* 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * BSD LICENSE 25 * 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 27 * All rights reserved. 28 * 29 * Redistribution and use in source and binary forms, with or without 30 * modification, are permitted provided that the following conditions 31 * are met: 32 * 33 * * Redistributions of source code must retain the above copyright 34 * notice, this list of conditions and the following disclaimer. 35 * * Redistributions in binary form must reproduce the above copyright 36 * notice, this list of conditions and the following disclaimer in 37 * the documentation and/or other materials provided with the 38 * distribution. 39 * * Neither the name of Intel Corporation nor the names of its 40 * contributors may be used to endorse or promote products derived 41 * from this software without specific prior written permission. 42 * 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56 #ifndef _ISCI_REQUEST_H_ 57 #define _ISCI_REQUEST_H_ 58 59 #include "isci.h" 60 #include "scic_sds_request.h" 61 62 /** 63 * struct isci_request_status - This enum defines the possible states of an I/O 64 * request. 65 * 66 * 67 */ 68 enum isci_request_status { 69 unallocated = 0x00, 70 allocated = 0x01, 71 started = 0x02, 72 completed = 0x03, 73 aborting = 0x04, 74 aborted = 0x05, 75 terminating = 0x06, 76 dead = 0x07 77 }; 78 79 enum task_type { 80 io_task = 0, 81 tmf_task = 1 82 }; 83 84 struct isci_request { 85 enum isci_request_status status; 86 enum task_type ttype; 87 unsigned short io_tag; 88 bool complete_in_target; 89 bool terminated; 90 91 union ttype_ptr_union { 92 struct sas_task *io_task_ptr; /* When ttype==io_task */ 93 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ 94 } ttype_ptr; 95 struct isci_host *isci_host; 96 struct isci_remote_device *isci_device; 97 /* For use in the requests_to_{complete|abort} lists: */ 98 struct list_head completed_node; 99 /* For use in the reqs_in_process list: */ 100 struct list_head dev_node; 101 spinlock_t state_lock; 102 dma_addr_t request_daddr; 103 dma_addr_t zero_scatter_daddr; 104 105 unsigned int num_sg_entries; /* returned by pci_alloc_sg */ 106 107 /** Note: "io_request_completion" is completed in two different ways 108 * depending on whether this is a TMF or regular request. 109 * - TMF requests are completed in the thread that started them; 110 * - regular requests are completed in the request completion callback 111 * function. 112 * This difference in operation allows the aborter of a TMF request 113 * to be sure that once the TMF request completes, the I/O that the 114 * TMF was aborting is guaranteed to have completed. 115 */ 116 struct completion *io_request_completion; 117 struct scic_sds_request sci; 118 }; 119 120 static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req) 121 { 122 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci); 123 124 return ireq; 125 } 126 127 /** 128 * This function gets the status of the request object. 129 * @request: This parameter points to the isci_request object 130 * 131 * status of the object as a isci_request_status enum. 132 */ 133 static inline 134 enum isci_request_status isci_request_get_state( 135 struct isci_request *isci_request) 136 { 137 BUG_ON(isci_request == NULL); 138 139 /*probably a bad sign... */ 140 if (isci_request->status == unallocated) 141 dev_warn(&isci_request->isci_host->pdev->dev, 142 "%s: isci_request->status == unallocated\n", 143 __func__); 144 145 return isci_request->status; 146 } 147 148 149 /** 150 * isci_request_change_state() - This function sets the status of the request 151 * object. 152 * @request: This parameter points to the isci_request object 153 * @status: This Parameter is the new status of the object 154 * 155 */ 156 static inline enum isci_request_status isci_request_change_state( 157 struct isci_request *isci_request, 158 enum isci_request_status status) 159 { 160 enum isci_request_status old_state; 161 unsigned long flags; 162 163 dev_dbg(&isci_request->isci_host->pdev->dev, 164 "%s: isci_request = %p, state = 0x%x\n", 165 __func__, 166 isci_request, 167 status); 168 169 BUG_ON(isci_request == NULL); 170 171 spin_lock_irqsave(&isci_request->state_lock, flags); 172 old_state = isci_request->status; 173 isci_request->status = status; 174 spin_unlock_irqrestore(&isci_request->state_lock, flags); 175 176 return old_state; 177 } 178 179 /** 180 * isci_request_change_started_to_newstate() - This function sets the status of 181 * the request object. 182 * @request: This parameter points to the isci_request object 183 * @status: This Parameter is the new status of the object 184 * 185 * state previous to any change. 186 */ 187 static inline enum isci_request_status isci_request_change_started_to_newstate( 188 struct isci_request *isci_request, 189 struct completion *completion_ptr, 190 enum isci_request_status newstate) 191 { 192 enum isci_request_status old_state; 193 unsigned long flags; 194 195 spin_lock_irqsave(&isci_request->state_lock, flags); 196 197 old_state = isci_request->status; 198 199 if (old_state == started || old_state == aborting) { 200 BUG_ON(isci_request->io_request_completion != NULL); 201 202 isci_request->io_request_completion = completion_ptr; 203 isci_request->status = newstate; 204 } 205 spin_unlock_irqrestore(&isci_request->state_lock, flags); 206 207 dev_dbg(&isci_request->isci_host->pdev->dev, 208 "%s: isci_request = %p, old_state = 0x%x\n", 209 __func__, 210 isci_request, 211 old_state); 212 213 return old_state; 214 } 215 216 /** 217 * isci_request_change_started_to_aborted() - This function sets the status of 218 * the request object. 219 * @request: This parameter points to the isci_request object 220 * @completion_ptr: This parameter is saved as the kernel completion structure 221 * signalled when the old request completes. 222 * 223 * state previous to any change. 224 */ 225 static inline enum isci_request_status isci_request_change_started_to_aborted( 226 struct isci_request *isci_request, 227 struct completion *completion_ptr) 228 { 229 return isci_request_change_started_to_newstate( 230 isci_request, completion_ptr, aborted 231 ); 232 } 233 /** 234 * isci_request_free() - This function frees the request object. 235 * @isci_host: This parameter specifies the ISCI host object 236 * @isci_request: This parameter points to the isci_request object 237 * 238 */ 239 static inline void isci_request_free( 240 struct isci_host *isci_host, 241 struct isci_request *isci_request) 242 { 243 if (!isci_request) 244 return; 245 246 /* release the dma memory if we fail. */ 247 dma_pool_free(isci_host->dma_pool, isci_request, 248 isci_request->request_daddr); 249 } 250 251 252 /* #define ISCI_REQUEST_VALIDATE_ACCESS 253 */ 254 255 #ifdef ISCI_REQUEST_VALIDATE_ACCESS 256 257 static inline 258 struct sas_task *isci_request_access_task(struct isci_request *isci_request) 259 { 260 BUG_ON(isci_request->ttype != io_task); 261 return isci_request->ttype_ptr.io_task_ptr; 262 } 263 264 static inline 265 struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request) 266 { 267 BUG_ON(isci_request->ttype != tmf_task); 268 return isci_request->ttype_ptr.tmf_task_ptr; 269 } 270 271 #else /* not ISCI_REQUEST_VALIDATE_ACCESS */ 272 273 #define isci_request_access_task(RequestPtr) \ 274 ((RequestPtr)->ttype_ptr.io_task_ptr) 275 276 #define isci_request_access_tmf(RequestPtr) \ 277 ((RequestPtr)->ttype_ptr.tmf_task_ptr) 278 279 #endif /* not ISCI_REQUEST_VALIDATE_ACCESS */ 280 281 282 int isci_request_alloc_tmf( 283 struct isci_host *isci_host, 284 struct isci_tmf *isci_tmf, 285 struct isci_request **isci_request, 286 struct isci_remote_device *isci_device, 287 gfp_t gfp_flags); 288 289 290 int isci_request_execute( 291 struct isci_host *isci_host, 292 struct sas_task *task, 293 struct isci_request **request, 294 gfp_t gfp_flags); 295 296 /** 297 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given 298 * sgl 299 * @request: This parameter points to the isci_request object 300 * @*pdev: This Parameter is the pci_device struct for the controller 301 * 302 */ 303 static inline void isci_request_unmap_sgl( 304 struct isci_request *request, 305 struct pci_dev *pdev) 306 { 307 struct sas_task *task = isci_request_access_task(request); 308 309 dev_dbg(&request->isci_host->pdev->dev, 310 "%s: request = %p, task = %p,\n" 311 "task->data_dir = %d, is_sata = %d\n ", 312 __func__, 313 request, 314 task, 315 task->data_dir, 316 sas_protocol_ata(task->task_proto)); 317 318 if ((task->data_dir != PCI_DMA_NONE) && 319 !sas_protocol_ata(task->task_proto)) { 320 if (task->num_scatter == 0) 321 /* 0 indicates a single dma address */ 322 dma_unmap_single( 323 &pdev->dev, 324 request->zero_scatter_daddr, 325 task->total_xfer_len, 326 task->data_dir 327 ); 328 329 else /* unmap the sgl dma addresses */ 330 dma_unmap_sg( 331 &pdev->dev, 332 task->scatter, 333 request->num_sg_entries, 334 task->data_dir 335 ); 336 } 337 } 338 339 340 void isci_request_io_request_complete( 341 struct isci_host *isci_host, 342 struct isci_request *request, 343 enum sci_io_status completion_status); 344 345 /** 346 * isci_request_io_request_get_next_sge() - This function is called by the sci 347 * core to retrieve the next sge for a given request. 348 * @request: This parameter is the isci_request object. 349 * @current_sge_address: This parameter is the last sge retrieved by the sci 350 * core for this request. 351 * 352 * pointer to the next sge for specified request. 353 */ 354 static inline void *isci_request_io_request_get_next_sge( 355 struct isci_request *request, 356 void *current_sge_address) 357 { 358 struct sas_task *task = isci_request_access_task(request); 359 void *ret = NULL; 360 361 dev_dbg(&request->isci_host->pdev->dev, 362 "%s: request = %p, " 363 "current_sge_address = %p, " 364 "num_scatter = %d\n", 365 __func__, 366 request, 367 current_sge_address, 368 task->num_scatter); 369 370 if (!current_sge_address) /* First time through.. */ 371 ret = task->scatter; /* always task->scatter */ 372 else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */ 373 ret = NULL; /* there is only one element. */ 374 else 375 ret = sg_next(current_sge_address); /* sg_next returns NULL 376 * for the last element 377 */ 378 379 dev_dbg(&request->isci_host->pdev->dev, 380 "%s: next sge address = %p\n", 381 __func__, 382 ret); 383 384 return ret; 385 } 386 387 388 void isci_terminate_pending_requests( 389 struct isci_host *isci_host, 390 struct isci_remote_device *isci_device, 391 enum isci_request_status new_request_state); 392 393 394 395 396 #endif /* !defined(_ISCI_REQUEST_H_) */ 397