// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting. We
 *	take a spinlock on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib *hwfib;
	struct fib *fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 * Keep pulling response QEs off the response queue and waking
	 * up the waiters until there are no more QEs. We then return
	 * to the system. If no response was requested we just
	 * deallocate the Fib here and continue.
	 */
	while (aac_consumer_get(dev, q, &entry)) {
		int fast;
		u32 index = le32_to_cpu(entry->addr);

		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 * Remove this fib from the Outstanding I/O queue.
		 * But only if it has not already been timed out.
		 *
		 * If the fib has been timed out already, then just
		 * continue. The caller has already been notified that
		 * the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 * Doctor the fib
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) {
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) {
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 * NOTE: we cannot touch the fib after this
			 * call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
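/*
 * For reference, a minimal sketch of the synchronous waiter that the
 * complete(&fib->event_wait) call above unblocks. In the real driver the
 * waiting side lives in aac_fib_send() (commsup.c); the helper name below
 * is hypothetical and the block is compiled out.
 */
#if 0
static int example_wait_for_response(struct fib *fibptr)
{
	/* The sender blocks here until the DPC marks the fib done. */
	wait_for_completion(&fibptr->event_wait);

	/*
	 * At this point fib->done has been set and hw_fib_va holds the
	 * adapter's response (or a doctored ST_OK fast response).
	 */
	return le32_to_cpu(*(__le32 *)fibptr->hw_fib_va->data);
}
#endif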
/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. We take a spinlock on the queue before operating on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 * Keep pulling command QEs off the command queue and waking
	 * up the waiters until there are no more QEs. We then return
	 * to the system.
	 */
	while (aac_consumer_get(dev, q, &entry)) {
		struct fib fibctx;
		struct hw_fib *hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 * Allocate a FIB at all costs. For non-queued stuff
		 * we can just use the stack, so we are happy. We need
		 * a fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread) {
			fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
			if (!fib)
				fib = &fibctx;
		}

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 * Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
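/*
 * A sketch of the consumer side of the handoff above, assuming the usual
 * pattern: fibs queued on q->cmdq are drained by the AIF thread (in the
 * real driver, aac_command_thread() in commsup.c) once q->cmdready is
 * woken. The helper name is hypothetical and the block is compiled out.
 */
#if 0
static void example_drain_cmdq(struct aac_queue *q)
{
	unsigned long flags;

	/* Sleep until aac_command_normal() queues a fib and wakes us. */
	wait_event_interruptible(q->cmdready, !list_empty(&q->cmdq));

	spin_lock_irqsave(q->lock, flags);
	while (!list_empty(&q->cmdq)) {
		struct fib *fib = list_first_entry(&q->cmdq,
						   struct fib, fiblink);

		list_del(&fib->fiblink);
		spin_unlock_irqrestore(q->lock, flags);
		/* handle the AIF carried in fib->hw_fib_va, then free fib */
		spin_lock_irqsave(q->lock, flags);
	}
	spin_unlock_irqrestore(q->lock, flags);
}
#endif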
/**
 * aac_aif_callback - handle the AIFs - new method (SRC)
 * @context: the context set in the fib - here it is the fib itself
 * @fibptr: pointer to the fib
 */
static void aac_aif_callback(void *context, struct fib *fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;

	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *)fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	/* Re-arm: post the next AIF request with ourselves as callback. */
	aac_fib_send(AifRequest,
		     fibctx,
		     sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
		     FsaNormal,
		     0, 1,
		     (fib_callback)aac_aif_callback, fibctx);
}
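/*
 * How the self-re-arming chain above gets started: the isAif == 2 path of
 * aac_intr_normal() below posts the first AifRequest with aac_aif_callback()
 * as its completion callback, so one AIF fib is kept outstanding; each
 * callback invocation consumes one event and posts the next request.
 * Condensed, compiled-out illustration of that arming step:
 */
#if 0
	fibctx = aac_fib_alloc(dev);
	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *)fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);
	aac_fib_send(AifRequest, fibctx,
		     sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
		     FsaNormal, 0, 1,
		     (fib_callback)aac_aif_callback, fibctx);
#endif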
/**
 * aac_intr_normal - Handle command replies
 * @dev: Device
 * @index: completion reference
 * @isAif: 1 if this is an AIF (common method), 2 for the new (SRC) AIF
 *	method, 0 for a normal command completion
 * @isFastResponse: nonzero if the adapter returned a fast response
 * @aif_fib: hardware fib carrying the AIF data, may be NULL
 *
 * This DPC routine will be run when the adapter interrupts us to let us
 * know there is a response on our normal priority queue. We will pull off
 * all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
			     int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib *hw_fib;
		struct fib *fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 * Allocate a FIB. For non-queued stuff we can just use
		 * the stack, so we are happy. We need a fib object in order
		 * to manage the linked lists.
		 */
		if (!dev->aif_thread)
			return 1;
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (!fib)
			return 1;
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		if (!hw_fib) {
			kfree(fib);
			return 1;
		}
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *)fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
				    fibctx,
				    sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
				    FsaNormal,
				    0, 1,
				    (fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;

		/*
		 * Remove this fib from the Outstanding I/O queue.
		 * But only if it has not already been timed out.
		 *
		 * If the fib has been timed out already, then just
		 * continue. The caller has already been notified that
		 * the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/* Doctor the fib */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}

			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected))
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				else
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}

		if (start_callback) {
			/*
			 * NOTE: we cannot touch the fib after this
			 * call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}
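/*
 * Hypothetical caller showing how an interrupt handler might hand a
 * completion to aac_intr_normal(); in the real driver this dispatch is
 * done by the bus-specific interrupt handlers (e.g. aac_src_intr_message()
 * in src.c). The encoding of 'entry' below is made up for illustration
 * and the block is compiled out.
 */
#if 0
static void example_dispatch_completion(struct aac_dev *dev, u32 entry)
{
	int is_fast = entry & 0x01;	/* hypothetical fast-response bit */

	/* Not an AIF (isAif == 0): complete the fib at the decoded index. */
	aac_intr_normal(dev, entry >> 2, 0, is_fast, NULL);
}
#endif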