/*
 *      Adaptec AAC series RAID controller driver
 *      (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>

#include "aacraid.h"

/**
 *      aac_response_normal     -       Handle command replies
 *      @q: Queue to read from
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a response on our normal priority queue. We will pull
 *      off all the QEs there are and wake up all the waiters before exiting.
 *      We will take a spinlock out on the queue before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        struct hw_fib * hwfib;
        struct fib * fib;
        int consumed = 0;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);
        /*
         *      Keep pulling response QEs off the response queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      back to the system. If no response was requested we just
         *      deallocate the Fib here and continue.
         */
        while (aac_consumer_get(dev, q, &entry))
        {
                int fast;
                /*
                 *      entry->addr encodes both the fib index and a fast-path
                 *      flag; see the worked example after this function.
                 */
                u32 index = le32_to_cpu(entry->addr);
                fast = index & 0x01;
                fib = &dev->fibs[index >> 2];
                hwfib = fib->hw_fib_va;

                aac_consumer_free(dev, q, HostNormRespQueue);
                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                dev->queues->queue[AdapNormCmdQueue].numpending--;

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        spin_unlock_irqrestore(q->lock, flags);
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        spin_lock_irqsave(q->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(q->lock, flags);

                if (fast) {
                        /*
                         *      Doctor the fib: fast-path completions carry no
                         *      status, so fill in ST_OK and mark the fib as
                         *      processed by the adapter.
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;
                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE: we cannot touch the fib after this
                         *      call, because it may have been deallocated.
                         */
                        fib->flags = 0;
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done)
                                fib->done = 1;
                        up(&fib->event_wait);
                        spin_unlock_irqrestore(&fib->event_lock, flagv);
                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                        /*
                         *      done == 2 means the original waiter has already
                         *      given up on this fib, so release it here.
                         */
                        if (fib->done == 2) {
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                consumed++;
                spin_lock_irqsave(q->lock, flags);
        }

        if (consumed > aac_config.peak_fibs)
                aac_config.peak_fibs = consumed;
        if (consumed == 0)
                aac_config.zero_fibs++;

        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

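/*
 * Worked example of the response-entry decode used in aac_response_normal()
 * above.  Illustrative only: the value is made up, the bit layout is simply
 * what the code reads out of entry->addr after le32_to_cpu():
 *
 *      entry->addr == 0x2d
 *      fast  = 0x2d & 0x01   ->  1   fast-path completion, status assumed OK
 *      index = 0x2d >> 2     -> 11   the reply is for dev->fibs[11]
 *
 * Bit 1 is not examined on this queue; aac_intr_normal() below uses bit 1 of
 * its index argument to tell adapter-initiated commands apart from responses.
 */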

/**
 *      aac_command_normal      -       handle commands
 *      @q: queue to process
 *
 *      This DPC routine will be queued when the adapter interrupts us to
 *      let us know there is a command on our normal priority queue. We will
 *      pull off all the QEs there are and wake up all the waiters before
 *      exiting. We will take a spinlock out on the queue before operating
 *      on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
        struct aac_dev * dev = q->dev;
        struct aac_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);

        /*
         *      Keep pulling command QEs off the command queue and waking
         *      up the waiters until there are no more QEs. We then return
         *      back to the system.
         */
        while (aac_consumer_get(dev, q, &entry))
        {
                struct fib fibctx;
                struct hw_fib * hw_fib;
                u32 index;
                struct fib *fib = &fibctx;

                index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
                hw_fib = &dev->aif_base_va[index];

                /*
                 *      Allocate a FIB at all costs. For non queued stuff
                 *      we can just use the stack so we are happy. We need
                 *      a fib object in order to manage the linked lists.
                 */
                if (dev->aif_thread)
                        if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                                fib = &fibctx;

                memset(fib, 0, sizeof(struct fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                if (dev->aif_thread && fib != &fibctx) {
                        /*
                         *      Hand the command off to the AIF thread; see
                         *      the outline after this function.
                         */
                        list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        wake_up_interruptible(&q->cmdready);
                } else {
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
                        /*
                         *      Set the status of this FIB
                         */
                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
                        aac_fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }
        }
        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

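/*
 * Consumer side of the q->cmdq handoff above, sketched for orientation only.
 * The real consumer is the AIF thread (aac_command_thread() in commsup.c);
 * the flow below is a simplification of what it does with each queued fib,
 * not a drop-in implementation:
 *
 *      wait_event_interruptible(q->cmdready, !list_empty(&q->cmdq));
 *      fib = list_first_entry(&q->cmdq, struct fib, fiblink);
 *      list_del(&fib->fiblink);
 *      ... interpret the AIF in fib->hw_fib_va, then ...
 *      *(__le32 *)fib->hw_fib_va->data = cpu_to_le32(ST_OK);
 *      aac_fib_adapter_complete(fib, sizeof(u32));
 *      kfree(fib);
 */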

/**
 *      aac_intr_normal -       Handle command replies
 *      @dev: Device
 *      @index: completion reference
 *
 *      This DPC routine will be run when the adapter interrupts us to let us
 *      know there is a completion to process. @index either references a
 *      normal response fib (bit 1 clear) or points at an adapter-initiated
 *      command (bit 1 set) that gets queued for the AIF thread.
 */

unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
{
        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
        if ((index & 0x00000002L)) {
                struct hw_fib * hw_fib;
                struct fib * fib;
                struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
                unsigned long flags;

                if (index == 0xFFFFFFFEL) /* Special Case */
                        return 0; /* Do nothing */
                /*
                 *      Allocate a FIB. For non queued stuff we can just use
                 *      the stack so we are happy. We need a fib object in
                 *      order to manage the linked lists.
                 */
                if ((!dev->aif_thread)
                 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
                        return 1;
                if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
                        kfree(fib);
                        return 1;
                }
                memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
                        (index & ~0x00000002L)), sizeof(struct hw_fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                spin_lock_irqsave(q->lock, flags);
                list_add_tail(&fib->fiblink, &q->cmdq);
                wake_up_interruptible(&q->cmdready);
                spin_unlock_irqrestore(q->lock, flags);
                return 1;
        } else {
                int fast = index & 0x01;
                struct fib * fib = &dev->fibs[index >> 2];
                struct hw_fib * hwfib = fib->hw_fib_va;

                /*
                 *      Remove this fib from the Outstanding I/O queue.
                 *      But only if it has not already been timed out.
                 *
                 *      If the fib has been timed out already, then just
                 *      continue. The caller has already been notified that
                 *      the fib timed out.
                 */
                dev->queues->queue[AdapNormCmdQueue].numpending--;

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        return 0;
                }

                if (fast) {
                        /*
                         *      Doctor the fib: fast-path completions carry no
                         *      status, so fill in ST_OK and mark the fib as
                         *      processed by the adapter.
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;
                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        else
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        /*
                         *      NOTE: we cannot touch the fib after this
                         *      call, because it may have been deallocated.
                         */
                        fib->flags = 0;
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
                        dprintk((KERN_INFO "event_wait up\n"));
                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done)
                                fib->done = 1;
                        up(&fib->event_wait);
                        spin_unlock_irqrestore(&fib->event_lock, flagv);
                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                }
                return 0;
        }
}
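
/*
 * How these entry points are reached, sketched for orientation only.  The
 * actual dispatch lives in the bus-specific interrupt handlers (rx.c, sa.c,
 * rkt.c), not here, and differs per interface; the shape below is an
 * assumption-labelled illustration, not driver code, and example_isr() is a
 * hypothetical name:
 *
 *      irqreturn_t example_isr(int irq, void *dev_id)
 *      {
 *              struct aac_dev *dev = dev_id;
 *              u32 index = ...read doorbell/outbound register...;
 *
 *              if (...new comm interface...)
 *                      aac_intr_normal(dev, index);
 *              else if (...doorbell: response queue not empty...)
 *                      aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
 *              else if (...doorbell: command queue not empty...)
 *                      aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
 *              return IRQ_HANDLED;
 *      }
 */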