1 /* 2 * Intel IXP4xx Queue Manager driver for Linux 3 * 4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of version 2 of the GNU General Public License 8 * as published by the Free Software Foundation. 9 */ 10 11 #include <linux/ioport.h> 12 #include <linux/interrupt.h> 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/of.h> 16 #include <linux/platform_device.h> 17 #include <linux/soc/ixp4xx/qmgr.h> 18 19 static struct qmgr_regs __iomem *qmgr_regs; 20 static int qmgr_irq_1; 21 static int qmgr_irq_2; 22 static spinlock_t qmgr_lock; 23 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */ 24 static void (*irq_handlers[QUEUES])(void *pdev); 25 static void *irq_pdevs[QUEUES]; 26 27 #if DEBUG_QMGR 28 char qmgr_queue_descs[QUEUES][32]; 29 #endif 30 31 void qmgr_put_entry(unsigned int queue, u32 val) 32 { 33 #if DEBUG_QMGR 34 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ 35 36 printk(KERN_DEBUG "Queue %s(%i) put %X\n", 37 qmgr_queue_descs[queue], queue, val); 38 #endif 39 __raw_writel(val, &qmgr_regs->acc[queue][0]); 40 } 41 42 u32 qmgr_get_entry(unsigned int queue) 43 { 44 u32 val; 45 val = __raw_readl(&qmgr_regs->acc[queue][0]); 46 #if DEBUG_QMGR 47 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ 48 49 printk(KERN_DEBUG "Queue %s(%i) get %X\n", 50 qmgr_queue_descs[queue], queue, val); 51 #endif 52 return val; 53 } 54 55 static int __qmgr_get_stat1(unsigned int queue) 56 { 57 return (__raw_readl(&qmgr_regs->stat1[queue >> 3]) 58 >> ((queue & 7) << 2)) & 0xF; 59 } 60 61 static int __qmgr_get_stat2(unsigned int queue) 62 { 63 BUG_ON(queue >= HALF_QUEUES); 64 return (__raw_readl(&qmgr_regs->stat2[queue >> 4]) 65 >> ((queue & 0xF) << 1)) & 0x3; 66 } 67 68 /** 69 * qmgr_stat_empty() - checks if a hardware queue is empty 70 * @queue: queue number 71 * 72 * Returns non-zero value if the queue is empty. 
73 */ 74 int qmgr_stat_empty(unsigned int queue) 75 { 76 BUG_ON(queue >= HALF_QUEUES); 77 return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY; 78 } 79 80 /** 81 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark 82 * @queue: queue number 83 * 84 * Returns non-zero value if the queue is below low watermark. 85 */ 86 int qmgr_stat_below_low_watermark(unsigned int queue) 87 { 88 if (queue >= HALF_QUEUES) 89 return (__raw_readl(&qmgr_regs->statne_h) >> 90 (queue - HALF_QUEUES)) & 0x01; 91 return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY; 92 } 93 94 /** 95 * qmgr_stat_full() - checks if a hardware queue is full 96 * @queue: queue number 97 * 98 * Returns non-zero value if the queue is full. 99 */ 100 int qmgr_stat_full(unsigned int queue) 101 { 102 if (queue >= HALF_QUEUES) 103 return (__raw_readl(&qmgr_regs->statf_h) >> 104 (queue - HALF_QUEUES)) & 0x01; 105 return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL; 106 } 107 108 /** 109 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow 110 * @queue: queue number 111 * 112 * Returns non-zero value if the queue experienced overflow. 
113 */ 114 int qmgr_stat_overflow(unsigned int queue) 115 { 116 return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW; 117 } 118 119 void qmgr_set_irq(unsigned int queue, int src, 120 void (*handler)(void *pdev), void *pdev) 121 { 122 unsigned long flags; 123 124 spin_lock_irqsave(&qmgr_lock, flags); 125 if (queue < HALF_QUEUES) { 126 u32 __iomem *reg; 127 int bit; 128 BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL); 129 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */ 130 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ 131 __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), 132 reg); 133 } else 134 /* IRQ source for queues 32-63 is fixed */ 135 BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY); 136 137 irq_handlers[queue] = handler; 138 irq_pdevs[queue] = pdev; 139 spin_unlock_irqrestore(&qmgr_lock, flags); 140 } 141 142 143 static irqreturn_t qmgr_irq1_a0(int irq, void *pdev) 144 { 145 int i, ret = 0; 146 u32 en_bitmap, src, stat; 147 148 /* ACK - it may clear any bits so don't rely on it */ 149 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]); 150 151 en_bitmap = qmgr_regs->irqen[0]; 152 while (en_bitmap) { 153 i = __fls(en_bitmap); /* number of the last "low" queue */ 154 en_bitmap &= ~BIT(i); 155 src = qmgr_regs->irqsrc[i >> 3]; 156 stat = qmgr_regs->stat1[i >> 3]; 157 if (src & 4) /* the IRQ condition is inverted */ 158 stat = ~stat; 159 if (stat & BIT(src & 3)) { 160 irq_handlers[i](irq_pdevs[i]); 161 ret = IRQ_HANDLED; 162 } 163 } 164 return ret; 165 } 166 167 168 static irqreturn_t qmgr_irq2_a0(int irq, void *pdev) 169 { 170 int i, ret = 0; 171 u32 req_bitmap; 172 173 /* ACK - it may clear any bits so don't rely on it */ 174 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]); 175 176 req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h; 177 while (req_bitmap) { 178 i = __fls(req_bitmap); /* number of the last "high" queue */ 179 req_bitmap &= ~BIT(i); 180 irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]); 181 ret = 
IRQ_HANDLED; 182 } 183 return ret; 184 } 185 186 187 static irqreturn_t qmgr_irq(int irq, void *pdev) 188 { 189 int i, half = (irq == qmgr_irq_1 ? 0 : 1); 190 u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]); 191 192 if (!req_bitmap) 193 return 0; 194 __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */ 195 196 while (req_bitmap) { 197 i = __fls(req_bitmap); /* number of the last queue */ 198 req_bitmap &= ~BIT(i); 199 i += half * HALF_QUEUES; 200 irq_handlers[i](irq_pdevs[i]); 201 } 202 return IRQ_HANDLED; 203 } 204 205 206 void qmgr_enable_irq(unsigned int queue) 207 { 208 unsigned long flags; 209 int half = queue / 32; 210 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); 211 212 spin_lock_irqsave(&qmgr_lock, flags); 213 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask, 214 &qmgr_regs->irqen[half]); 215 spin_unlock_irqrestore(&qmgr_lock, flags); 216 } 217 218 void qmgr_disable_irq(unsigned int queue) 219 { 220 unsigned long flags; 221 int half = queue / 32; 222 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); 223 224 spin_lock_irqsave(&qmgr_lock, flags); 225 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask, 226 &qmgr_regs->irqen[half]); 227 __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */ 228 spin_unlock_irqrestore(&qmgr_lock, flags); 229 } 230 231 static inline void shift_mask(u32 *mask) 232 { 233 mask[3] = mask[3] << 1 | mask[2] >> 31; 234 mask[2] = mask[2] << 1 | mask[1] >> 31; 235 mask[1] = mask[1] << 1 | mask[0] >> 31; 236 mask[0] <<= 1; 237 } 238 239 #if DEBUG_QMGR 240 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, 241 unsigned int nearly_empty_watermark, 242 unsigned int nearly_full_watermark, 243 const char *desc_format, const char* name) 244 #else 245 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, 246 unsigned int nearly_empty_watermark, 247 unsigned int nearly_full_watermark) 248 #endif 249 { 250 u32 cfg, addr = 0, mask[4]; /* in 16-dwords */ 251 int err; 252 253 
BUG_ON(queue >= QUEUES); 254 255 if ((nearly_empty_watermark | nearly_full_watermark) & ~7) 256 return -EINVAL; 257 258 switch (len) { 259 case 16: 260 cfg = 0 << 24; 261 mask[0] = 0x1; 262 break; 263 case 32: 264 cfg = 1 << 24; 265 mask[0] = 0x3; 266 break; 267 case 64: 268 cfg = 2 << 24; 269 mask[0] = 0xF; 270 break; 271 case 128: 272 cfg = 3 << 24; 273 mask[0] = 0xFF; 274 break; 275 default: 276 return -EINVAL; 277 } 278 279 cfg |= nearly_empty_watermark << 26; 280 cfg |= nearly_full_watermark << 29; 281 len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */ 282 mask[1] = mask[2] = mask[3] = 0; 283 284 if (!try_module_get(THIS_MODULE)) 285 return -ENODEV; 286 287 spin_lock_irq(&qmgr_lock); 288 if (__raw_readl(&qmgr_regs->sram[queue])) { 289 err = -EBUSY; 290 goto err; 291 } 292 293 while (1) { 294 if (!(used_sram_bitmap[0] & mask[0]) && 295 !(used_sram_bitmap[1] & mask[1]) && 296 !(used_sram_bitmap[2] & mask[2]) && 297 !(used_sram_bitmap[3] & mask[3])) 298 break; /* found free space */ 299 300 addr++; 301 shift_mask(mask); 302 if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) { 303 printk(KERN_ERR "qmgr: no free SRAM space for" 304 " queue %i\n", queue); 305 err = -ENOMEM; 306 goto err; 307 } 308 } 309 310 used_sram_bitmap[0] |= mask[0]; 311 used_sram_bitmap[1] |= mask[1]; 312 used_sram_bitmap[2] |= mask[2]; 313 used_sram_bitmap[3] |= mask[3]; 314 __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]); 315 #if DEBUG_QMGR 316 snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]), 317 desc_format, name); 318 printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n", 319 qmgr_queue_descs[queue], queue, addr); 320 #endif 321 spin_unlock_irq(&qmgr_lock); 322 return 0; 323 324 err: 325 spin_unlock_irq(&qmgr_lock); 326 module_put(THIS_MODULE); 327 return err; 328 } 329 330 void qmgr_release_queue(unsigned int queue) 331 { 332 u32 cfg, addr, mask[4]; 333 334 BUG_ON(queue >= QUEUES); /* not in valid range */ 335 336 spin_lock_irq(&qmgr_lock); 337 cfg = 
__raw_readl(&qmgr_regs->sram[queue]); 338 addr = (cfg >> 14) & 0xFF; 339 340 BUG_ON(!addr); /* not requested */ 341 342 switch ((cfg >> 24) & 3) { 343 case 0: mask[0] = 0x1; break; 344 case 1: mask[0] = 0x3; break; 345 case 2: mask[0] = 0xF; break; 346 case 3: mask[0] = 0xFF; break; 347 } 348 349 mask[1] = mask[2] = mask[3] = 0; 350 351 while (addr--) 352 shift_mask(mask); 353 354 #if DEBUG_QMGR 355 printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n", 356 qmgr_queue_descs[queue], queue); 357 qmgr_queue_descs[queue][0] = '\x0'; 358 #endif 359 360 while ((addr = qmgr_get_entry(queue))) 361 printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n", 362 queue, addr); 363 364 __raw_writel(0, &qmgr_regs->sram[queue]); 365 366 used_sram_bitmap[0] &= ~mask[0]; 367 used_sram_bitmap[1] &= ~mask[1]; 368 used_sram_bitmap[2] &= ~mask[2]; 369 used_sram_bitmap[3] &= ~mask[3]; 370 irq_handlers[queue] = NULL; /* catch IRQ bugs */ 371 spin_unlock_irq(&qmgr_lock); 372 373 module_put(THIS_MODULE); 374 } 375 376 static int ixp4xx_qmgr_probe(struct platform_device *pdev) 377 { 378 int i, err; 379 irq_handler_t handler1, handler2; 380 struct device *dev = &pdev->dev; 381 struct resource *res; 382 int irq1, irq2; 383 384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 385 if (!res) 386 return -ENODEV; 387 qmgr_regs = devm_ioremap_resource(dev, res); 388 if (IS_ERR(qmgr_regs)) 389 return PTR_ERR(qmgr_regs); 390 391 irq1 = platform_get_irq(pdev, 0); 392 if (irq1 <= 0) 393 return irq1 ? irq1 : -EINVAL; 394 qmgr_irq_1 = irq1; 395 irq2 = platform_get_irq(pdev, 1); 396 if (irq2 <= 0) 397 return irq2 ? 
irq2 : -EINVAL; 398 qmgr_irq_2 = irq2; 399 400 /* reset qmgr registers */ 401 for (i = 0; i < 4; i++) { 402 __raw_writel(0x33333333, &qmgr_regs->stat1[i]); 403 __raw_writel(0, &qmgr_regs->irqsrc[i]); 404 } 405 for (i = 0; i < 2; i++) { 406 __raw_writel(0, &qmgr_regs->stat2[i]); 407 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */ 408 __raw_writel(0, &qmgr_regs->irqen[i]); 409 } 410 411 __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h); 412 __raw_writel(0, &qmgr_regs->statf_h); 413 414 for (i = 0; i < QUEUES; i++) 415 __raw_writel(0, &qmgr_regs->sram[i]); 416 417 if (cpu_is_ixp42x_rev_a0()) { 418 handler1 = qmgr_irq1_a0; 419 handler2 = qmgr_irq2_a0; 420 } else 421 handler1 = handler2 = qmgr_irq; 422 423 err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager", 424 NULL); 425 if (err) { 426 dev_err(dev, "failed to request IRQ%i (%i)\n", 427 irq1, err); 428 return err; 429 } 430 431 err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager", 432 NULL); 433 if (err) { 434 dev_err(dev, "failed to request IRQ%i (%i)\n", 435 irq2, err); 436 return err; 437 } 438 439 used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */ 440 spin_lock_init(&qmgr_lock); 441 442 dev_info(dev, "IXP4xx Queue Manager initialized.\n"); 443 return 0; 444 } 445 446 static int ixp4xx_qmgr_remove(struct platform_device *pdev) 447 { 448 synchronize_irq(qmgr_irq_1); 449 synchronize_irq(qmgr_irq_2); 450 return 0; 451 } 452 453 static const struct of_device_id ixp4xx_qmgr_of_match[] = { 454 { 455 .compatible = "intel,ixp4xx-ahb-queue-manager", 456 }, 457 {}, 458 }; 459 460 static struct platform_driver ixp4xx_qmgr_driver = { 461 .driver = { 462 .name = "ixp4xx-qmgr", 463 .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match), 464 }, 465 .probe = ixp4xx_qmgr_probe, 466 .remove = ixp4xx_qmgr_remove, 467 }; 468 module_platform_driver(ixp4xx_qmgr_driver); 469 470 MODULE_LICENSE("GPL v2"); 471 MODULE_AUTHOR("Krzysztof Halasa"); 472 473 
/* Public API used by the IXP4xx NPE network drivers (Ethernet/HSS) */
EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);