/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
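
/*
 * Illustrative note (not used by the driver itself): the conversion macros
 * above rely on structure embedding, so getting back to the driver-private
 * state from a generic dmaengine handle is plain container_of() arithmetic:
 *
 *	struct dma_chan *chan;				// handed out by dmaengine
 *	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 *	struct device *dev = mv_chan_to_devp(mv_chan);	// for dev_dbg() et al.
 */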

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}
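
/*
 * Worked example for the interrupt helpers above (assumption, implied by
 * the "chan->idx * 16" shifts: channels on one engine share a single
 * cause/mask register pair with 16 bits per channel):
 *
 *	channel 0: mask bits = XOR_INTR_MASK_VALUE << 0
 *	channel 1: mask bits = XOR_INTR_MASK_VALUE << 16
 *
 * mv_chan_get_intr_cause() undoes the same shift and masks with 0xFFFF, so
 * each channel only ever sees its own cause bits.
 */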

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel the slot belongs to
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}
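
/*
 * Summary of the software descriptor life cycle as implemented by the
 * helpers above and by mv_xor_slot_cleanup() below (documentation only,
 * no new behaviour):
 *
 *	mv_xor_alloc_slot()    slot_used = 1, cookie = -EBUSY
 *	mv_xor_tx_submit()     cookie assigned, linked on mv_chan->chain
 *	mv_xor_slot_cleanup()  callback run, then mv_xor_clean_slot() either
 *			       frees the slot (already acked) or parks it on
 *			       completed_slots
 *	mv_xor_clean_completed_slots()  frees parked slots once acked
 */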

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;

	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
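
/*
 * Note on the allocator above: the pool holds a fixed number of slots
 * (MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE) and the search makes at most two
 * passes over it.  A minimal caller-side sketch, which is what the prep
 * routines below effectively do:
 *
 *	spin_lock_bh(&mv_chan->lock);
 *	sw_desc = mv_xor_alloc_slot(mv_chan);
 *	if (!sw_desc) {
 *		// the allocator already scheduled the cleanup tasklet;
 *		// fail the prep and let the client retry later
 *	}
 *	spin_unlock_bh(&mv_chan->lock);
 */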

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_xor_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
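
/*
 * Consumer-side sketch of how the prep routine above is reached through the
 * generic dmaengine API (assumptions: 'chan' was obtained from the dmaengine
 * core and dest/srcs are already DMA-mapped; 'done_fn' is a hypothetical
 * completion callback, not something defined in this driver):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = done_fn;
 *		cookie = dmaengine_submit(tx);		// -> mv_xor_tx_submit()
 *		dma_async_issue_pending(chan);		// -> mv_xor_issue_pending()
 *	}
 */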

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
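
/*
 * Error path sketch: mv_xor_interrupt_handler() below feeds the per-channel
 * cause bits into mv_xor_err_interrupt_handler().  Address decode errors
 * (XOR_INT_ERR_DECODE) are only logged at debug level; any other error bit
 * triggers the register dump above and a WARN_ON().
 */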

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
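
/*
 * Worked example for the XOR self-test below: source page i is filled with
 * the byte (1 << i), so with MV_XOR_NUM_SRC_TEST == 4 every destination
 * byte must equal 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f, and the 32-bit compare
 * word becomes 0x0f0f0f0f.
 */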

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
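
/*
 * The teardown above mirrors mv_xor_channel_add() below in reverse order:
 * unregister from the dmaengine core first so no new transactions can be
 * issued, then release the descriptor pool and the dummy DMA_INTERRUPT
 * buffers, and finally give back the interrupt line.
 */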

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
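
/*
 * Device tree sketch matching mv_xor_probe() and the "marvell,orion-xor"
 * compatible above (register offsets, sizes and interrupt numbers are
 * placeholders for illustration, not taken from a real board):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */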