1ff7b0479SSaeed Bishara /* 2ff7b0479SSaeed Bishara * offload engine driver for the Marvell XOR engine 3ff7b0479SSaeed Bishara * Copyright (C) 2007, 2008, Marvell International Ltd. 4ff7b0479SSaeed Bishara * 5ff7b0479SSaeed Bishara * This program is free software; you can redistribute it and/or modify it 6ff7b0479SSaeed Bishara * under the terms and conditions of the GNU General Public License, 7ff7b0479SSaeed Bishara * version 2, as published by the Free Software Foundation. 8ff7b0479SSaeed Bishara * 9ff7b0479SSaeed Bishara * This program is distributed in the hope it will be useful, but WITHOUT 10ff7b0479SSaeed Bishara * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11ff7b0479SSaeed Bishara * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12ff7b0479SSaeed Bishara * more details. 13ff7b0479SSaeed Bishara */ 14ff7b0479SSaeed Bishara 15ff7b0479SSaeed Bishara #include <linux/init.h> 165a0e3ad6STejun Heo #include <linux/slab.h> 17ff7b0479SSaeed Bishara #include <linux/delay.h> 18ff7b0479SSaeed Bishara #include <linux/dma-mapping.h> 19ff7b0479SSaeed Bishara #include <linux/spinlock.h> 20ff7b0479SSaeed Bishara #include <linux/interrupt.h> 216f166312SLior Amsalem #include <linux/of_device.h> 22ff7b0479SSaeed Bishara #include <linux/platform_device.h> 23ff7b0479SSaeed Bishara #include <linux/memory.h> 24c510182bSAndrew Lunn #include <linux/clk.h> 25f7d12ef5SThomas Petazzoni #include <linux/of.h> 26f7d12ef5SThomas Petazzoni #include <linux/of_irq.h> 27f7d12ef5SThomas Petazzoni #include <linux/irqdomain.h> 2877757291SThomas Petazzoni #include <linux/cpumask.h> 29c02cecb9SArnd Bergmann #include <linux/platform_data/dma-mv_xor.h> 30d2ebfb33SRussell King - ARM Linux 31d2ebfb33SRussell King - ARM Linux #include "dmaengine.h" 32ff7b0479SSaeed Bishara #include "mv_xor.h" 33ff7b0479SSaeed Bishara 34dd130c65SGregory CLEMENT enum mv_xor_type { 35dd130c65SGregory CLEMENT XOR_ORION, 36dd130c65SGregory CLEMENT XOR_ARMADA_38X, 37ac5f0f3fSMarcin Wojtas XOR_ARMADA_37XX, 38dd130c65SGregory CLEMENT }; 39dd130c65SGregory CLEMENT 406f166312SLior Amsalem enum mv_xor_mode { 416f166312SLior Amsalem XOR_MODE_IN_REG, 426f166312SLior Amsalem XOR_MODE_IN_DESC, 436f166312SLior Amsalem }; 446f166312SLior Amsalem 45ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan); 46ff7b0479SSaeed Bishara 47ff7b0479SSaeed Bishara #define to_mv_xor_chan(chan) \ 4898817b99SThomas Petazzoni container_of(chan, struct mv_xor_chan, dmachan) 49ff7b0479SSaeed Bishara 50ff7b0479SSaeed Bishara #define to_mv_xor_slot(tx) \ 51ff7b0479SSaeed Bishara container_of(tx, struct mv_xor_desc_slot, async_tx) 52ff7b0479SSaeed Bishara 53c98c1781SThomas Petazzoni #define mv_chan_to_devp(chan) \ 541ef48a26SThomas Petazzoni ((chan)->dmadev.dev) 55c98c1781SThomas Petazzoni 56dfc97661SLior Amsalem static void mv_desc_init(struct mv_xor_desc_slot *desc, 57ba87d137SLior Amsalem dma_addr_t addr, u32 byte_count, 58ba87d137SLior Amsalem enum dma_ctrl_flags flags) 59ff7b0479SSaeed Bishara { 60ff7b0479SSaeed Bishara struct mv_xor_desc *hw_desc = desc->hw_desc; 61ff7b0479SSaeed Bishara 620e7488edSEzequiel Garcia hw_desc->status = XOR_DESC_DMA_OWNED; 63ff7b0479SSaeed Bishara hw_desc->phy_next_desc = 0; 64ba87d137SLior Amsalem /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */ 65ba87d137SLior Amsalem hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ? 
66ba87d137SLior Amsalem XOR_DESC_EOD_INT_EN : 0; 67dfc97661SLior Amsalem hw_desc->phy_dest_addr = addr; 68ff7b0479SSaeed Bishara hw_desc->byte_count = byte_count; 69ff7b0479SSaeed Bishara } 70ff7b0479SSaeed Bishara 716f166312SLior Amsalem static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) 726f166312SLior Amsalem { 736f166312SLior Amsalem struct mv_xor_desc *hw_desc = desc->hw_desc; 746f166312SLior Amsalem 756f166312SLior Amsalem switch (desc->type) { 766f166312SLior Amsalem case DMA_XOR: 776f166312SLior Amsalem case DMA_INTERRUPT: 786f166312SLior Amsalem hw_desc->desc_command |= XOR_DESC_OPERATION_XOR; 796f166312SLior Amsalem break; 806f166312SLior Amsalem case DMA_MEMCPY: 816f166312SLior Amsalem hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY; 826f166312SLior Amsalem break; 836f166312SLior Amsalem default: 846f166312SLior Amsalem BUG(); 856f166312SLior Amsalem return; 866f166312SLior Amsalem } 876f166312SLior Amsalem } 886f166312SLior Amsalem 89ff7b0479SSaeed Bishara static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, 90ff7b0479SSaeed Bishara u32 next_desc_addr) 91ff7b0479SSaeed Bishara { 92ff7b0479SSaeed Bishara struct mv_xor_desc *hw_desc = desc->hw_desc; 93ff7b0479SSaeed Bishara BUG_ON(hw_desc->phy_next_desc); 94ff7b0479SSaeed Bishara hw_desc->phy_next_desc = next_desc_addr; 95ff7b0479SSaeed Bishara } 96ff7b0479SSaeed Bishara 97ff7b0479SSaeed Bishara static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, 98ff7b0479SSaeed Bishara int index, dma_addr_t addr) 99ff7b0479SSaeed Bishara { 100ff7b0479SSaeed Bishara struct mv_xor_desc *hw_desc = desc->hw_desc; 101e03bc654SThomas Petazzoni hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr; 102ff7b0479SSaeed Bishara if (desc->type == DMA_XOR) 103ff7b0479SSaeed Bishara hw_desc->desc_command |= (1 << index); 104ff7b0479SSaeed Bishara } 105ff7b0479SSaeed Bishara 106ff7b0479SSaeed Bishara static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) 107ff7b0479SSaeed Bishara { 1085733c38aSThomas Petazzoni return readl_relaxed(XOR_CURR_DESC(chan)); 109ff7b0479SSaeed Bishara } 110ff7b0479SSaeed Bishara 111ff7b0479SSaeed Bishara static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, 112ff7b0479SSaeed Bishara u32 next_desc_addr) 113ff7b0479SSaeed Bishara { 1145733c38aSThomas Petazzoni writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan)); 115ff7b0479SSaeed Bishara } 116ff7b0479SSaeed Bishara 117ff7b0479SSaeed Bishara static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) 118ff7b0479SSaeed Bishara { 1195733c38aSThomas Petazzoni u32 val = readl_relaxed(XOR_INTR_MASK(chan)); 120ff7b0479SSaeed Bishara val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); 1215733c38aSThomas Petazzoni writel_relaxed(val, XOR_INTR_MASK(chan)); 122ff7b0479SSaeed Bishara } 123ff7b0479SSaeed Bishara 124ff7b0479SSaeed Bishara static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) 125ff7b0479SSaeed Bishara { 1265733c38aSThomas Petazzoni u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan)); 127ff7b0479SSaeed Bishara intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; 128ff7b0479SSaeed Bishara return intr_cause; 129ff7b0479SSaeed Bishara } 130ff7b0479SSaeed Bishara 1310951e728SMaxime Ripard static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan) 132ff7b0479SSaeed Bishara { 133ba87d137SLior Amsalem u32 val; 134ba87d137SLior Amsalem 135ba87d137SLior Amsalem val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED; 136ba87d137SLior Amsalem val = ~(val << (chan->idx * 16)); 137c98c1781SThomas Petazzoni 
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); 1385733c38aSThomas Petazzoni writel_relaxed(val, XOR_INTR_CAUSE(chan)); 139ff7b0479SSaeed Bishara } 140ff7b0479SSaeed Bishara 1410951e728SMaxime Ripard static void mv_chan_clear_err_status(struct mv_xor_chan *chan) 142ff7b0479SSaeed Bishara { 143ff7b0479SSaeed Bishara u32 val = 0xFFFF0000 >> (chan->idx * 16); 1445733c38aSThomas Petazzoni writel_relaxed(val, XOR_INTR_CAUSE(chan)); 145ff7b0479SSaeed Bishara } 146ff7b0479SSaeed Bishara 1470951e728SMaxime Ripard static void mv_chan_set_mode(struct mv_xor_chan *chan, 14881aafb3eSThomas Petazzoni u32 op_mode) 149ff7b0479SSaeed Bishara { 1505733c38aSThomas Petazzoni u32 config = readl_relaxed(XOR_CONFIG(chan)); 151ff7b0479SSaeed Bishara 1526f166312SLior Amsalem config &= ~0x7; 1536f166312SLior Amsalem config |= op_mode; 1546f166312SLior Amsalem 155e03bc654SThomas Petazzoni #if defined(__BIG_ENDIAN) 156e03bc654SThomas Petazzoni config |= XOR_DESCRIPTOR_SWAP; 157e03bc654SThomas Petazzoni #else 158e03bc654SThomas Petazzoni config &= ~XOR_DESCRIPTOR_SWAP; 159e03bc654SThomas Petazzoni #endif 160e03bc654SThomas Petazzoni 1615733c38aSThomas Petazzoni writel_relaxed(config, XOR_CONFIG(chan)); 162ff7b0479SSaeed Bishara } 163ff7b0479SSaeed Bishara 164ff7b0479SSaeed Bishara static void mv_chan_activate(struct mv_xor_chan *chan) 165ff7b0479SSaeed Bishara { 166c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 1675a9a55bfSEzequiel Garcia 1685a9a55bfSEzequiel Garcia /* writel ensures all descriptors are flushed before activation */ 1695a9a55bfSEzequiel Garcia writel(BIT(0), XOR_ACTIVATION(chan)); 170ff7b0479SSaeed Bishara } 171ff7b0479SSaeed Bishara 172ff7b0479SSaeed Bishara static char mv_chan_is_busy(struct mv_xor_chan *chan) 173ff7b0479SSaeed Bishara { 1745733c38aSThomas Petazzoni u32 state = readl_relaxed(XOR_ACTIVATION(chan)); 175ff7b0479SSaeed Bishara 176ff7b0479SSaeed Bishara state = (state >> 4) & 0x3; 177ff7b0479SSaeed Bishara 178ff7b0479SSaeed Bishara return (state == 1) ? 
1 : 0; 179ff7b0479SSaeed Bishara } 180ff7b0479SSaeed Bishara 181ff7b0479SSaeed Bishara /* 1820951e728SMaxime Ripard * mv_chan_start_new_chain - program the engine to operate on new 1830951e728SMaxime Ripard * chain headed by sw_desc 184ff7b0479SSaeed Bishara * Caller must hold &mv_chan->lock while calling this function 185ff7b0479SSaeed Bishara */ 1860951e728SMaxime Ripard static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan, 187ff7b0479SSaeed Bishara struct mv_xor_desc_slot *sw_desc) 188ff7b0479SSaeed Bishara { 189c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", 190ff7b0479SSaeed Bishara __func__, __LINE__, sw_desc); 191ff7b0479SSaeed Bishara 192ff7b0479SSaeed Bishara /* set the hardware chain */ 193ff7b0479SSaeed Bishara mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); 19448a9db46SBartlomiej Zolnierkiewicz 195dfc97661SLior Amsalem mv_chan->pending++; 19698817b99SThomas Petazzoni mv_xor_issue_pending(&mv_chan->dmachan); 197ff7b0479SSaeed Bishara } 198ff7b0479SSaeed Bishara 199ff7b0479SSaeed Bishara static dma_cookie_t 2000951e728SMaxime Ripard mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc, 2010951e728SMaxime Ripard struct mv_xor_chan *mv_chan, 2020951e728SMaxime Ripard dma_cookie_t cookie) 203ff7b0479SSaeed Bishara { 204ff7b0479SSaeed Bishara BUG_ON(desc->async_tx.cookie < 0); 205ff7b0479SSaeed Bishara 206ff7b0479SSaeed Bishara if (desc->async_tx.cookie > 0) { 207ff7b0479SSaeed Bishara cookie = desc->async_tx.cookie; 208ff7b0479SSaeed Bishara 2098058e258SDave Jiang dma_descriptor_unmap(&desc->async_tx); 210ff7b0479SSaeed Bishara /* call the callback (must not sleep or submit new 211ff7b0479SSaeed Bishara * operations to this channel) 212ff7b0479SSaeed Bishara */ 213ee7681a4SDave Jiang dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); 214ff7b0479SSaeed Bishara } 215ff7b0479SSaeed Bishara 216ff7b0479SSaeed Bishara /* run dependent operations */ 21707f2211eSDan Williams dma_run_dependencies(&desc->async_tx); 218ff7b0479SSaeed Bishara 219ff7b0479SSaeed Bishara return cookie; 220ff7b0479SSaeed Bishara } 221ff7b0479SSaeed Bishara 222ff7b0479SSaeed Bishara static int 2230951e728SMaxime Ripard mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan) 224ff7b0479SSaeed Bishara { 225ff7b0479SSaeed Bishara struct mv_xor_desc_slot *iter, *_iter; 226ff7b0479SSaeed Bishara 227c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 228ff7b0479SSaeed Bishara list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 229fbea28a2SLior Amsalem node) { 230ff7b0479SSaeed Bishara 231c5db858bSStefan Roese if (async_tx_test_ack(&iter->async_tx)) { 232fbea28a2SLior Amsalem list_move_tail(&iter->node, &mv_chan->free_slots); 233c5db858bSStefan Roese if (!list_empty(&iter->sg_tx_list)) { 234c5db858bSStefan Roese list_splice_tail_init(&iter->sg_tx_list, 235c5db858bSStefan Roese &mv_chan->free_slots); 236c5db858bSStefan Roese } 237c5db858bSStefan Roese } 238ff7b0479SSaeed Bishara } 239ff7b0479SSaeed Bishara return 0; 240ff7b0479SSaeed Bishara } 241ff7b0479SSaeed Bishara 242ff7b0479SSaeed Bishara static int 2430951e728SMaxime Ripard mv_desc_clean_slot(struct mv_xor_desc_slot *desc, 244ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan) 245ff7b0479SSaeed Bishara { 246c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", 247ff7b0479SSaeed Bishara __func__, __LINE__, desc, desc->async_tx.flags); 248fbea28a2SLior Amsalem 249ff7b0479SSaeed Bishara /* the client is 
allowed to attach dependent operations 250ff7b0479SSaeed Bishara * until 'ack' is set 251ff7b0479SSaeed Bishara */ 252c5db858bSStefan Roese if (!async_tx_test_ack(&desc->async_tx)) { 253ff7b0479SSaeed Bishara /* move this slot to the completed_slots */ 254fbea28a2SLior Amsalem list_move_tail(&desc->node, &mv_chan->completed_slots); 255c5db858bSStefan Roese if (!list_empty(&desc->sg_tx_list)) { 256c5db858bSStefan Roese list_splice_tail_init(&desc->sg_tx_list, 257c5db858bSStefan Roese &mv_chan->completed_slots); 258c5db858bSStefan Roese } 259c5db858bSStefan Roese } else { 260fbea28a2SLior Amsalem list_move_tail(&desc->node, &mv_chan->free_slots); 261c5db858bSStefan Roese if (!list_empty(&desc->sg_tx_list)) { 262c5db858bSStefan Roese list_splice_tail_init(&desc->sg_tx_list, 263c5db858bSStefan Roese &mv_chan->free_slots); 264c5db858bSStefan Roese } 265c5db858bSStefan Roese } 266ff7b0479SSaeed Bishara 267ff7b0479SSaeed Bishara return 0; 268ff7b0479SSaeed Bishara } 269ff7b0479SSaeed Bishara 270fbeec99aSEzequiel Garcia /* This function must be called with the mv_xor_chan spinlock held */ 2710951e728SMaxime Ripard static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan) 272ff7b0479SSaeed Bishara { 273ff7b0479SSaeed Bishara struct mv_xor_desc_slot *iter, *_iter; 274ff7b0479SSaeed Bishara dma_cookie_t cookie = 0; 275ff7b0479SSaeed Bishara int busy = mv_chan_is_busy(mv_chan); 276ff7b0479SSaeed Bishara u32 current_desc = mv_chan_get_current_desc(mv_chan); 2779136291fSLior Amsalem int current_cleaned = 0; 2789136291fSLior Amsalem struct mv_xor_desc *hw_desc; 279ff7b0479SSaeed Bishara 280c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 281c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); 2820951e728SMaxime Ripard mv_chan_clean_completed_slots(mv_chan); 283ff7b0479SSaeed Bishara 284ff7b0479SSaeed Bishara /* free completed slots from the chain starting with 285ff7b0479SSaeed Bishara * the oldest descriptor 286ff7b0479SSaeed Bishara */ 287ff7b0479SSaeed Bishara 288ff7b0479SSaeed Bishara list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 289fbea28a2SLior Amsalem node) { 290ff7b0479SSaeed Bishara 2919136291fSLior Amsalem /* clean finished descriptors */ 2929136291fSLior Amsalem hw_desc = iter->hw_desc; 2939136291fSLior Amsalem if (hw_desc->status & XOR_DESC_SUCCESS) { 2940951e728SMaxime Ripard cookie = mv_desc_run_tx_complete_actions(iter, mv_chan, 2959136291fSLior Amsalem cookie); 296ff7b0479SSaeed Bishara 2979136291fSLior Amsalem /* done processing desc, clean slot */ 2980951e728SMaxime Ripard mv_desc_clean_slot(iter, mv_chan); 2999136291fSLior Amsalem 3009136291fSLior Amsalem /* break if we did cleaned the current */ 301ff7b0479SSaeed Bishara if (iter->async_tx.phys == current_desc) { 3029136291fSLior Amsalem current_cleaned = 1; 303ff7b0479SSaeed Bishara break; 304ff7b0479SSaeed Bishara } 3059136291fSLior Amsalem } else { 3069136291fSLior Amsalem if (iter->async_tx.phys == current_desc) { 3079136291fSLior Amsalem current_cleaned = 0; 308ff7b0479SSaeed Bishara break; 309ff7b0479SSaeed Bishara } 3109136291fSLior Amsalem } 3119136291fSLior Amsalem } 312ff7b0479SSaeed Bishara 313ff7b0479SSaeed Bishara if ((busy == 0) && !list_empty(&mv_chan->chain)) { 3149136291fSLior Amsalem if (current_cleaned) { 3159136291fSLior Amsalem /* 3169136291fSLior Amsalem * current descriptor cleaned and removed, run 3179136291fSLior Amsalem * from list head 3189136291fSLior Amsalem */ 3199136291fSLior Amsalem iter = 
list_entry(mv_chan->chain.next, 320ff7b0479SSaeed Bishara struct mv_xor_desc_slot, 321fbea28a2SLior Amsalem node); 3220951e728SMaxime Ripard mv_chan_start_new_chain(mv_chan, iter); 3239136291fSLior Amsalem } else { 324fbea28a2SLior Amsalem if (!list_is_last(&iter->node, &mv_chan->chain)) { 3259136291fSLior Amsalem /* 3269136291fSLior Amsalem * descriptors are still waiting after 3279136291fSLior Amsalem * current, trigger them 3289136291fSLior Amsalem */ 329fbea28a2SLior Amsalem iter = list_entry(iter->node.next, 3309136291fSLior Amsalem struct mv_xor_desc_slot, 331fbea28a2SLior Amsalem node); 3320951e728SMaxime Ripard mv_chan_start_new_chain(mv_chan, iter); 3339136291fSLior Amsalem } else { 3349136291fSLior Amsalem /* 3359136291fSLior Amsalem * some descriptors are still waiting 3369136291fSLior Amsalem * to be cleaned 3379136291fSLior Amsalem */ 3389136291fSLior Amsalem tasklet_schedule(&mv_chan->irq_tasklet); 3399136291fSLior Amsalem } 3409136291fSLior Amsalem } 341ff7b0479SSaeed Bishara } 342ff7b0479SSaeed Bishara 343ff7b0479SSaeed Bishara if (cookie > 0) 34498817b99SThomas Petazzoni mv_chan->dmachan.completed_cookie = cookie; 345ff7b0479SSaeed Bishara } 346ff7b0479SSaeed Bishara 347ff7b0479SSaeed Bishara static void mv_xor_tasklet(unsigned long data) 348ff7b0479SSaeed Bishara { 349ff7b0479SSaeed Bishara struct mv_xor_chan *chan = (struct mv_xor_chan *) data; 350e43147acSEzequiel Garcia 351cbc229a4SBarry Song spin_lock(&chan->lock); 3520951e728SMaxime Ripard mv_chan_slot_cleanup(chan); 353cbc229a4SBarry Song spin_unlock(&chan->lock); 354ff7b0479SSaeed Bishara } 355ff7b0479SSaeed Bishara 356ff7b0479SSaeed Bishara static struct mv_xor_desc_slot * 3570951e728SMaxime Ripard mv_chan_alloc_slot(struct mv_xor_chan *mv_chan) 358ff7b0479SSaeed Bishara { 359fbea28a2SLior Amsalem struct mv_xor_desc_slot *iter; 360ff7b0479SSaeed Bishara 361fbea28a2SLior Amsalem spin_lock_bh(&mv_chan->lock); 362fbea28a2SLior Amsalem 363fbea28a2SLior Amsalem if (!list_empty(&mv_chan->free_slots)) { 364fbea28a2SLior Amsalem iter = list_first_entry(&mv_chan->free_slots, 365ff7b0479SSaeed Bishara struct mv_xor_desc_slot, 366fbea28a2SLior Amsalem node); 367ff7b0479SSaeed Bishara 368fbea28a2SLior Amsalem list_move_tail(&iter->node, &mv_chan->allocated_slots); 369dfc97661SLior Amsalem 370fbea28a2SLior Amsalem spin_unlock_bh(&mv_chan->lock); 371ff7b0479SSaeed Bishara 372dfc97661SLior Amsalem /* pre-ack descriptor */ 373ff7b0479SSaeed Bishara async_tx_ack(&iter->async_tx); 374dfc97661SLior Amsalem iter->async_tx.cookie = -EBUSY; 375dfc97661SLior Amsalem 376dfc97661SLior Amsalem return iter; 377dfc97661SLior Amsalem 378ff7b0479SSaeed Bishara } 379fbea28a2SLior Amsalem 380fbea28a2SLior Amsalem spin_unlock_bh(&mv_chan->lock); 381ff7b0479SSaeed Bishara 382ff7b0479SSaeed Bishara /* try to free some slots if the allocation fails */ 383ff7b0479SSaeed Bishara tasklet_schedule(&mv_chan->irq_tasklet); 384ff7b0479SSaeed Bishara 385ff7b0479SSaeed Bishara return NULL; 386ff7b0479SSaeed Bishara } 387ff7b0479SSaeed Bishara 388ff7b0479SSaeed Bishara /************************ DMA engine API functions ****************************/ 389ff7b0479SSaeed Bishara static dma_cookie_t 390ff7b0479SSaeed Bishara mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 391ff7b0479SSaeed Bishara { 392ff7b0479SSaeed Bishara struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); 393ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); 394dfc97661SLior Amsalem struct mv_xor_desc_slot *old_chain_tail; 395ff7b0479SSaeed Bishara 
dma_cookie_t cookie; 396ff7b0479SSaeed Bishara int new_hw_chain = 1; 397ff7b0479SSaeed Bishara 398c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), 399ff7b0479SSaeed Bishara "%s sw_desc %p: async_tx %p\n", 400ff7b0479SSaeed Bishara __func__, sw_desc, &sw_desc->async_tx); 401ff7b0479SSaeed Bishara 402ff7b0479SSaeed Bishara spin_lock_bh(&mv_chan->lock); 403884485e1SRussell King - ARM Linux cookie = dma_cookie_assign(tx); 404ff7b0479SSaeed Bishara 405ff7b0479SSaeed Bishara if (list_empty(&mv_chan->chain)) 406fbea28a2SLior Amsalem list_move_tail(&sw_desc->node, &mv_chan->chain); 407ff7b0479SSaeed Bishara else { 408ff7b0479SSaeed Bishara new_hw_chain = 0; 409ff7b0479SSaeed Bishara 410ff7b0479SSaeed Bishara old_chain_tail = list_entry(mv_chan->chain.prev, 411ff7b0479SSaeed Bishara struct mv_xor_desc_slot, 412fbea28a2SLior Amsalem node); 413fbea28a2SLior Amsalem list_move_tail(&sw_desc->node, &mv_chan->chain); 414ff7b0479SSaeed Bishara 41531fd8f5bSOlof Johansson dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", 41631fd8f5bSOlof Johansson &old_chain_tail->async_tx.phys); 417ff7b0479SSaeed Bishara 418ff7b0479SSaeed Bishara /* fix up the hardware chain */ 419dfc97661SLior Amsalem mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys); 420ff7b0479SSaeed Bishara 421ff7b0479SSaeed Bishara /* if the channel is not busy */ 422ff7b0479SSaeed Bishara if (!mv_chan_is_busy(mv_chan)) { 423ff7b0479SSaeed Bishara u32 current_desc = mv_chan_get_current_desc(mv_chan); 424ff7b0479SSaeed Bishara /* 425ff7b0479SSaeed Bishara * and the curren desc is the end of the chain before 426ff7b0479SSaeed Bishara * the append, then we need to start the channel 427ff7b0479SSaeed Bishara */ 428ff7b0479SSaeed Bishara if (current_desc == old_chain_tail->async_tx.phys) 429ff7b0479SSaeed Bishara new_hw_chain = 1; 430ff7b0479SSaeed Bishara } 431ff7b0479SSaeed Bishara } 432ff7b0479SSaeed Bishara 433ff7b0479SSaeed Bishara if (new_hw_chain) 4340951e728SMaxime Ripard mv_chan_start_new_chain(mv_chan, sw_desc); 435ff7b0479SSaeed Bishara 436ff7b0479SSaeed Bishara spin_unlock_bh(&mv_chan->lock); 437ff7b0479SSaeed Bishara 438ff7b0479SSaeed Bishara return cookie; 439ff7b0479SSaeed Bishara } 440ff7b0479SSaeed Bishara 441ff7b0479SSaeed Bishara /* returns the number of allocated descriptors */ 442aa1e6f1aSDan Williams static int mv_xor_alloc_chan_resources(struct dma_chan *chan) 443ff7b0479SSaeed Bishara { 44431fd8f5bSOlof Johansson void *virt_desc; 44531fd8f5bSOlof Johansson dma_addr_t dma_desc; 446ff7b0479SSaeed Bishara int idx; 447ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 448ff7b0479SSaeed Bishara struct mv_xor_desc_slot *slot = NULL; 449b503fa01SThomas Petazzoni int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE; 450ff7b0479SSaeed Bishara 451ff7b0479SSaeed Bishara /* Allocate descriptor slots */ 452ff7b0479SSaeed Bishara idx = mv_chan->slots_allocated; 453ff7b0479SSaeed Bishara while (idx < num_descs_in_pool) { 454ff7b0479SSaeed Bishara slot = kzalloc(sizeof(*slot), GFP_KERNEL); 455ff7b0479SSaeed Bishara if (!slot) { 456b8291ddeSEzequiel Garcia dev_info(mv_chan_to_devp(mv_chan), 457b8291ddeSEzequiel Garcia "channel only initialized %d descriptor slots", 458b8291ddeSEzequiel Garcia idx); 459ff7b0479SSaeed Bishara break; 460ff7b0479SSaeed Bishara } 46131fd8f5bSOlof Johansson virt_desc = mv_chan->dma_desc_pool_virt; 46231fd8f5bSOlof Johansson slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; 463ff7b0479SSaeed Bishara 464ff7b0479SSaeed Bishara 
dma_async_tx_descriptor_init(&slot->async_tx, chan);
465ff7b0479SSaeed Bishara slot->async_tx.tx_submit = mv_xor_tx_submit;
466fbea28a2SLior Amsalem INIT_LIST_HEAD(&slot->node);
467c5db858bSStefan Roese INIT_LIST_HEAD(&slot->sg_tx_list);
46831fd8f5bSOlof Johansson dma_desc = mv_chan->dma_desc_pool;
46931fd8f5bSOlof Johansson slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
470ff7b0479SSaeed Bishara slot->idx = idx++;
471ff7b0479SSaeed Bishara
472ff7b0479SSaeed Bishara spin_lock_bh(&mv_chan->lock);
473ff7b0479SSaeed Bishara mv_chan->slots_allocated = idx;
474fbea28a2SLior Amsalem list_add_tail(&slot->node, &mv_chan->free_slots);
475ff7b0479SSaeed Bishara spin_unlock_bh(&mv_chan->lock);
476ff7b0479SSaeed Bishara }
477ff7b0479SSaeed Bishara
478c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan),
479fbea28a2SLior Amsalem "allocated %d descriptor slots\n",
480fbea28a2SLior Amsalem mv_chan->slots_allocated);
481ff7b0479SSaeed Bishara
482ff7b0479SSaeed Bishara return mv_chan->slots_allocated ? : -ENOMEM;
483ff7b0479SSaeed Bishara }
484ff7b0479SSaeed Bishara
48577ff7a70SStefan Roese /*
48677ff7a70SStefan Roese * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
48777ff7a70SStefan Roese * a new MBus window if necessary. Use a cache for these checks so that
48877ff7a70SStefan Roese * the MMIO mapped registers don't have to be accessed for this check
48977ff7a70SStefan Roese * to speed up this process.
49077ff7a70SStefan Roese */
49177ff7a70SStefan Roese static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
49277ff7a70SStefan Roese {
49377ff7a70SStefan Roese struct mv_xor_device *xordev = mv_chan->xordev;
49477ff7a70SStefan Roese void __iomem *base = mv_chan->mmr_high_base;
49577ff7a70SStefan Roese u32 win_enable;
49677ff7a70SStefan Roese u32 size;
49777ff7a70SStefan Roese u8 target, attr;
49877ff7a70SStefan Roese int ret;
49977ff7a70SStefan Roese int i;
50077ff7a70SStefan Roese
50177ff7a70SStefan Roese /* Nothing needs to get done for the Armada 3700 */
50277ff7a70SStefan Roese if (xordev->xor_type == XOR_ARMADA_37XX)
50377ff7a70SStefan Roese return 0;
50477ff7a70SStefan Roese
50577ff7a70SStefan Roese /*
50677ff7a70SStefan Roese * Loop over the cached windows to check if the requested area
50777ff7a70SStefan Roese * is already mapped. If this is the case, nothing needs to be done
50877ff7a70SStefan Roese * and we can return.
50977ff7a70SStefan Roese */
51077ff7a70SStefan Roese for (i = 0; i < WINDOW_COUNT; i++) {
51177ff7a70SStefan Roese if (addr >= xordev->win_start[i] &&
51277ff7a70SStefan Roese addr <= xordev->win_end[i]) {
51377ff7a70SStefan Roese /* Window is already mapped */
51477ff7a70SStefan Roese return 0;
51577ff7a70SStefan Roese }
51677ff7a70SStefan Roese }
51777ff7a70SStefan Roese
51877ff7a70SStefan Roese /*
51977ff7a70SStefan Roese * The window is not mapped, so we need to create the new mapping
52077ff7a70SStefan Roese */
52177ff7a70SStefan Roese
52277ff7a70SStefan Roese /* If no IO window is found, the addr has to be located in SDRAM */
52377ff7a70SStefan Roese ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
52477ff7a70SStefan Roese if (ret < 0)
52577ff7a70SStefan Roese return 0;
52677ff7a70SStefan Roese
52777ff7a70SStefan Roese /*
52877ff7a70SStefan Roese * Mask the base addr 'addr' according to 'size' read back from the
52977ff7a70SStefan Roese * MBus window. Otherwise we might end up with an address located
53077ff7a70SStefan Roese * somewhere in the middle of this area here.
53177ff7a70SStefan Roese */ 53277ff7a70SStefan Roese size -= 1; 53377ff7a70SStefan Roese addr &= ~size; 53477ff7a70SStefan Roese 53577ff7a70SStefan Roese /* 53677ff7a70SStefan Roese * Reading one of both enabled register is enough, as they are always 53777ff7a70SStefan Roese * programmed to the identical values 53877ff7a70SStefan Roese */ 53977ff7a70SStefan Roese win_enable = readl(base + WINDOW_BAR_ENABLE(0)); 54077ff7a70SStefan Roese 54177ff7a70SStefan Roese /* Set 'i' to the first free window to write the new values to */ 54277ff7a70SStefan Roese i = ffs(~win_enable) - 1; 54377ff7a70SStefan Roese if (i >= WINDOW_COUNT) 54477ff7a70SStefan Roese return -ENOMEM; 54577ff7a70SStefan Roese 54677ff7a70SStefan Roese writel((addr & 0xffff0000) | (attr << 8) | target, 54777ff7a70SStefan Roese base + WINDOW_BASE(i)); 54877ff7a70SStefan Roese writel(size & 0xffff0000, base + WINDOW_SIZE(i)); 54977ff7a70SStefan Roese 55077ff7a70SStefan Roese /* Fill the caching variables for later use */ 55177ff7a70SStefan Roese xordev->win_start[i] = addr; 55277ff7a70SStefan Roese xordev->win_end[i] = addr + size; 55377ff7a70SStefan Roese 55477ff7a70SStefan Roese win_enable |= (1 << i); 55577ff7a70SStefan Roese win_enable |= 3 << (16 + (2 * i)); 55677ff7a70SStefan Roese writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 55777ff7a70SStefan Roese writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 55877ff7a70SStefan Roese 55977ff7a70SStefan Roese return 0; 56077ff7a70SStefan Roese } 56177ff7a70SStefan Roese 562ff7b0479SSaeed Bishara static struct dma_async_tx_descriptor * 563ff7b0479SSaeed Bishara mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 564ff7b0479SSaeed Bishara unsigned int src_cnt, size_t len, unsigned long flags) 565ff7b0479SSaeed Bishara { 566ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 567dfc97661SLior Amsalem struct mv_xor_desc_slot *sw_desc; 56877ff7a70SStefan Roese int ret; 569ff7b0479SSaeed Bishara 570ff7b0479SSaeed Bishara if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 571ff7b0479SSaeed Bishara return NULL; 572ff7b0479SSaeed Bishara 5737912d300SColy Li BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 574ff7b0479SSaeed Bishara 575c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), 576bc822e12SGregory CLEMENT "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", 57731fd8f5bSOlof Johansson __func__, src_cnt, len, &dest, flags); 578ff7b0479SSaeed Bishara 57977ff7a70SStefan Roese /* Check if a new window needs to get added for 'dest' */ 58077ff7a70SStefan Roese ret = mv_xor_add_io_win(mv_chan, dest); 58177ff7a70SStefan Roese if (ret) 58277ff7a70SStefan Roese return NULL; 58377ff7a70SStefan Roese 5840951e728SMaxime Ripard sw_desc = mv_chan_alloc_slot(mv_chan); 585ff7b0479SSaeed Bishara if (sw_desc) { 586ff7b0479SSaeed Bishara sw_desc->type = DMA_XOR; 587ff7b0479SSaeed Bishara sw_desc->async_tx.flags = flags; 588ba87d137SLior Amsalem mv_desc_init(sw_desc, dest, len, flags); 5896f166312SLior Amsalem if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) 5906f166312SLior Amsalem mv_desc_set_mode(sw_desc); 59177ff7a70SStefan Roese while (src_cnt--) { 59277ff7a70SStefan Roese /* Check if a new window needs to get added for 'src' */ 59377ff7a70SStefan Roese ret = mv_xor_add_io_win(mv_chan, src[src_cnt]); 59477ff7a70SStefan Roese if (ret) 59577ff7a70SStefan Roese return NULL; 596dfc97661SLior Amsalem mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); 597ff7b0479SSaeed Bishara } 59877ff7a70SStefan Roese } 599fbea28a2SLior Amsalem 600c98c1781SThomas Petazzoni 
dev_dbg(mv_chan_to_devp(mv_chan), 601ff7b0479SSaeed Bishara "%s sw_desc %p async_tx %p \n", 602ff7b0479SSaeed Bishara __func__, sw_desc, &sw_desc->async_tx); 603ff7b0479SSaeed Bishara return sw_desc ? &sw_desc->async_tx : NULL; 604ff7b0479SSaeed Bishara } 605ff7b0479SSaeed Bishara 6063e4f52e2SLior Amsalem static struct dma_async_tx_descriptor * 6073e4f52e2SLior Amsalem mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 6083e4f52e2SLior Amsalem size_t len, unsigned long flags) 6093e4f52e2SLior Amsalem { 6103e4f52e2SLior Amsalem /* 6113e4f52e2SLior Amsalem * A MEMCPY operation is identical to an XOR operation with only 6123e4f52e2SLior Amsalem * a single source address. 6133e4f52e2SLior Amsalem */ 6143e4f52e2SLior Amsalem return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); 6153e4f52e2SLior Amsalem } 6163e4f52e2SLior Amsalem 61722843545SLior Amsalem static struct dma_async_tx_descriptor * 61822843545SLior Amsalem mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) 61922843545SLior Amsalem { 62022843545SLior Amsalem struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 62122843545SLior Amsalem dma_addr_t src, dest; 62222843545SLior Amsalem size_t len; 62322843545SLior Amsalem 62422843545SLior Amsalem src = mv_chan->dummy_src_addr; 62522843545SLior Amsalem dest = mv_chan->dummy_dst_addr; 62622843545SLior Amsalem len = MV_XOR_MIN_BYTE_COUNT; 62722843545SLior Amsalem 62822843545SLior Amsalem /* 62922843545SLior Amsalem * We implement the DMA_INTERRUPT operation as a minimum sized 63022843545SLior Amsalem * XOR operation with a single dummy source address. 63122843545SLior Amsalem */ 63222843545SLior Amsalem return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); 63322843545SLior Amsalem } 63422843545SLior Amsalem 635ff7b0479SSaeed Bishara static void mv_xor_free_chan_resources(struct dma_chan *chan) 636ff7b0479SSaeed Bishara { 637ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 638ff7b0479SSaeed Bishara struct mv_xor_desc_slot *iter, *_iter; 639ff7b0479SSaeed Bishara int in_use_descs = 0; 640ff7b0479SSaeed Bishara 641ff7b0479SSaeed Bishara spin_lock_bh(&mv_chan->lock); 642e43147acSEzequiel Garcia 6430951e728SMaxime Ripard mv_chan_slot_cleanup(mv_chan); 644ff7b0479SSaeed Bishara 645ff7b0479SSaeed Bishara list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 646fbea28a2SLior Amsalem node) { 647ff7b0479SSaeed Bishara in_use_descs++; 648fbea28a2SLior Amsalem list_move_tail(&iter->node, &mv_chan->free_slots); 649ff7b0479SSaeed Bishara } 650ff7b0479SSaeed Bishara list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 651fbea28a2SLior Amsalem node) { 652ff7b0479SSaeed Bishara in_use_descs++; 653fbea28a2SLior Amsalem list_move_tail(&iter->node, &mv_chan->free_slots); 654fbea28a2SLior Amsalem } 655fbea28a2SLior Amsalem list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots, 656fbea28a2SLior Amsalem node) { 657fbea28a2SLior Amsalem in_use_descs++; 658fbea28a2SLior Amsalem list_move_tail(&iter->node, &mv_chan->free_slots); 659ff7b0479SSaeed Bishara } 660ff7b0479SSaeed Bishara list_for_each_entry_safe_reverse( 661fbea28a2SLior Amsalem iter, _iter, &mv_chan->free_slots, node) { 662fbea28a2SLior Amsalem list_del(&iter->node); 663ff7b0479SSaeed Bishara kfree(iter); 664ff7b0479SSaeed Bishara mv_chan->slots_allocated--; 665ff7b0479SSaeed Bishara } 666ff7b0479SSaeed Bishara 667c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n", 668ff7b0479SSaeed Bishara __func__, 
mv_chan->slots_allocated); 669ff7b0479SSaeed Bishara spin_unlock_bh(&mv_chan->lock); 670ff7b0479SSaeed Bishara 671ff7b0479SSaeed Bishara if (in_use_descs) 672c98c1781SThomas Petazzoni dev_err(mv_chan_to_devp(mv_chan), 673ff7b0479SSaeed Bishara "freeing %d in use descriptors!\n", in_use_descs); 674ff7b0479SSaeed Bishara } 675ff7b0479SSaeed Bishara 676ff7b0479SSaeed Bishara /** 67707934481SLinus Walleij * mv_xor_status - poll the status of an XOR transaction 678ff7b0479SSaeed Bishara * @chan: XOR channel handle 679ff7b0479SSaeed Bishara * @cookie: XOR transaction identifier 68007934481SLinus Walleij * @txstate: XOR transactions state holder (or NULL) 681ff7b0479SSaeed Bishara */ 68207934481SLinus Walleij static enum dma_status mv_xor_status(struct dma_chan *chan, 683ff7b0479SSaeed Bishara dma_cookie_t cookie, 68407934481SLinus Walleij struct dma_tx_state *txstate) 685ff7b0479SSaeed Bishara { 686ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 687ff7b0479SSaeed Bishara enum dma_status ret; 688ff7b0479SSaeed Bishara 68996a2af41SRussell King - ARM Linux ret = dma_cookie_status(chan, cookie, txstate); 690890766d2SEzequiel Garcia if (ret == DMA_COMPLETE) 691ff7b0479SSaeed Bishara return ret; 692e43147acSEzequiel Garcia 693e43147acSEzequiel Garcia spin_lock_bh(&mv_chan->lock); 6940951e728SMaxime Ripard mv_chan_slot_cleanup(mv_chan); 695e43147acSEzequiel Garcia spin_unlock_bh(&mv_chan->lock); 696ff7b0479SSaeed Bishara 69796a2af41SRussell King - ARM Linux return dma_cookie_status(chan, cookie, txstate); 698ff7b0479SSaeed Bishara } 699ff7b0479SSaeed Bishara 7000951e728SMaxime Ripard static void mv_chan_dump_regs(struct mv_xor_chan *chan) 701ff7b0479SSaeed Bishara { 702ff7b0479SSaeed Bishara u32 val; 703ff7b0479SSaeed Bishara 7045733c38aSThomas Petazzoni val = readl_relaxed(XOR_CONFIG(chan)); 7051ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); 706ff7b0479SSaeed Bishara 7075733c38aSThomas Petazzoni val = readl_relaxed(XOR_ACTIVATION(chan)); 7081ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val); 709ff7b0479SSaeed Bishara 7105733c38aSThomas Petazzoni val = readl_relaxed(XOR_INTR_CAUSE(chan)); 7111ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); 712ff7b0479SSaeed Bishara 7135733c38aSThomas Petazzoni val = readl_relaxed(XOR_INTR_MASK(chan)); 7141ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); 715ff7b0479SSaeed Bishara 7165733c38aSThomas Petazzoni val = readl_relaxed(XOR_ERROR_CAUSE(chan)); 7171ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); 718ff7b0479SSaeed Bishara 7195733c38aSThomas Petazzoni val = readl_relaxed(XOR_ERROR_ADDR(chan)); 7201ba151cdSJoe Perches dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); 721ff7b0479SSaeed Bishara } 722ff7b0479SSaeed Bishara 7230951e728SMaxime Ripard static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan, 724ff7b0479SSaeed Bishara u32 intr_cause) 725ff7b0479SSaeed Bishara { 7260e7488edSEzequiel Garcia if (intr_cause & XOR_INT_ERR_DECODE) { 7270e7488edSEzequiel Garcia dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n"); 728ff7b0479SSaeed Bishara return; 729ff7b0479SSaeed Bishara } 730ff7b0479SSaeed Bishara 7310e7488edSEzequiel Garcia dev_err(mv_chan_to_devp(chan), "error on chan %d. 
intr cause 0x%08x\n", 732ff7b0479SSaeed Bishara chan->idx, intr_cause); 733ff7b0479SSaeed Bishara 7340951e728SMaxime Ripard mv_chan_dump_regs(chan); 7350e7488edSEzequiel Garcia WARN_ON(1); 736ff7b0479SSaeed Bishara } 737ff7b0479SSaeed Bishara 738ff7b0479SSaeed Bishara static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) 739ff7b0479SSaeed Bishara { 740ff7b0479SSaeed Bishara struct mv_xor_chan *chan = data; 741ff7b0479SSaeed Bishara u32 intr_cause = mv_chan_get_intr_cause(chan); 742ff7b0479SSaeed Bishara 743c98c1781SThomas Petazzoni dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); 744ff7b0479SSaeed Bishara 7450e7488edSEzequiel Garcia if (intr_cause & XOR_INTR_ERRORS) 7460951e728SMaxime Ripard mv_chan_err_interrupt_handler(chan, intr_cause); 747ff7b0479SSaeed Bishara 748ff7b0479SSaeed Bishara tasklet_schedule(&chan->irq_tasklet); 749ff7b0479SSaeed Bishara 7500951e728SMaxime Ripard mv_chan_clear_eoc_cause(chan); 751ff7b0479SSaeed Bishara 752ff7b0479SSaeed Bishara return IRQ_HANDLED; 753ff7b0479SSaeed Bishara } 754ff7b0479SSaeed Bishara 755ff7b0479SSaeed Bishara static void mv_xor_issue_pending(struct dma_chan *chan) 756ff7b0479SSaeed Bishara { 757ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 758ff7b0479SSaeed Bishara 759ff7b0479SSaeed Bishara if (mv_chan->pending >= MV_XOR_THRESHOLD) { 760ff7b0479SSaeed Bishara mv_chan->pending = 0; 761ff7b0479SSaeed Bishara mv_chan_activate(mv_chan); 762ff7b0479SSaeed Bishara } 763ff7b0479SSaeed Bishara } 764ff7b0479SSaeed Bishara 765ff7b0479SSaeed Bishara /* 766ff7b0479SSaeed Bishara * Perform a transaction to verify the HW works. 767ff7b0479SSaeed Bishara */ 768ff7b0479SSaeed Bishara 7690951e728SMaxime Ripard static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) 770ff7b0479SSaeed Bishara { 771b8c01d25SEzequiel Garcia int i, ret; 772ff7b0479SSaeed Bishara void *src, *dest; 773ff7b0479SSaeed Bishara dma_addr_t src_dma, dest_dma; 774ff7b0479SSaeed Bishara struct dma_chan *dma_chan; 775ff7b0479SSaeed Bishara dma_cookie_t cookie; 776ff7b0479SSaeed Bishara struct dma_async_tx_descriptor *tx; 777d16695a7SEzequiel Garcia struct dmaengine_unmap_data *unmap; 778ff7b0479SSaeed Bishara int err = 0; 779ff7b0479SSaeed Bishara 7806da2ec56SKees Cook src = kmalloc(PAGE_SIZE, GFP_KERNEL); 781ff7b0479SSaeed Bishara if (!src) 782ff7b0479SSaeed Bishara return -ENOMEM; 783ff7b0479SSaeed Bishara 7846396bb22SKees Cook dest = kzalloc(PAGE_SIZE, GFP_KERNEL); 785ff7b0479SSaeed Bishara if (!dest) { 786ff7b0479SSaeed Bishara kfree(src); 787ff7b0479SSaeed Bishara return -ENOMEM; 788ff7b0479SSaeed Bishara } 789ff7b0479SSaeed Bishara 790ff7b0479SSaeed Bishara /* Fill in src buffer */ 791d16695a7SEzequiel Garcia for (i = 0; i < PAGE_SIZE; i++) 792ff7b0479SSaeed Bishara ((u8 *) src)[i] = (u8)i; 793ff7b0479SSaeed Bishara 794275cc0c8SThomas Petazzoni dma_chan = &mv_chan->dmachan; 795aa1e6f1aSDan Williams if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 796ff7b0479SSaeed Bishara err = -ENODEV; 797ff7b0479SSaeed Bishara goto out; 798ff7b0479SSaeed Bishara } 799ff7b0479SSaeed Bishara 800d16695a7SEzequiel Garcia unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); 801d16695a7SEzequiel Garcia if (!unmap) { 802d16695a7SEzequiel Garcia err = -ENOMEM; 803d16695a7SEzequiel Garcia goto free_resources; 804d16695a7SEzequiel Garcia } 805ff7b0479SSaeed Bishara 80651564635SStefan Roese src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 807b70e52caSGeliang Tang offset_in_page(src), PAGE_SIZE, 80851564635SStefan 
Roese DMA_TO_DEVICE); 809d16695a7SEzequiel Garcia unmap->addr[0] = src_dma; 810d16695a7SEzequiel Garcia 811b8c01d25SEzequiel Garcia ret = dma_mapping_error(dma_chan->device->dev, src_dma); 812b8c01d25SEzequiel Garcia if (ret) { 813b8c01d25SEzequiel Garcia err = -ENOMEM; 814b8c01d25SEzequiel Garcia goto free_resources; 815b8c01d25SEzequiel Garcia } 816b8c01d25SEzequiel Garcia unmap->to_cnt = 1; 817b8c01d25SEzequiel Garcia 81851564635SStefan Roese dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 819b70e52caSGeliang Tang offset_in_page(dest), PAGE_SIZE, 82051564635SStefan Roese DMA_FROM_DEVICE); 821d16695a7SEzequiel Garcia unmap->addr[1] = dest_dma; 822d16695a7SEzequiel Garcia 823b8c01d25SEzequiel Garcia ret = dma_mapping_error(dma_chan->device->dev, dest_dma); 824b8c01d25SEzequiel Garcia if (ret) { 825b8c01d25SEzequiel Garcia err = -ENOMEM; 826b8c01d25SEzequiel Garcia goto free_resources; 827b8c01d25SEzequiel Garcia } 828b8c01d25SEzequiel Garcia unmap->from_cnt = 1; 829d16695a7SEzequiel Garcia unmap->len = PAGE_SIZE; 830ff7b0479SSaeed Bishara 831ff7b0479SSaeed Bishara tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 832d16695a7SEzequiel Garcia PAGE_SIZE, 0); 833b8c01d25SEzequiel Garcia if (!tx) { 834b8c01d25SEzequiel Garcia dev_err(dma_chan->device->dev, 835b8c01d25SEzequiel Garcia "Self-test cannot prepare operation, disabling\n"); 836b8c01d25SEzequiel Garcia err = -ENODEV; 837b8c01d25SEzequiel Garcia goto free_resources; 838b8c01d25SEzequiel Garcia } 839b8c01d25SEzequiel Garcia 840ff7b0479SSaeed Bishara cookie = mv_xor_tx_submit(tx); 841b8c01d25SEzequiel Garcia if (dma_submit_error(cookie)) { 842b8c01d25SEzequiel Garcia dev_err(dma_chan->device->dev, 843b8c01d25SEzequiel Garcia "Self-test submit error, disabling\n"); 844b8c01d25SEzequiel Garcia err = -ENODEV; 845b8c01d25SEzequiel Garcia goto free_resources; 846b8c01d25SEzequiel Garcia } 847b8c01d25SEzequiel Garcia 848ff7b0479SSaeed Bishara mv_xor_issue_pending(dma_chan); 849ff7b0479SSaeed Bishara async_tx_ack(tx); 850ff7b0479SSaeed Bishara msleep(1); 851ff7b0479SSaeed Bishara 85207934481SLinus Walleij if (mv_xor_status(dma_chan, cookie, NULL) != 853b3efb8fcSVinod Koul DMA_COMPLETE) { 854a3fc74bcSThomas Petazzoni dev_err(dma_chan->device->dev, 855ff7b0479SSaeed Bishara "Self-test copy timed out, disabling\n"); 856ff7b0479SSaeed Bishara err = -ENODEV; 857ff7b0479SSaeed Bishara goto free_resources; 858ff7b0479SSaeed Bishara } 859ff7b0479SSaeed Bishara 860c35064c4SThomas Petazzoni dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 861d16695a7SEzequiel Garcia PAGE_SIZE, DMA_FROM_DEVICE); 862d16695a7SEzequiel Garcia if (memcmp(src, dest, PAGE_SIZE)) { 863a3fc74bcSThomas Petazzoni dev_err(dma_chan->device->dev, 864ff7b0479SSaeed Bishara "Self-test copy failed compare, disabling\n"); 865ff7b0479SSaeed Bishara err = -ENODEV; 866ff7b0479SSaeed Bishara goto free_resources; 867ff7b0479SSaeed Bishara } 868ff7b0479SSaeed Bishara 869ff7b0479SSaeed Bishara free_resources: 870d16695a7SEzequiel Garcia dmaengine_unmap_put(unmap); 871ff7b0479SSaeed Bishara mv_xor_free_chan_resources(dma_chan); 872ff7b0479SSaeed Bishara out: 873ff7b0479SSaeed Bishara kfree(src); 874ff7b0479SSaeed Bishara kfree(dest); 875ff7b0479SSaeed Bishara return err; 876ff7b0479SSaeed Bishara } 877ff7b0479SSaeed Bishara 878ff7b0479SSaeed Bishara #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ 879463a1f8bSBill Pemberton static int 8800951e728SMaxime Ripard mv_chan_xor_self_test(struct mv_xor_chan *mv_chan) 881ff7b0479SSaeed Bishara { 882b8c01d25SEzequiel 
Garcia int i, src_idx, ret; 883ff7b0479SSaeed Bishara struct page *dest; 884ff7b0479SSaeed Bishara struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; 885ff7b0479SSaeed Bishara dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 886ff7b0479SSaeed Bishara dma_addr_t dest_dma; 887ff7b0479SSaeed Bishara struct dma_async_tx_descriptor *tx; 888d16695a7SEzequiel Garcia struct dmaengine_unmap_data *unmap; 889ff7b0479SSaeed Bishara struct dma_chan *dma_chan; 890ff7b0479SSaeed Bishara dma_cookie_t cookie; 891ff7b0479SSaeed Bishara u8 cmp_byte = 0; 892ff7b0479SSaeed Bishara u32 cmp_word; 893ff7b0479SSaeed Bishara int err = 0; 894d16695a7SEzequiel Garcia int src_count = MV_XOR_NUM_SRC_TEST; 895ff7b0479SSaeed Bishara 896d16695a7SEzequiel Garcia for (src_idx = 0; src_idx < src_count; src_idx++) { 897ff7b0479SSaeed Bishara xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 898a09b09aeSRoel Kluin if (!xor_srcs[src_idx]) { 899a09b09aeSRoel Kluin while (src_idx--) 900ff7b0479SSaeed Bishara __free_page(xor_srcs[src_idx]); 901ff7b0479SSaeed Bishara return -ENOMEM; 902ff7b0479SSaeed Bishara } 903ff7b0479SSaeed Bishara } 904ff7b0479SSaeed Bishara 905ff7b0479SSaeed Bishara dest = alloc_page(GFP_KERNEL); 906a09b09aeSRoel Kluin if (!dest) { 907a09b09aeSRoel Kluin while (src_idx--) 908ff7b0479SSaeed Bishara __free_page(xor_srcs[src_idx]); 909ff7b0479SSaeed Bishara return -ENOMEM; 910ff7b0479SSaeed Bishara } 911ff7b0479SSaeed Bishara 912ff7b0479SSaeed Bishara /* Fill in src buffers */ 913d16695a7SEzequiel Garcia for (src_idx = 0; src_idx < src_count; src_idx++) { 914ff7b0479SSaeed Bishara u8 *ptr = page_address(xor_srcs[src_idx]); 915ff7b0479SSaeed Bishara for (i = 0; i < PAGE_SIZE; i++) 916ff7b0479SSaeed Bishara ptr[i] = (1 << src_idx); 917ff7b0479SSaeed Bishara } 918ff7b0479SSaeed Bishara 919d16695a7SEzequiel Garcia for (src_idx = 0; src_idx < src_count; src_idx++) 920ff7b0479SSaeed Bishara cmp_byte ^= (u8) (1 << src_idx); 921ff7b0479SSaeed Bishara 922ff7b0479SSaeed Bishara cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 923ff7b0479SSaeed Bishara (cmp_byte << 8) | cmp_byte; 924ff7b0479SSaeed Bishara 925ff7b0479SSaeed Bishara memset(page_address(dest), 0, PAGE_SIZE); 926ff7b0479SSaeed Bishara 927275cc0c8SThomas Petazzoni dma_chan = &mv_chan->dmachan; 928aa1e6f1aSDan Williams if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 929ff7b0479SSaeed Bishara err = -ENODEV; 930ff7b0479SSaeed Bishara goto out; 931ff7b0479SSaeed Bishara } 932ff7b0479SSaeed Bishara 933d16695a7SEzequiel Garcia unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, 934d16695a7SEzequiel Garcia GFP_KERNEL); 935d16695a7SEzequiel Garcia if (!unmap) { 936d16695a7SEzequiel Garcia err = -ENOMEM; 937d16695a7SEzequiel Garcia goto free_resources; 938d16695a7SEzequiel Garcia } 939ff7b0479SSaeed Bishara 940d16695a7SEzequiel Garcia /* test xor */ 941d16695a7SEzequiel Garcia for (i = 0; i < src_count; i++) { 942d16695a7SEzequiel Garcia unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 943ff7b0479SSaeed Bishara 0, PAGE_SIZE, DMA_TO_DEVICE); 944d16695a7SEzequiel Garcia dma_srcs[i] = unmap->addr[i]; 945b8c01d25SEzequiel Garcia ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]); 946b8c01d25SEzequiel Garcia if (ret) { 947b8c01d25SEzequiel Garcia err = -ENOMEM; 948b8c01d25SEzequiel Garcia goto free_resources; 949b8c01d25SEzequiel Garcia } 950d16695a7SEzequiel Garcia unmap->to_cnt++; 951d16695a7SEzequiel Garcia } 952d16695a7SEzequiel Garcia 953d16695a7SEzequiel Garcia unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 
954d16695a7SEzequiel Garcia DMA_FROM_DEVICE); 955d16695a7SEzequiel Garcia dest_dma = unmap->addr[src_count]; 956b8c01d25SEzequiel Garcia ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]); 957b8c01d25SEzequiel Garcia if (ret) { 958b8c01d25SEzequiel Garcia err = -ENOMEM; 959b8c01d25SEzequiel Garcia goto free_resources; 960b8c01d25SEzequiel Garcia } 961d16695a7SEzequiel Garcia unmap->from_cnt = 1; 962d16695a7SEzequiel Garcia unmap->len = PAGE_SIZE; 963ff7b0479SSaeed Bishara 964ff7b0479SSaeed Bishara tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 965d16695a7SEzequiel Garcia src_count, PAGE_SIZE, 0); 966b8c01d25SEzequiel Garcia if (!tx) { 967b8c01d25SEzequiel Garcia dev_err(dma_chan->device->dev, 968b8c01d25SEzequiel Garcia "Self-test cannot prepare operation, disabling\n"); 969b8c01d25SEzequiel Garcia err = -ENODEV; 970b8c01d25SEzequiel Garcia goto free_resources; 971b8c01d25SEzequiel Garcia } 972ff7b0479SSaeed Bishara 973ff7b0479SSaeed Bishara cookie = mv_xor_tx_submit(tx); 974b8c01d25SEzequiel Garcia if (dma_submit_error(cookie)) { 975b8c01d25SEzequiel Garcia dev_err(dma_chan->device->dev, 976b8c01d25SEzequiel Garcia "Self-test submit error, disabling\n"); 977b8c01d25SEzequiel Garcia err = -ENODEV; 978b8c01d25SEzequiel Garcia goto free_resources; 979b8c01d25SEzequiel Garcia } 980b8c01d25SEzequiel Garcia 981ff7b0479SSaeed Bishara mv_xor_issue_pending(dma_chan); 982ff7b0479SSaeed Bishara async_tx_ack(tx); 983ff7b0479SSaeed Bishara msleep(8); 984ff7b0479SSaeed Bishara 98507934481SLinus Walleij if (mv_xor_status(dma_chan, cookie, NULL) != 986b3efb8fcSVinod Koul DMA_COMPLETE) { 987a3fc74bcSThomas Petazzoni dev_err(dma_chan->device->dev, 988ff7b0479SSaeed Bishara "Self-test xor timed out, disabling\n"); 989ff7b0479SSaeed Bishara err = -ENODEV; 990ff7b0479SSaeed Bishara goto free_resources; 991ff7b0479SSaeed Bishara } 992ff7b0479SSaeed Bishara 993c35064c4SThomas Petazzoni dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 994ff7b0479SSaeed Bishara PAGE_SIZE, DMA_FROM_DEVICE); 995ff7b0479SSaeed Bishara for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 996ff7b0479SSaeed Bishara u32 *ptr = page_address(dest); 997ff7b0479SSaeed Bishara if (ptr[i] != cmp_word) { 998a3fc74bcSThomas Petazzoni dev_err(dma_chan->device->dev, 9991ba151cdSJoe Perches "Self-test xor failed compare, disabling. 
index %d, data %x, expected %x\n", 10001ba151cdSJoe Perches i, ptr[i], cmp_word); 1001ff7b0479SSaeed Bishara err = -ENODEV; 1002ff7b0479SSaeed Bishara goto free_resources; 1003ff7b0479SSaeed Bishara } 1004ff7b0479SSaeed Bishara } 1005ff7b0479SSaeed Bishara 1006ff7b0479SSaeed Bishara free_resources: 1007d16695a7SEzequiel Garcia dmaengine_unmap_put(unmap); 1008ff7b0479SSaeed Bishara mv_xor_free_chan_resources(dma_chan); 1009ff7b0479SSaeed Bishara out: 1010d16695a7SEzequiel Garcia src_idx = src_count; 1011ff7b0479SSaeed Bishara while (src_idx--) 1012ff7b0479SSaeed Bishara __free_page(xor_srcs[src_idx]); 1013ff7b0479SSaeed Bishara __free_page(dest); 1014ff7b0479SSaeed Bishara return err; 1015ff7b0479SSaeed Bishara } 1016ff7b0479SSaeed Bishara 10171ef48a26SThomas Petazzoni static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 1018ff7b0479SSaeed Bishara { 1019ff7b0479SSaeed Bishara struct dma_chan *chan, *_chan; 10201ef48a26SThomas Petazzoni struct device *dev = mv_chan->dmadev.dev; 1021ff7b0479SSaeed Bishara 10221ef48a26SThomas Petazzoni dma_async_device_unregister(&mv_chan->dmadev); 1023ff7b0479SSaeed Bishara 1024b503fa01SThomas Petazzoni dma_free_coherent(dev, MV_XOR_POOL_SIZE, 10251ef48a26SThomas Petazzoni mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 102622843545SLior Amsalem dma_unmap_single(dev, mv_chan->dummy_src_addr, 102722843545SLior Amsalem MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); 102822843545SLior Amsalem dma_unmap_single(dev, mv_chan->dummy_dst_addr, 102922843545SLior Amsalem MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); 1030ff7b0479SSaeed Bishara 10311ef48a26SThomas Petazzoni list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, 1032ff7b0479SSaeed Bishara device_node) { 1033ff7b0479SSaeed Bishara list_del(&chan->device_node); 1034ff7b0479SSaeed Bishara } 1035ff7b0479SSaeed Bishara 103688eb92cbSThomas Petazzoni free_irq(mv_chan->irq, mv_chan); 103788eb92cbSThomas Petazzoni 1038ff7b0479SSaeed Bishara return 0; 1039ff7b0479SSaeed Bishara } 1040ff7b0479SSaeed Bishara 10411ef48a26SThomas Petazzoni static struct mv_xor_chan * 1042297eedbaSThomas Petazzoni mv_xor_channel_add(struct mv_xor_device *xordev, 1043a6b4a9d2SThomas Petazzoni struct platform_device *pdev, 1044dd130c65SGregory CLEMENT int idx, dma_cap_mask_t cap_mask, int irq) 1045ff7b0479SSaeed Bishara { 1046ff7b0479SSaeed Bishara int ret = 0; 1047ff7b0479SSaeed Bishara struct mv_xor_chan *mv_chan; 1048ff7b0479SSaeed Bishara struct dma_device *dma_dev; 1049ff7b0479SSaeed Bishara 10501ef48a26SThomas Petazzoni mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); 1051a577659fSSachin Kamat if (!mv_chan) 1052a577659fSSachin Kamat return ERR_PTR(-ENOMEM); 1053ff7b0479SSaeed Bishara 10549aedbdbaSThomas Petazzoni mv_chan->idx = idx; 105588eb92cbSThomas Petazzoni mv_chan->irq = irq; 1056dd130c65SGregory CLEMENT if (xordev->xor_type == XOR_ORION) 1057dd130c65SGregory CLEMENT mv_chan->op_in_desc = XOR_MODE_IN_REG; 1058dd130c65SGregory CLEMENT else 1059dd130c65SGregory CLEMENT mv_chan->op_in_desc = XOR_MODE_IN_DESC; 1060ff7b0479SSaeed Bishara 10611ef48a26SThomas Petazzoni dma_dev = &mv_chan->dmadev; 106277ff7a70SStefan Roese mv_chan->xordev = xordev; 1063ff7b0479SSaeed Bishara 106422843545SLior Amsalem /* 106522843545SLior Amsalem * These source and destination dummy buffers are used to implement 106622843545SLior Amsalem * a DMA_INTERRUPT operation as a minimum-sized XOR operation. 106722843545SLior Amsalem * Hence, we only need to map the buffers at initialization-time. 
106822843545SLior Amsalem */
106922843545SLior Amsalem mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
107022843545SLior Amsalem mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
107122843545SLior Amsalem mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
107222843545SLior Amsalem mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
107322843545SLior Amsalem
1074ff7b0479SSaeed Bishara /* allocate coherent memory for hardware descriptors
1075ff7b0479SSaeed Bishara * note: writecombine gives slightly better performance, but
1076ff7b0479SSaeed Bishara * requires that we explicitly flush the writes
1077ff7b0479SSaeed Bishara */
10781ef48a26SThomas Petazzoni mv_chan->dma_desc_pool_virt =
1079f6e45661SLuis R. Rodriguez dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1080f6e45661SLuis R. Rodriguez GFP_KERNEL);
10811ef48a26SThomas Petazzoni if (!mv_chan->dma_desc_pool_virt)
1082a6b4a9d2SThomas Petazzoni return ERR_PTR(-ENOMEM);
1083ff7b0479SSaeed Bishara
1084ff7b0479SSaeed Bishara /* discover transaction capabilities from the platform data */
1085a6b4a9d2SThomas Petazzoni dma_dev->cap_mask = cap_mask;
1086ff7b0479SSaeed Bishara
1087ff7b0479SSaeed Bishara INIT_LIST_HEAD(&dma_dev->channels);
1088ff7b0479SSaeed Bishara
1089ff7b0479SSaeed Bishara /* set base routines */
1090ff7b0479SSaeed Bishara dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1091ff7b0479SSaeed Bishara dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
109207934481SLinus Walleij dma_dev->device_tx_status = mv_xor_status;
1093ff7b0479SSaeed Bishara dma_dev->device_issue_pending = mv_xor_issue_pending;
1094ff7b0479SSaeed Bishara dma_dev->dev = &pdev->dev;
1095ff7b0479SSaeed Bishara
1096ff7b0479SSaeed Bishara /* set prep routines based on capability */
109722843545SLior Amsalem if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
109822843545SLior Amsalem dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1099ff7b0479SSaeed Bishara if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1100ff7b0479SSaeed Bishara dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1101ff7b0479SSaeed Bishara if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1102c019894eSJoe Perches dma_dev->max_xor = 8;
1103ff7b0479SSaeed Bishara dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1104ff7b0479SSaeed Bishara }
1105ff7b0479SSaeed Bishara
1106297eedbaSThomas Petazzoni mv_chan->mmr_base = xordev->xor_base;
110782a1402eSEzequiel Garcia mv_chan->mmr_high_base = xordev->xor_high_base;
1108ff7b0479SSaeed Bishara tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1109ff7b0479SSaeed Bishara mv_chan);
1110ff7b0479SSaeed Bishara
1111ff7b0479SSaeed Bishara /* clear errors before enabling interrupts */
11120951e728SMaxime Ripard mv_chan_clear_err_status(mv_chan);
1113ff7b0479SSaeed Bishara
11142d0a0745SThomas Petazzoni ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1115ff7b0479SSaeed Bishara 0, dev_name(&pdev->dev), mv_chan);
1116ff7b0479SSaeed Bishara if (ret)
1117ff7b0479SSaeed Bishara goto err_free_dma;
1118ff7b0479SSaeed Bishara
1119ff7b0479SSaeed Bishara mv_chan_unmask_interrupts(mv_chan);
1120ff7b0479SSaeed Bishara
11216f166312SLior Amsalem if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
112281aafb3eSThomas Petazzoni mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
11236f166312SLior Amsalem else
112481aafb3eSThomas Petazzoni mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
1125ff7b0479SSaeed Bishara
1126ff7b0479SSaeed Bishara
spin_lock_init(&mv_chan->lock); 1127ff7b0479SSaeed Bishara INIT_LIST_HEAD(&mv_chan->chain); 1128ff7b0479SSaeed Bishara INIT_LIST_HEAD(&mv_chan->completed_slots); 1129fbea28a2SLior Amsalem INIT_LIST_HEAD(&mv_chan->free_slots); 1130fbea28a2SLior Amsalem INIT_LIST_HEAD(&mv_chan->allocated_slots); 113198817b99SThomas Petazzoni mv_chan->dmachan.device = dma_dev; 113298817b99SThomas Petazzoni dma_cookie_init(&mv_chan->dmachan); 1133ff7b0479SSaeed Bishara 113498817b99SThomas Petazzoni list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); 1135ff7b0479SSaeed Bishara 1136ff7b0479SSaeed Bishara if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 11370951e728SMaxime Ripard ret = mv_chan_memcpy_self_test(mv_chan); 1138ff7b0479SSaeed Bishara dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1139ff7b0479SSaeed Bishara if (ret) 11402d0a0745SThomas Petazzoni goto err_free_irq; 1141ff7b0479SSaeed Bishara } 1142ff7b0479SSaeed Bishara 1143ff7b0479SSaeed Bishara if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 11440951e728SMaxime Ripard ret = mv_chan_xor_self_test(mv_chan); 1145ff7b0479SSaeed Bishara dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1146ff7b0479SSaeed Bishara if (ret) 11472d0a0745SThomas Petazzoni goto err_free_irq; 1148ff7b0479SSaeed Bishara } 1149ff7b0479SSaeed Bishara 1150c678fa66SDave Jiang dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", 11516f166312SLior Amsalem mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", 1152ff7b0479SSaeed Bishara dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1153ff7b0479SSaeed Bishara dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1154ff7b0479SSaeed Bishara dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1155ff7b0479SSaeed Bishara 1156*7c97381eSAditya Pakki ret = dma_async_device_register(dma_dev); 1157*7c97381eSAditya Pakki if (ret) 1158*7c97381eSAditya Pakki goto err_free_irq; 1159*7c97381eSAditya Pakki 11601ef48a26SThomas Petazzoni return mv_chan; 1161ff7b0479SSaeed Bishara 11622d0a0745SThomas Petazzoni err_free_irq: 11632d0a0745SThomas Petazzoni free_irq(mv_chan->irq, mv_chan); 1164ff7b0479SSaeed Bishara err_free_dma: 1165b503fa01SThomas Petazzoni dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, 11661ef48a26SThomas Petazzoni mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1167a6b4a9d2SThomas Petazzoni return ERR_PTR(ret); 1168ff7b0479SSaeed Bishara } 1169ff7b0479SSaeed Bishara 1170ff7b0479SSaeed Bishara static void 1171297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, 117263a9332bSAndrew Lunn const struct mbus_dram_target_info *dram) 1173ff7b0479SSaeed Bishara { 117482a1402eSEzequiel Garcia void __iomem *base = xordev->xor_high_base; 1175ff7b0479SSaeed Bishara u32 win_enable = 0; 1176ff7b0479SSaeed Bishara int i; 1177ff7b0479SSaeed Bishara 1178ff7b0479SSaeed Bishara for (i = 0; i < 8; i++) { 1179ff7b0479SSaeed Bishara writel(0, base + WINDOW_BASE(i)); 1180ff7b0479SSaeed Bishara writel(0, base + WINDOW_SIZE(i)); 1181ff7b0479SSaeed Bishara if (i < 4) 1182ff7b0479SSaeed Bishara writel(0, base + WINDOW_REMAP_HIGH(i)); 1183ff7b0479SSaeed Bishara } 1184ff7b0479SSaeed Bishara 1185ff7b0479SSaeed Bishara for (i = 0; i < dram->num_cs; i++) { 118663a9332bSAndrew Lunn const struct mbus_dram_window *cs = dram->cs + i; 1187ff7b0479SSaeed Bishara 1188ff7b0479SSaeed Bishara writel((cs->base & 0xffff0000) | 1189ff7b0479SSaeed Bishara (cs->mbus_attr << 8) | 1190ff7b0479SSaeed Bishara dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1191ff7b0479SSaeed Bishara 
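		/*
		 * The size register holds (size - 1) with the low 16 bits
		 * cleared.  As a purely illustrative example (hypothetical
		 * values), a 256 MB chip select would be programmed as
		 * (0x10000000 - 1) & 0xffff0000 == 0x0fff0000.
		 */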
writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1192ff7b0479SSaeed Bishara 119377ff7a70SStefan Roese /* Fill the caching variables for later use */ 119477ff7a70SStefan Roese xordev->win_start[i] = cs->base; 119577ff7a70SStefan Roese xordev->win_end[i] = cs->base + cs->size - 1; 119677ff7a70SStefan Roese 1197ff7b0479SSaeed Bishara win_enable |= (1 << i); 1198ff7b0479SSaeed Bishara win_enable |= 3 << (16 + (2 * i)); 1199ff7b0479SSaeed Bishara } 1200ff7b0479SSaeed Bishara 1201ff7b0479SSaeed Bishara writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1202ff7b0479SSaeed Bishara writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1203c4b4b732SThomas Petazzoni writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1204c4b4b732SThomas Petazzoni writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1205ff7b0479SSaeed Bishara } 1206ff7b0479SSaeed Bishara 1207ac5f0f3fSMarcin Wojtas static void 1208ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev) 1209ac5f0f3fSMarcin Wojtas { 1210ac5f0f3fSMarcin Wojtas void __iomem *base = xordev->xor_high_base; 1211ac5f0f3fSMarcin Wojtas u32 win_enable = 0; 1212ac5f0f3fSMarcin Wojtas int i; 1213ac5f0f3fSMarcin Wojtas 1214ac5f0f3fSMarcin Wojtas for (i = 0; i < 8; i++) { 1215ac5f0f3fSMarcin Wojtas writel(0, base + WINDOW_BASE(i)); 1216ac5f0f3fSMarcin Wojtas writel(0, base + WINDOW_SIZE(i)); 1217ac5f0f3fSMarcin Wojtas if (i < 4) 1218ac5f0f3fSMarcin Wojtas writel(0, base + WINDOW_REMAP_HIGH(i)); 1219ac5f0f3fSMarcin Wojtas } 1220ac5f0f3fSMarcin Wojtas /* 1221ac5f0f3fSMarcin Wojtas * For the Armada 3700, open a default 4GB Mbus window. The DRAM 1222ac5f0f3fSMarcin Wojtas * related configuration is done at the AXIS level. 1223ac5f0f3fSMarcin Wojtas */ 1224ac5f0f3fSMarcin Wojtas writel(0xffff0000, base + WINDOW_SIZE(0)); 1225ac5f0f3fSMarcin Wojtas win_enable |= 1; 1226ac5f0f3fSMarcin Wojtas win_enable |= 3 << 16; 1227ac5f0f3fSMarcin Wojtas 1228ac5f0f3fSMarcin Wojtas writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1229ac5f0f3fSMarcin Wojtas writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1230ac5f0f3fSMarcin Wojtas writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1231ac5f0f3fSMarcin Wojtas writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1232ac5f0f3fSMarcin Wojtas } 1233ac5f0f3fSMarcin Wojtas 12348b648436SThomas Petazzoni /* 12358b648436SThomas Petazzoni * Since this XOR driver is basically used only for RAID5, we don't 12368b648436SThomas Petazzoni * need to care about synchronizing ->suspend with DMA activity, 12378b648436SThomas Petazzoni * because the DMA engine will naturally be quiet due to the block 12388b648436SThomas Petazzoni * devices being suspended. 
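 *
 * Suspend therefore only saves the per-channel configuration and
 * interrupt mask registers; resume restores them and reprograms the
 * MBus address windows.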
12398b648436SThomas Petazzoni */ 12408b648436SThomas Petazzoni static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state) 12418b648436SThomas Petazzoni { 12428b648436SThomas Petazzoni struct mv_xor_device *xordev = platform_get_drvdata(pdev); 12438b648436SThomas Petazzoni int i; 12448b648436SThomas Petazzoni 12458b648436SThomas Petazzoni for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 12468b648436SThomas Petazzoni struct mv_xor_chan *mv_chan = xordev->channels[i]; 12478b648436SThomas Petazzoni 12488b648436SThomas Petazzoni if (!mv_chan) 12498b648436SThomas Petazzoni continue; 12508b648436SThomas Petazzoni 12518b648436SThomas Petazzoni mv_chan->saved_config_reg = 12528b648436SThomas Petazzoni readl_relaxed(XOR_CONFIG(mv_chan)); 12538b648436SThomas Petazzoni mv_chan->saved_int_mask_reg = 12548b648436SThomas Petazzoni readl_relaxed(XOR_INTR_MASK(mv_chan)); 12558b648436SThomas Petazzoni } 12568b648436SThomas Petazzoni 12578b648436SThomas Petazzoni return 0; 12588b648436SThomas Petazzoni } 12598b648436SThomas Petazzoni 12608b648436SThomas Petazzoni static int mv_xor_resume(struct platform_device *dev) 12618b648436SThomas Petazzoni { 12628b648436SThomas Petazzoni struct mv_xor_device *xordev = platform_get_drvdata(dev); 12638b648436SThomas Petazzoni const struct mbus_dram_target_info *dram; 12648b648436SThomas Petazzoni int i; 12658b648436SThomas Petazzoni 12668b648436SThomas Petazzoni for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 12678b648436SThomas Petazzoni struct mv_xor_chan *mv_chan = xordev->channels[i]; 12688b648436SThomas Petazzoni 12698b648436SThomas Petazzoni if (!mv_chan) 12708b648436SThomas Petazzoni continue; 12718b648436SThomas Petazzoni 12728b648436SThomas Petazzoni writel_relaxed(mv_chan->saved_config_reg, 12738b648436SThomas Petazzoni XOR_CONFIG(mv_chan)); 12748b648436SThomas Petazzoni writel_relaxed(mv_chan->saved_int_mask_reg, 12758b648436SThomas Petazzoni XOR_INTR_MASK(mv_chan)); 12768b648436SThomas Petazzoni } 12778b648436SThomas Petazzoni 1278ac5f0f3fSMarcin Wojtas if (xordev->xor_type == XOR_ARMADA_37XX) { 1279ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(xordev); 1280ac5f0f3fSMarcin Wojtas return 0; 1281ac5f0f3fSMarcin Wojtas } 1282ac5f0f3fSMarcin Wojtas 12838b648436SThomas Petazzoni dram = mv_mbus_dram_info(); 12848b648436SThomas Petazzoni if (dram) 12858b648436SThomas Petazzoni mv_xor_conf_mbus_windows(xordev, dram); 12868b648436SThomas Petazzoni 12878b648436SThomas Petazzoni return 0; 12888b648436SThomas Petazzoni } 12898b648436SThomas Petazzoni 12906f166312SLior Amsalem static const struct of_device_id mv_xor_dt_ids[] = { 1291dd130c65SGregory CLEMENT { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION }, 1292dd130c65SGregory CLEMENT { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X }, 1293ac5f0f3fSMarcin Wojtas { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX }, 12946f166312SLior Amsalem {}, 12956f166312SLior Amsalem }; 12966f166312SLior Amsalem 129777757291SThomas Petazzoni static unsigned int mv_xor_engine_count; 1298ff7b0479SSaeed Bishara 1299c2714334SLinus Torvalds static int mv_xor_probe(struct platform_device *pdev) 1300ff7b0479SSaeed Bishara { 130163a9332bSAndrew Lunn const struct mbus_dram_target_info *dram; 1302297eedbaSThomas Petazzoni struct mv_xor_device *xordev; 1303d4adcc01SJingoo Han struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); 1304ff7b0479SSaeed Bishara struct resource *res; 130577757291SThomas Petazzoni unsigned int max_engines, max_channels; 
130660d151f3SThomas Petazzoni int i, ret; 1307ff7b0479SSaeed Bishara 13081ba151cdSJoe Perches dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); 1309ff7b0479SSaeed Bishara 1310297eedbaSThomas Petazzoni xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); 1311297eedbaSThomas Petazzoni if (!xordev) 1312ff7b0479SSaeed Bishara return -ENOMEM; 1313ff7b0479SSaeed Bishara 1314ff7b0479SSaeed Bishara res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1315ff7b0479SSaeed Bishara if (!res) 1316ff7b0479SSaeed Bishara return -ENODEV; 1317ff7b0479SSaeed Bishara 1318297eedbaSThomas Petazzoni xordev->xor_base = devm_ioremap(&pdev->dev, res->start, 13194de1ba15SH Hartley Sweeten resource_size(res)); 1320297eedbaSThomas Petazzoni if (!xordev->xor_base) 1321ff7b0479SSaeed Bishara return -EBUSY; 1322ff7b0479SSaeed Bishara 1323ff7b0479SSaeed Bishara res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1324ff7b0479SSaeed Bishara if (!res) 1325ff7b0479SSaeed Bishara return -ENODEV; 1326ff7b0479SSaeed Bishara 1327297eedbaSThomas Petazzoni xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, 13284de1ba15SH Hartley Sweeten resource_size(res)); 1329297eedbaSThomas Petazzoni if (!xordev->xor_high_base) 1330ff7b0479SSaeed Bishara return -EBUSY; 1331ff7b0479SSaeed Bishara 1332297eedbaSThomas Petazzoni platform_set_drvdata(pdev, xordev); 1333ff7b0479SSaeed Bishara 1334dd130c65SGregory CLEMENT 1335dd130c65SGregory CLEMENT /* 1336dd130c65SGregory CLEMENT * We need to know which type of XOR device we use before 1337dd130c65SGregory CLEMENT * setting up. In the non-DT case it can only be the legacy one. 1338dd130c65SGregory CLEMENT */ 1339dd130c65SGregory CLEMENT xordev->xor_type = XOR_ORION; 1340dd130c65SGregory CLEMENT if (pdev->dev.of_node) { 1341dd130c65SGregory CLEMENT const struct of_device_id *of_id = 1342dd130c65SGregory CLEMENT of_match_device(mv_xor_dt_ids, 1343dd130c65SGregory CLEMENT &pdev->dev); 1344dd130c65SGregory CLEMENT 1345dd130c65SGregory CLEMENT xordev->xor_type = (uintptr_t)of_id->data; 1346dd130c65SGregory CLEMENT } 1347dd130c65SGregory CLEMENT 1348ff7b0479SSaeed Bishara /* 1349ff7b0479SSaeed Bishara * (Re-)program MBUS remapping windows if we are asked to. 1350ff7b0479SSaeed Bishara */ 1351ac5f0f3fSMarcin Wojtas if (xordev->xor_type == XOR_ARMADA_37XX) { 1352ac5f0f3fSMarcin Wojtas mv_xor_conf_mbus_windows_a3700(xordev); 1353ac5f0f3fSMarcin Wojtas } else { 135463a9332bSAndrew Lunn dram = mv_mbus_dram_info(); 135563a9332bSAndrew Lunn if (dram) 1356297eedbaSThomas Petazzoni mv_xor_conf_mbus_windows(xordev, dram); 1357ac5f0f3fSMarcin Wojtas } 1358ff7b0479SSaeed Bishara 1359c510182bSAndrew Lunn /* Not all platforms can gate the clock, so it is not 1360c510182bSAndrew Lunn * an error if the clock does not exist. 1361c510182bSAndrew Lunn */ 1362297eedbaSThomas Petazzoni xordev->clk = clk_get(&pdev->dev, NULL); 1363297eedbaSThomas Petazzoni if (!IS_ERR(xordev->clk)) 1364297eedbaSThomas Petazzoni clk_prepare_enable(xordev->clk); 1365c510182bSAndrew Lunn 136677757291SThomas Petazzoni /* 136777757291SThomas Petazzoni * We don't want to have more than one channel per CPU in 136877757291SThomas Petazzoni * order for async_tx to perform well. So we limit the number 136977757291SThomas Petazzoni * of engines and channels so that we take into account this 137077757291SThomas Petazzoni * constraint. Note that we also want to use channels from 1371ac5f0f3fSMarcin Wojtas * separate engines when possible. 
On the dual-CPU Armada 3700 1372ac5f0f3fSMarcin Wojtas * SoC, which has a single XOR engine, allow using both of its channels. 137377757291SThomas Petazzoni */ 137477757291SThomas Petazzoni max_engines = num_present_cpus(); 1375ac5f0f3fSMarcin Wojtas if (xordev->xor_type == XOR_ARMADA_37XX) 1376ac5f0f3fSMarcin Wojtas max_channels = num_present_cpus(); 1377ac5f0f3fSMarcin Wojtas else 137877757291SThomas Petazzoni max_channels = min_t(unsigned int, 137977757291SThomas Petazzoni MV_XOR_MAX_CHANNELS, 138077757291SThomas Petazzoni DIV_ROUND_UP(num_present_cpus(), 2)); 138177757291SThomas Petazzoni 138277757291SThomas Petazzoni if (mv_xor_engine_count >= max_engines) 138377757291SThomas Petazzoni return 0; 138477757291SThomas Petazzoni 1385f7d12ef5SThomas Petazzoni if (pdev->dev.of_node) { 1386f7d12ef5SThomas Petazzoni struct device_node *np; 1387f7d12ef5SThomas Petazzoni int i = 0; 1388f7d12ef5SThomas Petazzoni 1389f7d12ef5SThomas Petazzoni for_each_child_of_node(pdev->dev.of_node, np) { 13900be8253fSRussell King struct mv_xor_chan *chan; 1391f7d12ef5SThomas Petazzoni dma_cap_mask_t cap_mask; 1392f7d12ef5SThomas Petazzoni int irq; 1393f7d12ef5SThomas Petazzoni 139477757291SThomas Petazzoni if (i >= max_channels) 139577757291SThomas Petazzoni continue; 139677757291SThomas Petazzoni 1397f7d12ef5SThomas Petazzoni dma_cap_zero(cap_mask); 1398f7d12ef5SThomas Petazzoni dma_cap_set(DMA_MEMCPY, cap_mask); 1399f7d12ef5SThomas Petazzoni dma_cap_set(DMA_XOR, cap_mask); 1400f7d12ef5SThomas Petazzoni dma_cap_set(DMA_INTERRUPT, cap_mask); 1401f7d12ef5SThomas Petazzoni 1402f7d12ef5SThomas Petazzoni irq = irq_of_parse_and_map(np, 0); 1403f8eb9e7dSThomas Petazzoni if (!irq) { 1404f8eb9e7dSThomas Petazzoni ret = -ENODEV; 1405f7d12ef5SThomas Petazzoni goto err_channel_add; 1406f7d12ef5SThomas Petazzoni } 1407f7d12ef5SThomas Petazzoni 14080be8253fSRussell King chan = mv_xor_channel_add(xordev, pdev, i, 1409dd130c65SGregory CLEMENT cap_mask, irq); 14100be8253fSRussell King if (IS_ERR(chan)) { 14110be8253fSRussell King ret = PTR_ERR(chan); 1412f7d12ef5SThomas Petazzoni irq_dispose_mapping(irq); 1413f7d12ef5SThomas Petazzoni goto err_channel_add; 1414f7d12ef5SThomas Petazzoni } 1415f7d12ef5SThomas Petazzoni 14160be8253fSRussell King xordev->channels[i] = chan; 1417f7d12ef5SThomas Petazzoni i++; 1418f7d12ef5SThomas Petazzoni } 1419f7d12ef5SThomas Petazzoni } else if (pdata && pdata->channels) { 142077757291SThomas Petazzoni for (i = 0; i < max_channels; i++) { 1421e39f6ec1SThomas Petazzoni struct mv_xor_channel_data *cd; 14220be8253fSRussell King struct mv_xor_chan *chan; 142360d151f3SThomas Petazzoni int irq; 142460d151f3SThomas Petazzoni 142560d151f3SThomas Petazzoni cd = &pdata->channels[i]; 142660d151f3SThomas Petazzoni irq = platform_get_irq(pdev, i); 142760d151f3SThomas Petazzoni if (irq < 0) { 142860d151f3SThomas Petazzoni ret = irq; 142960d151f3SThomas Petazzoni goto err_channel_add; 143060d151f3SThomas Petazzoni } 143160d151f3SThomas Petazzoni 14320be8253fSRussell King chan = mv_xor_channel_add(xordev, pdev, i, 1433dd130c65SGregory CLEMENT cd->cap_mask, irq); 14340be8253fSRussell King if (IS_ERR(chan)) { 14350be8253fSRussell King ret = PTR_ERR(chan); 143660d151f3SThomas Petazzoni goto err_channel_add; 143760d151f3SThomas Petazzoni } 14380be8253fSRussell King 14390be8253fSRussell King xordev->channels[i] = chan; 144060d151f3SThomas Petazzoni } 144160d151f3SThomas Petazzoni } 144260d151f3SThomas Petazzoni 1443ff7b0479SSaeed Bishara return 0; 144460d151f3SThomas Petazzoni 144560d151f3SThomas Petazzoni err_channel_add: 
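	/*
	 * Roll back: remove any channels that were successfully added,
	 * dispose of their IRQ mappings in the DT case, and drop the
	 * clock reference taken above.
	 */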
144660d151f3SThomas Petazzoni for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) 1447f7d12ef5SThomas Petazzoni if (xordev->channels[i]) { 1448ab6e439fSThomas Petazzoni mv_xor_channel_remove(xordev->channels[i]); 1449f7d12ef5SThomas Petazzoni if (pdev->dev.of_node) 1450f7d12ef5SThomas Petazzoni irq_dispose_mapping(xordev->channels[i]->irq); 1451f7d12ef5SThomas Petazzoni } 145260d151f3SThomas Petazzoni 1453dab92064SThomas Petazzoni if (!IS_ERR(xordev->clk)) { 1454297eedbaSThomas Petazzoni clk_disable_unprepare(xordev->clk); 1455297eedbaSThomas Petazzoni clk_put(xordev->clk); 1456dab92064SThomas Petazzoni } 1457dab92064SThomas Petazzoni 145860d151f3SThomas Petazzoni return ret; 1459ff7b0479SSaeed Bishara } 1460ff7b0479SSaeed Bishara 1461ff7b0479SSaeed Bishara static struct platform_driver mv_xor_driver = { 1462ff7b0479SSaeed Bishara .probe = mv_xor_probe, 14638b648436SThomas Petazzoni .suspend = mv_xor_suspend, 14648b648436SThomas Petazzoni .resume = mv_xor_resume, 1465ff7b0479SSaeed Bishara .driver = { 1466ff7b0479SSaeed Bishara .name = MV_XOR_NAME, 1467f7d12ef5SThomas Petazzoni .of_match_table = of_match_ptr(mv_xor_dt_ids), 1468ff7b0479SSaeed Bishara }, 1469ff7b0479SSaeed Bishara }; 1470ff7b0479SSaeed Bishara 1471812608d1SGeliang Tang builtin_platform_driver(mv_xor_driver); 1472ff7b0479SSaeed Bishara 147325cf68daSPaul Gortmaker /* 1474ff7b0479SSaeed Bishara MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); 1475ff7b0479SSaeed Bishara MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); 1476ff7b0479SSaeed Bishara MODULE_LICENSE("GPL"); 147725cf68daSPaul Gortmaker */ 1478
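/*
 * Purely illustrative sketch (not copied from the binding document) of a
 * device-tree node that the DT probe path above can consume: the two reg
 * ranges become xor_base and xor_high_base, and each child node describes
 * one channel whose first interrupt is parsed with irq_of_parse_and_map().
 * Node names, addresses and interrupt numbers below are made up.
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100>,
 *		      <0x60a00 0x100>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *		};
 *		channel1 {
 *			interrupts = <52>;
 *		};
 *	};
 */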