/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE		(MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1
#define MV_XOR_MAX_CHANNELS		2

#define MV_XOR_MIN_BYTE_COUNT		SZ_128
#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_OPERATION_MODE_IN_DESC	7
#define XOR_DESCRIPTOR_SWAP		BIT(14)
#define XOR_DESC_SUCCESS		0x40000000

#define XOR_DESC_OPERATION_XOR		(0 << 24)
#define XOR_DESC_OPERATION_CRC32C	(1 << 24)
#define XOR_DESC_OPERATION_MEMCPY	(2 << 24)

/* Bit 31 of the descriptor status word: descriptor is owned by the engine */
#define XOR_DESC_DMA_OWNED		BIT(31)
/* Bit 31 of the descriptor command word: raise an end-of-descriptor interrupt */
#define XOR_DESC_EOD_INT_EN		BIT(31)

#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)

#define XOR_INT_END_OF_DESC	BIT(0)
#define XOR_INT_END_OF_CHAIN	BIT(1)
#define XOR_INT_STOPPED		BIT(2)
#define XOR_INT_PAUSED		BIT(3)
#define XOR_INT_ERR_DECODE	BIT(4)
#define XOR_INT_ERR_RDPROT	BIT(5)
#define XOR_INT_ERR_WRPROT	BIT(6)
#define XOR_INT_ERR_OWN		BIT(7)
#define XOR_INT_ERR_PAR		BIT(8)
#define XOR_INT_ERR_MBUS	BIT(9)

#define XOR_INTR_ERRORS		(XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
				 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN |    \
				 XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE	(XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
				 XOR_INT_STOPPED | XOR_INTR_ERRORS)

#define WINDOW_BASE(w)			(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)			(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)		(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)		(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

#define WINDOW_COUNT			8

struct mv_xor_device {
	void __iomem		*xor_base;
	void __iomem		*xor_high_base;
	struct clk		*clk;
	struct mv_xor_chan	*channels[MV_XOR_MAX_CHANNELS];
	int			xor_type;

	u32			win_start[WINDOW_COUNT];
	u32			win_end[WINDOW_COUNT];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @mmr_base: memory mapped register base
 * @mmr_high_base: memory mapped high register base
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by HW that still need to be acked
 * @dmadev: dma device object registered with the dmaengine core
 * @dmachan: common dmaengine channel object
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: mode in which the operation type is written into each
 *		descriptor instead of the channel configuration register
 * @xordev: pointer to the parent XOR controller device
 */
struct mv_xor_chan {
	int			pending;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	void __iomem		*mmr_high_base;
	unsigned int		idx;
	int			irq;
	struct list_head	chain;
	struct list_head	free_slots;
	struct list_head	allocated_slots;
	struct list_head	completed_slots;
	dma_addr_t		dma_desc_pool;
	void			*dma_desc_pool_virt;
	size_t			pool_size;
	struct dma_device	dmadev;
	struct dma_chan		dmachan;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
	int			op_in_desc;
	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t		dummy_src_addr, dummy_dst_addr;
	u32			saved_config_reg, saved_int_mask_reg;

	struct mv_xor_device	*xordev;
};

/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: type of the dmaengine transaction carried by this slot
 * @hw_desc: virtual address of the hardware descriptor
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head		node;
	struct list_head		sg_tx_list;
	enum dma_transaction_type	type;
	void				*hw_desc;
	u16				idx;
	struct dma_async_tx_descriptor	async_tx;
};

/*
 * This structure describes the 64-byte XOR hardware descriptor. The
 * mv_phy_src_idx() macro must be used when indexing the values of the
 * phy_src_addr[] array. This is due to the fact that the 'descriptor
 * swap' feature, used on big endian systems, swaps descriptor data
 * within blocks of 8 bytes. So two consecutive values of the
 * phy_src_addr[] array are actually swapped on big-endian, which
 * explains the different mv_phy_src_idx() implementation.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif

#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#endif
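
/*
 * Illustrative sketch only, not part of the driver: a helper like this
 * would normally live in the driver's .c file rather than in this header,
 * and its name and signature are made up for the example.  It shows how a
 * hardware descriptor might be filled for a single operation, assuming the
 * operation type is programmed in the channel's XOR_CONFIG register (i.e.
 * not the XOR_OPERATION_MODE_IN_DESC mode).  The point of interest is that
 * every index into phy_src_addr[] goes through mv_phy_src_idx(), which is
 * what keeps each source address in the 32-bit slot the engine expects when
 * the descriptor-swap feature is active on big-endian systems.
 */
static inline void mv_xor_example_fill_desc(struct mv_xor_desc *hw_desc,
					    dma_addr_t dest, dma_addr_t *srcs,
					    int src_cnt, u32 len)
{
	int i;

	hw_desc->status = XOR_DESC_DMA_OWNED;	/* hand the descriptor to the engine */
	hw_desc->phy_next_desc = 0;		/* last descriptor in the chain */
	hw_desc->desc_command = 0;		/* op type taken from XOR_CONFIG here */
	hw_desc->byte_count = len;
	hw_desc->phy_dest_addr = dest;
	for (i = 0; i < src_cnt; i++)
		hw_desc->phy_src_addr[mv_phy_src_idx(i)] = srcs[i];
}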