xgbe.h: diff between 08dcc47c06c79de31b9b2c0b4637f6119e5701fa (old) and 174fd2597b0bd8c19fce6a97e8b0f753ef4ce7cb (new)
 /*
  * AMD 10Gb Ethernet driver
  *
  * This file is available to you under your choice of the following two
  * licenses:
  *
  * License 1: GPLv2
  *

--- 129 unchanged lines hidden ---

 #define XGBE_TX_DESC_MAX_PROC	(XGBE_TX_DESC_CNT >> 1)
 #define XGBE_RX_DESC_CNT	512

 #define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))

 #define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 #define XGBE_RX_BUF_ALIGN	64
 #define XGBE_SKB_ALLOC_SIZE	256
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */

 #define XGBE_MAX_DMA_CHANNELS	16
 #define XGBE_MAX_QUEUES		16

 /* DMA cache settings - Outer sharable, write-back, write-allocate */
 #define XGBE_DMA_OS_AXDOMAIN	0x2
 #define XGBE_DMA_OS_ARCACHE	0xb
 #define XGBE_DMA_OS_AWCACHE	0xf
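
The buffer-size constants above mix a hardware limit with alignment arithmetic: 0x3fff looks like the largest value a 14-bit descriptor length field can hold (an inference from the mask, not stated in the header), and clearing the low six bits keeps the result a multiple of 64, the same alignment used for Rx buffers (XGBE_RX_BUF_ALIGN). The new XGBE_SPH_HDSMS_SIZE is presumably the split-header maximum-size encoding that corresponds to the 256-byte XGBE_SKB_ALLOC_SIZE, as its comment implies. A small standalone sketch of the arithmetic only; the sample lengths are illustrative:

/* Standalone check of the size/alignment arithmetic - not driver code */
#include <stdio.h>

#define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
#define XGBE_RX_BUF_ALIGN	64

int main(void)
{
	unsigned int frag_len = 20000;	/* larger than one descriptor can carry */
	unsigned int rx_len = 1522;	/* ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN */

	/* Cap a Tx fragment to the largest 64-byte-aligned descriptor length */
	if (frag_len > XGBE_TX_MAX_BUF_SIZE)
		frag_len = XGBE_TX_MAX_BUF_SIZE;

	/* Round an Rx buffer size up to the 64-byte alignment requirement */
	rx_len = (rx_len + XGBE_RX_BUF_ALIGN - 1) & ~(XGBE_RX_BUF_ALIGN - 1);

	printf("XGBE_TX_MAX_BUF_SIZE = %d\n", XGBE_TX_MAX_BUF_SIZE);	/* 16320 */
	printf("capped frag_len = %u, aligned rx_len = %u\n", frag_len, rx_len);
	return 0;
}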

--- 91 unchanged lines hidden ---

 struct xgbe_page_alloc {
 	struct page *pages;
 	unsigned int pages_len;
 	unsigned int pages_offset;

 	dma_addr_t pages_dma;
 };

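struct xgbe_page_alloc evidently describes one DMA-mapped allocation: pages/pages_dma for the mapped pages, pages_len for the total size, and pages_offset for how much has already been handed out. A minimal kernel-style sketch, assuming that usage, of how fixed-size Rx buffers could be carved from it; xgbe_pa_take() is a hypothetical helper, not a function from this driver:

/*
 * Hypothetical sub-allocator: hand out 'len' bytes from a previously
 * mapped xgbe_page_alloc area and advance the offset.
 */
static int xgbe_pa_take(struct xgbe_page_alloc *pa, unsigned int len,
			dma_addr_t *dma, unsigned int *dma_len)
{
	if (pa->pages_offset + len > pa->pages_len)
		return -ENOMEM;		/* caller must map a fresh set of pages */

	*dma = pa->pages_dma + pa->pages_offset;
	*dma_len = len;
	pa->pages_offset += len;

	return 0;
}
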
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+	struct xgbe_page_alloc pa;
+	struct xgbe_page_alloc pa_unmap;
+
+	dma_addr_t dma;
+	unsigned int dma_len;
+};
+
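The new xgbe_buffer_data pairs a page allocation with the DMA address and length actually programmed into a descriptor, and (below) xgbe_ring_data now carries two of them: rx_hdr for the split-off headers and rx_buf for the payload. A hedged sketch, reusing the hypothetical xgbe_pa_take() above, of how one descriptor's two buffer locations might be filled; the function name and buf_len parameter are illustrative:

/* Hypothetical: fill one descriptor's header and payload locations */
static int xgbe_fill_rx_locations(struct xgbe_page_alloc *hdr_pa,
				  struct xgbe_page_alloc *buf_pa,
				  unsigned int buf_len,
				  struct xgbe_ring_data *rdata)
{
	int ret;

	/* Small buffer sized for the protocol headers the device splits off */
	ret = xgbe_pa_take(hdr_pa, XGBE_SKB_ALLOC_SIZE,
			   &rdata->rx_hdr.dma, &rdata->rx_hdr.dma_len);
	if (ret)
		return ret;

	/* Larger buffer for the remaining packet payload */
	return xgbe_pa_take(buf_pa, buf_len,
			    &rdata->rx_buf.dma, &rdata->rx_buf.dma_len);
}
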
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always use the
  * XGBE_GET_DESC_DATA macro to access this data from the ring)
  */
 struct xgbe_ring_data {
 	struct xgbe_ring_desc *rdesc;	/* Virtual address of descriptor */
 	dma_addr_t rdesc_dma;		/* DMA address of descriptor */

 	struct sk_buff *skb;		/* Virtual address of SKB */
 	dma_addr_t skb_dma;		/* DMA address of SKB data */
 	unsigned int skb_dma_len;	/* Length of SKB DMA area */
 	unsigned int tso_header;	/* TSO header indicator */

-	struct xgbe_page_alloc rx_pa;	/* Rx buffer page allocation */
-	struct xgbe_page_alloc rx_unmap;
+	struct xgbe_buffer_data rx_hdr;	/* Header locations */
+	struct xgbe_buffer_data rx_buf;	/* Payload locations */

-	dma_addr_t rx_dma;		/* DMA address of Rx buffer */
-	unsigned int rx_dma_len;	/* Length of the Rx DMA buffer */
-
+	unsigned short hdr_len;		/* Length of received header */
 	unsigned short len;		/* Length of received Rx packet */

 	unsigned int interrupt;		/* Interrupt indicator */

 	unsigned int mapped_as_page;

 	/* Incomplete receive save location.  If the budget is exhausted
 	 * or the last descriptor (last normal descriptor or a following

--- 23 unchanged lines hidden ---
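
Both the comment above xgbe_ring_data and the one below, above the ring's rdata array, insist on going through XGBE_GET_DESC_DATA; the macro itself sits in one of the hidden regions of the header. A sketch of the pattern those comments imply, assuming rdesc_count is a power of two so that masking a free-running index wraps it onto the rdata array:

/* Sketch of the accessor the comments refer to (assumed, not quoted) */
#define XGBE_GET_DESC_DATA(_ring, _idx)				\
	(((_ring)->rdata) +					\
	 ((_idx) & ((_ring)->rdesc_count - 1)))

/* Usage: 'index' may run past rdesc_count; the mask wraps it */
static struct xgbe_ring_data *xgbe_rdata_at(struct xgbe_ring *ring,
					    unsigned int index)
{
	return XGBE_GET_DESC_DATA(ring, index);
}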

 	unsigned int rdesc_count;

 	/* Array of descriptor data corresponding to the descriptor memory
 	 * (always use the XGBE_GET_DESC_DATA macro to access this data)
 	 */
 	struct xgbe_ring_data *rdata;

 	/* Page allocation for RX buffers */
-	struct xgbe_page_alloc rx_pa;
+	struct xgbe_page_alloc rx_hdr_pa;
+	struct xgbe_page_alloc rx_buf_pa;

 	/* Ring index values
 	 *  cur   - Tx: index of descriptor to be used for current transfer
 	 *          Rx: index of descriptor to check for packet availability
 	 *  dirty - Tx: index of descriptor to check for transfer complete
 	 *          Rx: count of descriptors in which a packet has been received
 	 *          (used with skb_realloc_index to refresh the ring)
 	 */
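
The cur/dirty description above reads as a pair of free-running counters: cur is where the next descriptor is produced and dirty trails it as Tx completions (or received Rx packets) are processed, with XGBE_GET_DESC_DATA handling the wrap. A hedged sketch of a Tx completion walk under that reading; xgbe_desc_owned_by_hw() is a hypothetical ownership check, not a driver function:

/* Hedged sketch of a Tx completion walk using the cur/dirty scheme */
static void xgbe_tx_walk_completed(struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Stop at the first descriptor the hardware still owns */
		if (xgbe_desc_owned_by_hw(rdata->rdesc))
			break;

		/* Unmap rdata->skb_dma and free rdata->skb here, then move on */
		ring->dirty++;
	}
}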

--- 467 unchanged lines hidden ---