/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_

#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"

/*
 * These defines avoid symbol clashes between fnic and enic (the Cisco
 * 10G Ethernet driver) when both are built into the kernel (CONFIG
 * options =y).
 */
#define vnic_wq_desc_avail fnic_wq_desc_avail
#define vnic_wq_desc_used fnic_wq_desc_used
#define vnic_wq_next_desc fnic_wq_next_desc
#define vnic_wq_post fnic_wq_post
#define vnic_wq_service fnic_wq_service
#define vnic_wq_free fnic_wq_free
#define vnic_wq_alloc fnic_wq_alloc
#define vnic_wq_init fnic_wq_init
#define vnic_wq_error_status fnic_wq_error_status
#define vnic_wq_enable fnic_wq_enable
#define vnic_wq_disable fnic_wq_disable
#define vnic_wq_clean fnic_wq_clean

/* Work queue control */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};

struct vnic_wq_buf {
	struct vnic_wq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;		/* OS buffer (e.g. skb); set on eop only */
	unsigned int len;
	unsigned int index;
	int sop;		/* start of packet */
	void *desc;		/* corresponding entry in the descriptor ring */
};

/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_SZ \
	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)

struct vnic_wq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
	struct vnic_wq_buf *to_use;		/* next buf to post */
	struct vnic_wq_buf *to_clean;		/* next buf to complete */
	unsigned int pkts_outstanding;
};

static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? One descriptor is always held back so a
	 * full ring can be distinguished from an empty one.
	 */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}

static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}

static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	if (eop) {
		/* A write memory barrier prevents the compiler and/or CPU
		 * from reordering the descriptor writes past the
		 * posted_index update. Without it, hardware could fetch a
		 * descriptor whose fields are not yet initialized and read
		 * stale values.
		 */
		wmb();
		iowrite32(buf->index, &wq->ctrl->posted_index);
	}
	wq->to_use = buf;

	wq->ring.desc_avail--;
}
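/*
 * Illustrative sketch only, not part of the driver API: how a caller
 * might post a single-fragment frame with the helpers above. "frame",
 * "dma_addr" and "len" are placeholders supplied by the caller, and the
 * hardware-specific encoding of the descriptor fields is elided since
 * the descriptor format is defined elsewhere.
 */
static inline int vnic_wq_post_one_example(struct vnic_wq *wq,
	void *frame, dma_addr_t dma_addr, unsigned int len)
{
	void *desc;

	if (!vnic_wq_desc_avail(wq))
		return -ENOMEM;	/* ring full; caller must retry later */

	/* Descriptor to fill; encoding dma_addr/len into *desc is
	 * hardware-specific and elided here.
	 */
	desc = vnic_wq_next_desc(wq);
	(void)desc;

	/* Single-fragment frame: this descriptor is both sop and eop */
	vnic_wq_post(wq, frame, dma_addr, len, 1, 1);

	return 0;
}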
static inline void vnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	/* service every posted buffer up to and including completed_index */
	buf = wq->to_clean;
	while (1) {

		(*buf_service)(wq, cq_desc, buf, opaque);

		wq->ring.desc_avail++;

		wq->to_clean = buf->next;

		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}

void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

#endif /* _VNIC_WQ_H_ */
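/*
 * Usage note (illustrative sketch only): vnic_wq_service() is normally
 * driven from completion-queue handling, with a buf_service callback
 * that releases each completed buffer. A minimal callback might look
 * like the following, where the device pointer, DMA direction, and the
 * OS-buffer free routine are placeholders from the caller's context:
 *
 *	static void example_wq_buf_done(struct vnic_wq *wq,
 *		struct cq_desc *cq_desc, struct vnic_wq_buf *buf,
 *		void *opaque)
 *	{
 *		dma_unmap_single(dev, buf->dma_addr, buf->len, DMA_TO_DEVICE);
 *		if (buf->os_buf)
 *			free_os_buf(buf->os_buf);
 *	}
 *
 * vnic_wq_clean() takes a similar per-buffer callback to release any
 * buffers still outstanding when the queue is torn down.
 */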