/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Work queue control */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};

struct vnic_wq_buf {
	struct vnic_wq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int len;
	unsigned int index;
	int sop;
	void *desc;
};

/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_WQ_BUF_BLK_SZ(entries) \
	(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)

struct vnic_wq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
	struct vnic_wq_buf *to_use;
	struct vnic_wq_buf *to_clean;
	unsigned int pkts_outstanding;
};

static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}

static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}

static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	if (eop) {
		/* Adding a write memory barrier prevents compiler and/or
		 * CPU reordering, thus avoiding descriptor posting before
		 * the descriptor is initialized.  Otherwise, hardware can
		 * read stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &wq->ctrl->posted_index);
	}
	wq->to_use = buf;

	wq->ring.desc_avail--;
}

static inline void vnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;
	while (1) {

		(*buf_service)(wq, cq_desc, buf, opaque);

		wq->ring.desc_avail++;

		wq->to_clean = buf->next;

		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}
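/*
 * A minimal usage sketch.  The helper below is hypothetical (not part of
 * this API) and only illustrates how a caller combines
 * vnic_wq_desc_avail(), vnic_wq_next_desc() and vnic_wq_post() to queue a
 * single-fragment packet (sop == eop == 1).  The caller is assumed to
 * have already DMA-mapped os_buf to dma_addr and to know the
 * device-specific layout of the descriptor returned by
 * vnic_wq_next_desc().
 */
static inline int vnic_wq_post_one_example(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len)
{
	void *desc;

	if (vnic_wq_desc_avail(wq) == 0)
		return -ENOMEM;	/* ring full; caller should stop the queue */

	desc = vnic_wq_next_desc(wq);
	/* ... fill the device-specific descriptor at desc here ... */
	(void)desc;

	/* sop == eop == 1 posts a complete one-fragment packet; eop also
	 * triggers the posted_index write to hardware after the wmb().
	 */
	vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);

	return 0;
}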
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

#endif /* _VNIC_WQ_H_ */
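/*
 * Lifecycle sketch (an illustration inferred from the declarations above,
 * not a normative sequence): a driver typically calls vnic_wq_alloc() to
 * size the descriptor ring and buffer blocks, vnic_wq_init() to bind the
 * WQ to its completion queue and error interrupt, and vnic_wq_enable()
 * before posting work with vnic_wq_post().  Completions are drained via
 * vnic_wq_service().  On teardown, vnic_wq_disable() stops the queue,
 * vnic_wq_clean() releases any still-posted buffers through the supplied
 * buf_clean callback, and vnic_wq_free() returns the resources.
 */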