/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ?	\
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
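/*
 * For illustration (the numbers follow directly from the macros above):
 * with the 4096-entry maximum assumed by VNIC_RQ_BUF_BLKS_MAX,
 *
 *	VNIC_RQ_BUF_BLK_ENTRIES(4096)	-> 64 entries per block
 *	VNIC_RQ_BUF_BLK_SZ(4096)	-> 64 * sizeof(struct vnic_rq_buf) bytes
 *	VNIC_RQ_BUF_BLKS_NEEDED(4096)	-> DIV_ROUND_UP(4096, 64) = 64 blocks
 *
 * which is why bufs[] in struct vnic_rq below is sized VNIC_RQ_BUF_BLKS_MAX.
 * Rings smaller than VNIC_RQ_BUF_DFLT_BLK_ENTRIES use 32-entry blocks
 * instead.
 */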
struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define ENIC_POLL_STATE_IDLE		0
#define ENIC_POLL_STATE_NAPI		(1 << 0) /* NAPI owns this poll */
#define ENIC_POLL_STATE_POLL		(1 << 1) /* poll owns this poll */
#define ENIC_POLL_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this poll */
#define ENIC_POLL_STATE_POLL_YIELD	(1 << 3) /* poll yielded this poll */
#define ENIC_POLL_YIELD			(ENIC_POLL_STATE_NAPI_YIELD |	\
					 ENIC_POLL_STATE_POLL_YIELD)
#define ENIC_POLL_LOCKED		(ENIC_POLL_STATE_NAPI |		\
					 ENIC_POLL_STATE_POLL)
#define ENIC_POLL_USER_PEND		(ENIC_POLL_STATE_POLL |		\
					 ENIC_POLL_STATE_POLL_YIELD)
	unsigned int bpoll_state;
	spinlock_t bpoll_lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
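/*
 * Illustrative usage sketch only -- not the driver's actual callback (that
 * lives in the code that includes this header), and the names below
 * (example_priv, example_rq_alloc_buf, example_write_rx_desc and
 * EXAMPLE_BUF_LEN) are placeholders.  A buf_fill handler passed to
 * vnic_rq_fill() typically allocates an OS buffer, DMA-maps it, fills in
 * the descriptor returned by vnic_rq_next_desc(), and hands everything to
 * vnic_rq_post(), which consumes one descriptor and periodically updates
 * the posted_index register (DMA mapping error handling elided):
 *
 *	static int example_rq_alloc_buf(struct vnic_rq *rq)
 *	{
 *		struct example_priv *priv = vnic_dev_priv(rq->vdev);
 *		struct sk_buff *skb;
 *		dma_addr_t dma_addr;
 *
 *		skb = netdev_alloc_skb_ip_align(priv->netdev, EXAMPLE_BUF_LEN);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
 *					  EXAMPLE_BUF_LEN, DMA_FROM_DEVICE);
 *		example_write_rx_desc(vnic_rq_next_desc(rq), dma_addr,
 *				      EXAMPLE_BUF_LEN);
 *		vnic_rq_post(rq, skb, 0, dma_addr, EXAMPLE_BUF_LEN, 0);
 *
 *		return 0;
 *	}
 */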
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}

static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
	spin_lock_init(&rq->bpoll_lock);
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	bool rc = true;

	spin_lock(&rq->bpoll_lock);
	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
		WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
		rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		rq->bpoll_state = ENIC_POLL_STATE_NAPI;
	}
	spin_unlock(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
	bool rc = false;

	spin_lock(&rq->bpoll_lock);
	WARN_ON(rq->bpoll_state &
		(ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
		rc = true;
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
	spin_unlock(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	bool rc = true;

	spin_lock_bh(&rq->bpoll_lock);
	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
		rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
		rc = false;
	} else {
		rq->bpoll_state |= ENIC_POLL_STATE_POLL;
	}
	spin_unlock_bh(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
	bool rc = false;

	spin_lock_bh(&rq->bpoll_lock);
	WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
		rc = true;
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
	spin_unlock_bh(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
	return rq->bpoll_state & ENIC_POLL_USER_PEND;
}
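/*
 * Illustrative note on the locking scheme above (the real callers live in
 * the driver code that includes this header; enic_service_rq_napi() below
 * is a placeholder name).  The NAPI handler and the busy-poll path race for
 * ownership of the RQ through bpoll_state: the loser records a *_YIELD flag
 * instead of spinning, and enic_poll_unlock_napi() returns true when a
 * busy-poll attempt yielded while NAPI held the lock, so the caller knows
 * more work may be pending.  A NAPI handler would typically look like:
 *
 *	if (!enic_poll_lock_napi(rq))
 *		return budget;	// busy-poll owns the RQ; stay scheduled
 *
 *	work_done = enic_service_rq_napi(rq, budget);	// placeholder
 *
 *	enic_poll_unlock_napi(rq);
 *
 * enic_poll_busy_polling() tells the completion path whether it is running
 * on behalf of the busy-poll context (ENIC_POLL_USER_PEND) rather than
 * NAPI, for example to deliver packets directly instead of through GRO.
 */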
#else

static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	return true;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */