/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"

#define T4_MAX_NUM_QP 65536
#define T4_MAX_NUM_CQ 65536
#define T4_MAX_NUM_PD 65536
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_IQ_SIZE (65520 - 1)
#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000	/* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
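/*
 * Worked example of the sizing math above: with L1_CACHE_BYTES == 64,
 * T4_EQ_STATUS_ENTRIES is 1, so T4_MAX_EQ_SIZE == 65519,
 * T4_MAX_SQ_SIZE == 65518, T4_MAX_RQ_SIZE == 8191 and
 * T4_MAX_QP_DEPTH == 8190.  The status page occupies the trailing
 * entry (or entries) of each queue, which is why the usable depth is
 * always one less than the allocated queue size.
 */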

struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
	u8 pad;
	u16 host_wq_pidx;
	u16 host_cidx;
	u16 host_pidx;
};

#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}
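/*
 * Example use of init_wr_hdr() (a minimal sketch, not from this file:
 * the surrounding QP plumbing and the "signaled"/"wr_len" variables
 * are assumed purely for illustration):
 *
 *	union t4_wr *wqe = (union t4_wr *)&wq->sq.queue[wq->sq.pidx];
 *
 *	init_wr_hdr(wqe, wq->sq.pidx, FW_RI_SEND_WR,
 *		    signaled ? FW_RI_COMPLETION_FLAG : 0,
 *		    DIV_ROUND_UP(wr_len, 16));
 */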

/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1		/* STAG invalid: either the */
				/* STAG is off limit, being 0, */
				/* or STAG_key mismatch */
#define T4_ERR_PDID 0x2		/* PDID mismatch */
#define T4_ERR_QPID 0x3		/* QPID mismatch */
#define T4_ERR_ACCESS 0x4	/* invalid access right */
#define T4_ERR_WRAP 0x5		/* wrap error */
#define T4_ERR_BOUND 0x6	/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7		/* attempt to invalidate a */
						/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate a */
						/* MR with a MW bound to it */
#define T4_ERR_ECC 0x9		/* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA	/* ECC error detected when */
				/* reading PSTAG for a MW */
				/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND 0xB	/* pbl addr out of bounds: */
					/* software error */
#define T4_ERR_SWFLUSH 0xC	/* SW FLUSHED */
#define T4_ERR_CRC 0x10		/* CRC error */
#define T4_ERR_MARKER 0x11	/* marker error */
#define T4_ERR_PDU_LEN_ERR 0x12	/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13	/* out of RQE */
#define T4_ERR_DDP_VERSION 0x14	/* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15	/* wrong RDMA version */
#define T4_ERR_OPCODE 0x16	/* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17	/* invalid ddp queue number */
#define T4_ERR_MSN 0x18		/* MSN error */
#define T4_ERR_TBIT 0x19	/* tag bit not set correctly */
#define T4_ERR_MO 0x1A		/* MO not 0 for TERMINATE */
				/* or READ_REQ */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E	/* RQE addr out of bounds: */
					/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F	/* internal error (opcode */
					/* mismatch) */

/*
 * CQE defs
 */
struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			u32 nada1;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */

#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))

#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)

/* generic accessor macros */
#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)

/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
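/*
 * Example of decoding a reaped CQE with the accessors above (a sketch:
 * "cqe" is assumed to point at a valid struct t4_cqe, and the
 * pr_debug() calls are illustrative only):
 *
 *	if (SQ_TYPE(cqe))
 *		pr_debug("SQ completion: opcode %u status %u qpid %u\n",
 *			 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_QPID(cqe));
 *	else
 *		pr_debug("RQ completion: stag 0x%x msn %u len %u\n",
 *			 CQE_WRID_STAG(cqe), CQE_WRID_MSN(cqe),
 *			 CQE_LEN(cqe));
 */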

struct t4_swsqe {
	u64 wr_id;
	struct t4_cqe cqe;
	int read_len;
	int opcode;
	int complete;
	int signaled;
	u16 idx;
	int flushed;
};

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return pgprot_writecombine(prot);
#else
	return pgprot_noncached(prot);
#endif
}

enum {
	T4_SQ_ONCHIP = (1<<0),
};

struct t4_sq {
	union t4_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	u64 udb;
	size_t memsize;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 flags;
	short flush_cidx;
};

struct t4_swrqe {
	u64 wr_id;
};

struct t4_rq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	u64 udb;
	size_t memsize;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
};

struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	void __iomem *db;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
	int flushed;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	/* len16 is in 16-byte units; advance wq_pidx in 64-byte EQ slots */
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_onchip(struct t4_sq *sq)
{
	return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	/* len16 is in 16-byte units; advance wq_pidx in 64-byte EQ slots */
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	BUG_ON(wq->sq.in_use < 1);
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
	wmb();
	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
	wmb();
	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}
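/*
 * Typical SQ post sequence showing how the helpers above compose (a
 * condensed sketch: WQE body construction, locking and error handling
 * are elided, and "fw_flags"/"len16" are assumed to be set by the
 * caller):
 *
 *	if (t4_sq_avail(wq) < 1)
 *		return -ENOMEM;
 *	wqe = (union t4_wr *)&wq->sq.queue[wq->sq.pidx];
 *	init_wr_hdr(wqe, wq->sq.pidx, FW_RI_SEND_WR, fw_flags, len16);
 *	t4_sq_produce(wq, len16);
 *	t4_ring_sq_db(wq, DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE));
 *
 * The wmb() in t4_ring_sq_db() orders the WQE stores ahead of the
 * doorbell write, so the hardware never sees a stale work request.
 */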

static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	return !wq->rq.queue[wq->rq.size].status.db_off;
}

struct t4_cq {
	struct t4_cqe *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *gts;
	struct c4iw_rdev *rdev;
	u64 ugts;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u16 size;	/* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
};

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	while (cq->cidx_inc > CIDXINC_MASK) {
		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
		      INGRESSQID(cq->cqid);
		writel(val, cq->gts);
		cq->cidx_inc -= CIDXINC_MASK;
	}
	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
	      INGRESSQID(cq->cqid);
	writel(val, cq->gts);
	cq->cidx_inc = 0;
	return 0;
}

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	BUG_ON(cq->sw_in_use < 1);
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
		u32 val;

		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
		      INGRESSQID(cq->cqid);
		writel(val, cq->gts);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
		BUG_ON(1);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
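/*
 * Typical CQ drain/re-arm loop (a sketch: "process_cqe" is a
 * hypothetical per-completion handler, not part of this file):
 *
 *	struct t4_cqe *cqe;
 *
 *	while (t4_next_cqe(cq, &cqe) == 0) {
 *		process_cqe(cqe);
 *		if (SW_CQE(cqe))
 *			t4_swcq_consume(cq);
 *		else
 *			t4_hwcq_consume(cq);
 *	}
 *	t4_arm_cq(cq, 0);
 */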
%u\n", cq->cqid); 567 BUG_ON(1); 568 } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { 569 *cqe = &cq->queue[cq->cidx]; 570 ret = 0; 571 } else 572 ret = -ENODATA; 573 return ret; 574 } 575 576 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) 577 { 578 if (cq->sw_in_use == cq->size) { 579 PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid); 580 cq->error = 1; 581 BUG_ON(1); 582 return NULL; 583 } 584 if (cq->sw_in_use) 585 return &cq->sw_queue[cq->sw_cidx]; 586 return NULL; 587 } 588 589 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) 590 { 591 int ret = 0; 592 593 if (cq->error) 594 ret = -ENODATA; 595 else if (cq->sw_in_use) 596 *cqe = &cq->sw_queue[cq->sw_cidx]; 597 else 598 ret = t4_next_hw_cqe(cq, cqe); 599 return ret; 600 } 601 602 static inline int t4_cq_in_error(struct t4_cq *cq) 603 { 604 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err; 605 } 606 607 static inline void t4_set_cq_in_error(struct t4_cq *cq) 608 { 609 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; 610 } 611 #endif 612