/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include "qbman_portal.h"

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE	0x30
#define QBMAN_WQCHAN_CONFIGURE	0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQAR	0x8c0
#define QBMAN_CINH_SWP_DCAP	0xac0
#define QBMAN_CINH_SWP_SDQCR	0xb00
#define QBMAN_CINH_SWP_RAR	0xcc0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n)	(0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n)	(0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)	(0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR	0x600
#define QBMAN_CENA_SWP_RR(vb)	(0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR	0x780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p)	(((unsigned long)(p) & 0xff) >> 6)

/*******************************/
/* Pre-defined attribute codes */
/*******************************/

struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);

/*************************/
/* SDQCR attribute codes */
/*************************/

/* We put these here because at least some of them are required by
 * qbman_swp_init() */
struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
#define CODE_SDQCR_DQSRC(n)	QB_CODE(0, (n), 1)
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};
enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/*********************************/
/* Portal constructor/destructor */
/*********************************/

/* Software portals should always be in the power-on state when we initialise,
 * due to the CCSR-based portal reset functionality that MC has. */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	int ret;
	struct qbman_swp *p = malloc(sizeof(struct qbman_swp));

	if (!p)
		return NULL;
	p->desc = d;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
	qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
	qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
	atomic_set(&p->vdq.busy, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	ret = qbman_swp_sys_init(&p->sys, d);
	if (ret) {
		free(p);
		printf("qbman_swp_sys_init() failed %d\n", ret);
		return NULL;
	}
	qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, p->sdq);
	return p;
}
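
/*
 * Worked example (illustrative only): assuming qb_attr_code_encode() simply
 * deposits its value at the QB_CODE-specified bit offset within the given
 * 32-bit word (the helper itself lives in qbman_portal.h, not here), the
 * default SDQCR programmed above packs as:
 *
 *	dct = qbman_sdqcr_dct_prio_ics (1) at bits 25:24 -> 0x01000000
 *	fc  = qbman_sdqcr_fc_up_to_3   (1) at bit  29    -> 0x20000000
 *	tok = 0xbb                         at bits 23:16 -> 0x00bb0000
 *
 * so p->sdq ends up as 0x21bb0000 before being written to the SDQCR register.
 */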

/***********************/
/* Management commands */
/***********************/

/*
 * Internal code common to all types of management commands.
 */

void *qbman_swp_mc_start(struct qbman_swp *p)
{
	void *ret;
#ifdef QBMAN_CHECKING
	int *return_val;

	BUG_ON(p->mc.check != swp_mc_can_start);
#endif
	ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
#ifdef QBMAN_CHECKING
	return_val = (int *)ret;
	if (!(*return_val))
		p->mc.check = swp_mc_can_submit;
#endif
	return ret;
}

void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
{
	uint32_t *v = cmd;
#ifdef QBMAN_CHECKING
	BUG_ON(p->mc.check != swp_mc_can_submit);
#endif
	lwsync();
	/* TBD: "|=" is going to hurt performance. Need to move as many fields
	 * out of word zero, and for those that remain, the "OR" needs to occur
	 * at the caller side. This debug check helps to catch cases where the
	 * caller wants to OR but has forgotten to do so. */
	BUG_ON((*v & cmd_verb) != *v);
	*v = cmd_verb | p->mc.valid_bit;
	qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
	/* TODO: add prefetch support for GPP */
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_poll;
#endif
}

void *qbman_swp_mc_result(struct qbman_swp *p)
{
	uint32_t *ret, verb;
#ifdef QBMAN_CHECKING
	BUG_ON(p->mc.check != swp_mc_can_poll);
#endif
	ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
	/* Remove the valid-bit - command completed iff the rest is non-zero */
	verb = ret[0] & ~QB_VALID_BIT;
	if (!verb)
		return NULL;
#ifdef QBMAN_CHECKING
	p->mc.check = swp_mc_can_start;
#endif
	p->mc.valid_bit ^= QB_VALID_BIT;
	return ret;
}
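
/*
 * Illustrative sketch (not part of the driver) of how the three helpers above
 * compose into a blocking management command; 'verb' stands for the
 * command-specific code (e.g. QBMAN_MC_ACQUIRE), and qbman_swp_mc_complete(),
 * used by qbman_swp_acquire() below, is assumed to wrap this submit-then-poll
 * pattern in qbman_portal.h:
 *
 *	uint32_t *cmd = qbman_swp_mc_start(p);
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	// ...encode command-specific attributes into cmd[]...
 *	qbman_swp_mc_submit(p, cmd, cmd[0] | verb);
 *	do {
 *		cmd = qbman_swp_mc_result(p);
 *	} while (!cmd);
 *	// ...decode the result, starting with code_generic_verb/rslt...
 */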

/***********/
/* Enqueue */
/***********/

/* These should be const, eventually */
static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);

enum qbman_eq_cmd_e {
	/* No enqueue, primarily for plugging ORP gaps for dropped frames */
	qbman_eq_cmd_empty,
	/* DMA an enqueue response once complete */
	qbman_eq_cmd_respond,
	/* DMA an enqueue response only if the enqueue fails */
	qbman_eq_cmd_respond_reject
};

void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_orp_en, cl, 0);
	qb_attr_code_encode(&code_eq_cmd, cl,
			    respond_success ? qbman_eq_cmd_respond :
					      qbman_eq_cmd_respond_reject);
}

void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
				dma_addr_t storage_phys,
				int stash)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
	qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
}

void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_eq_qd_en, cl, 1);
	qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
	qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
	qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
}

#define EQAR_IDX(eqar)		((eqar) & 0x7)
#define EQAR_VB(eqar)		((eqar) & 0x80)
#define EQAR_SUCCESS(eqar)	((eqar) & 0x100)

int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct qbman_fd *fd)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);

	debug("EQAR=%08x\n", eqar);
	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	p = qbman_cena_write_start(&s->sys,
				   QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	word_copy(&p[1], &cl[1], 7);
	word_copy(&p[8], fd, sizeof(*fd) >> 2);
	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | EQAR_VB(eqar);
	qbman_cena_write_complete(&s->sys,
				  QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)),
				  p);
	return 0;
}
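
/*
 * Illustrative usage sketch (not part of the driver), assuming the caller
 * already owns a portal 'swp', a frame descriptor 'fd' and a queuing
 * destination id 'qdid' obtained from MC; the bin/priority values are
 * placeholders:
 *
 *	struct qbman_eq_desc ed;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);	// respond only on rejection
 *	qbman_eq_desc_set_qd(&ed, qdid, 0, 0);
 *	while (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
 *		;				// no EQCR slot available yet
 */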

/***************************/
/* Volatile (pull) dequeue */
/***************************/

/* These should be const, eventually */
static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};

void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct ldpaa_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	uint32_t *cl = qb_cl(d);

	/* Squiggle the pointer 'storage' into the extra 2 words of the
	 * descriptor (which aren't copied to the hw command) */
	*(void **)&cl[4] = storage;
	if (!storage) {
		qb_attr_code_encode(&code_pull_rls, cl, 0);
		return;
	}
	qb_attr_code_encode(&code_pull_rls, cl, 1);
	qb_attr_code_encode(&code_pull_stash, cl, !!stash);
	qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
}

void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
				   uint8_t numframes)
{
	uint32_t *cl = qb_cl(d);

	BUG_ON(!numframes || (numframes > 16));
	qb_attr_code_encode(&code_pull_numframes, cl,
			    (uint32_t)(numframes - 1));
}

void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_token, cl, token);
}

void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_pull_dct, cl, 1);
	qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
	qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
}

int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	uint32_t *p;
	uint32_t *cl = qb_cl(d);

	if (!atomic_dec_and_test(&s->vdq.busy)) {
		atomic_inc(&s->vdq.busy);
		return -EBUSY;
	}
	s->vdq.storage = *(void **)&cl[4];
	s->vdq.token = qb_attr_code_decode(&code_pull_token, cl);
	p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR);
	word_copy(&p[1], &cl[1], 3);
	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit */
	p[0] = cl[0] | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p);
	return 0;
}
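
/*
 * Illustrative usage sketch (not part of the driver): a volatile dequeue is
 * issued against a frame queue into caller-provided storage, which is then
 * polled for the result.  'swp', 'storage', 'storage_phys', 'fqid' and the
 * 0xbb token are placeholders for this example; the token only has to match
 * between the pull descriptor and the later qbman_dq_entry_has_newtoken()
 * check (see "Polling user-provided storage" below):
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_dq_entry_set_oldtoken(storage, 1, 0);
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 0);
 *	qbman_pull_desc_set_numframes(&pd, 1);
 *	qbman_pull_desc_set_token(&pd, 0xbb);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;			// previous VDQCR still busy
 *	while (!qbman_dq_entry_has_newtoken(swp, storage, 0xbb))
 *		;				// wait for hardware to DMA the result
 */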

/****************/
/* Polling DQRR */
/****************/

static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);

#define QBMAN_DQRR_RESPONSE_DQ		0x60
#define QBMAN_DQRR_RESPONSE_FQRN	0x21
#define QBMAN_DQRR_RESPONSE_FQRNI	0x22
#define QBMAN_DQRR_RESPONSE_FQPN	0x24
#define QBMAN_DQRR_RESPONSE_FQDAN	0x25
#define QBMAN_DQRR_RESPONSE_CDAN	0x26
#define QBMAN_DQRR_RESPONSE_CSCN_MEM	0x27
#define QBMAN_DQRR_RESPONSE_CGCU	0x28
#define QBMAN_DQRR_RESPONSE_BPSCN	0x29
#define QBMAN_DQRR_RESPONSE_CSCN_WQ	0x2a

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order. */
const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	uint32_t verb;
	uint32_t response_verb;
	uint32_t flags;
	const struct ldpaa_dq *dq;
	const uint32_t *p;

	dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	p = qb_cl(dq);
	verb = qb_attr_code_decode(&code_dqrr_verb, p);

	/* If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip this
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		qbman_cena_invalidate_prefetch(&s->sys,
					QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
		return NULL;
	}
	/* There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found. */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= QBMAN_DQRR_SIZE - 1; /* Wrap around at 4 */
	/* TODO: it's possible to do all this without conditionals, optimise it
	 * later. */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/* If this is the final response to a volatile dequeue command,
	 * indicate that the vdq is no longer busy */
	flags = ldpaa_dq_flags(dq);
	response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
	if ((response_verb == QBMAN_DQRR_RESPONSE_DQ) &&
	    (flags & LDPAA_DQ_STAT_VOLATILE) &&
	    (flags & LDPAA_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.busy);

	qbman_cena_invalidate_prefetch(&s->sys,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	return dq;
}

/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct ldpaa_dq *dq)
{
	qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
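
/*
 * Illustrative usage sketch (not part of the driver) for draining the DQRR
 * ring once static dequeues have been scheduled via the SDQCR value written
 * in qbman_swp_init(); 'swp' is a placeholder portal handle:
 *
 *	const struct ldpaa_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		if (qbman_dq_entry_is_DQ(dq)) {
 *			// ...process ldpaa_dq_fd(dq) / ldpaa_dq_flags(dq)...
 *		}
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */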

/*********************************/
/* Polling user-provided storage */
/*********************************/

void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *dq,
				 unsigned int num_entries,
				 uint8_t oldtoken)
{
	memset(dq, oldtoken, num_entries * sizeof(*dq));
}

int qbman_dq_entry_has_newtoken(struct qbman_swp *s,
				const struct ldpaa_dq *dq,
				uint8_t newtoken)
{
	/* To avoid converting the little-endian DQ entry to host-endian prior
	 * to us knowing whether there is a valid entry or not (and running the
	 * risk of corrupting the incoming hardware LE write), we detect in
	 * hardware endianness rather than host. This means we need a different
	 * "code" depending on whether we are BE or LE in software, which is
	 * where DQRR_TOK_OFFSET comes in... */
	static struct qb_attr_code code_dqrr_tok_detect =
					QB_CODE(0, DQRR_TOK_OFFSET, 8);
	/* The user trying to poll for a result treats "dq" as const. It is
	 * however the same address that was provided to us non-const in the
	 * first place, for directing hardware DMA to. So we can cast away the
	 * const because it is mutable from our perspective. */
	uint32_t *p = qb_cl((struct ldpaa_dq *)dq);
	uint32_t token;

	token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
	if (token != newtoken)
		return 0;

	/* Only now do we convert from hardware to host endianness. Also, as we
	 * are returning success, the user has promised not to call us again, so
	 * there's no risk of us converting the endianness twice... */
	make_le32_n(p, 16);

	/* VDQCR "no longer busy" hook - not quite the same as DQRR, because the
	 * fact "VDQCR" shows busy doesn't mean that the result we're looking at
	 * is from the same command. Eg. we may be looking at our 10th dequeue
	 * result from our first VDQCR command, yet the second dequeue command
	 * could have been kicked off already, after seeing the 1st result. Ie.
	 * the result we're looking at is not necessarily proof that we can
	 * reset "busy". We instead base the decision on whether the current
	 * result is sitting at the first 'storage' location of the busy
	 * command. */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.busy);
	}
	return 1;
}

/********************************/
/* Categorising dequeue entries */
/********************************/

static inline int __qbman_dq_entry_is_x(const struct ldpaa_dq *dq, uint32_t x)
{
	const uint32_t *p = qb_cl(dq);
	uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);

	return response_verb == x;
}

int qbman_dq_entry_is_DQ(const struct ldpaa_dq *dq)
{
	return __qbman_dq_entry_is_x(dq, QBMAN_DQRR_RESPONSE_DQ);
}

/*********************************/
/* Parsing frame dequeue results */
/*********************************/

/* These APIs assume qbman_dq_entry_is_DQ() is TRUE */

uint32_t ldpaa_dq_flags(const struct ldpaa_dq *dq)
{
	const uint32_t *p = qb_cl(dq);

	return qb_attr_code_decode(&code_dqrr_stat, p);
}

const struct dpaa_fd *ldpaa_dq_fd(const struct ldpaa_dq *dq)
{
	const uint32_t *p = qb_cl(dq);

	return (const struct dpaa_fd *)&p[8];
}
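
/*
 * Illustrative sketch (not part of the driver) of checking a dequeue result
 * before touching the frame descriptor; LDPAA_DQ_STAT_VALIDFRAME is assumed
 * to be the "FD is valid" status bit defined alongside LDPAA_DQ_STAT_VOLATILE
 * and LDPAA_DQ_STAT_EXPIRED in the portal header:
 *
 *	const struct dpaa_fd *fd;
 *	uint32_t status = ldpaa_dq_flags(dq);
 *
 *	if (!qbman_dq_entry_is_DQ(dq) || !(status & LDPAA_DQ_STAT_VALIDFRAME))
 *		return;				// nothing dequeued in this entry
 *	fd = ldpaa_dq_fd(dq);
 *	// ...hand 'fd' to the frame-processing code...
 */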

/******************/
/* Buffer release */
/******************/

/* These should be const, eventually */
/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);

void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	uint32_t *cl;

	memset(d, 0, sizeof(*d));
	cl = qb_cl(d);
	qb_attr_code_encode(&code_release_set_me, cl, 1);
}

void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
{
	uint32_t *cl = qb_cl(d);

	qb_attr_code_encode(&code_release_bpid, cl, bpid);
}

#define RAR_IDX(rar)		((rar) & 0x7)
#define RAR_VB(rar)		((rar) & 0x80)
#define RAR_SUCCESS(rar)	((rar) & 0x100)

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const uint64_t *buffers, unsigned int num_buffers)
{
	uint32_t *p;
	const uint32_t *cl = qb_cl(d);
	uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);

	debug("RAR=%08x\n", rar);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;
	BUG_ON(!num_buffers || (num_buffers > 7));
	/* Start the release command */
	p = qbman_cena_write_start(&s->sys,
				   QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	u64_to_le32_copy(&p[2], buffers, num_buffers);
	lwsync();
	/* Set the verb byte, have to substitute in the valid-bit and the
	 * number of buffers. */
	p[0] = cl[0] | RAR_VB(rar) | num_buffers;
	qbman_cena_write_complete(&s->sys,
				  QBMAN_CENA_SWP_RCR(RAR_IDX(rar)),
				  p);
	return 0;
}

/*******************/
/* Buffer acquires */
/*******************/

/* These should be const, eventually */
static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);

int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers)
{
	uint32_t *p;
	uint32_t verb, rslt, num;

	BUG_ON(!num_buffers || (num_buffers > 7));

	/* Start the management command */
	p = qbman_swp_mc_start(s);

	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	qb_attr_code_encode(&code_acquire_bpid, p, bpid);
	qb_attr_code_encode(&code_acquire_num, p, num_buffers);

	/* Complete the management command */
	p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);

	/* Decode the outcome */
	verb = qb_attr_code_decode(&code_generic_verb, p);
	rslt = qb_attr_code_decode(&code_generic_rslt, p);
	num = qb_attr_code_decode(&code_acquire_r_num, p);
	BUG_ON(verb != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
		printf("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
		       bpid, rslt);
		return -EIO;
	}
	BUG_ON(num > num_buffers);
	/* Copy the acquired buffers to the caller's array */
	u64_from_le32_copy(buffers, &p[2], num);
	return (int)num;
}
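
/*
 * Illustrative usage sketch (not part of the driver): seeding a buffer pool
 * and pulling a buffer back out of it.  'swp', 'bpid' and 'buf_phys' are
 * placeholders; both commands are limited to 7 buffers at a time by the
 * BUG_ON() checks above:
 *
 *	struct qbman_release_desc rd;
 *	uint64_t bufs[1] = { buf_phys };
 *	int ret;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		ret = qbman_swp_release(swp, &rd, bufs, 1);
 *	} while (ret == -EBUSY);
 *
 *	ret = qbman_swp_acquire(swp, bpid, bufs, 1);
 *	if (ret <= 0)
 *		;				// pool empty or command failed
 */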