/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support library for the hardware Packet Output unit.
 */

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

/**
 * Internal state of packet output
 */

static int __cvmx_pko_int(int interface, int index)
{
	switch (interface) {
	case 0:
		return index;
	case 1:
		return 4;
	case 2:
		return index + 0x08;
	case 3:
		return index + 0x0c;
	case 4:
		return index + 0x10;
	case 5:
		return 0x1c;
	case 6:
		return 0x1d;
	case 7:
		return 0x1e;
	case 8:
		return 0x1f;
	default:
		return -1;
	}
}
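
/*
 * Illustrative sketch (not part of the original source): the switch
 * above maps an (interface, index) pair to the value that
 * __cvmx_pko_port_map_o68() below writes into both the intr and eid
 * fields of PKO_MEM_IPORT_PTRS.  Interface 0 maps straight through,
 * interface 1 always yields 4, interfaces 2-4 occupy blocks starting at
 * 0x08, 0x0c and 0x10, and interfaces 5-8 each yield a single value in
 * the 0x1c-0x1f range.  For example:
 *
 *	__cvmx_pko_int(0, 2)	returns 0x02
 *	__cvmx_pko_int(3, 1)	returns 0x0d
 *	__cvmx_pko_int(7, 0)	returns 0x1e
 */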

static void __cvmx_pko_iport_config(int pko_port)
{
	int queue;
	const int num_queues = 1;
	const int base_queue = pko_port;
	const int static_priority_end = 1;
	const int static_priority_base = 1;

	for (queue = 0; queue < num_queues; queue++) {
		union cvmx_pko_mem_iqueue_ptrs config;
		cvmx_cmd_queue_result_t cmd_res;
		uint64_t *buf_ptr;

		config.u64 = 0;
		config.s.index = queue;
		config.s.qid = base_queue + queue;
		config.s.ipid = pko_port;
		config.s.tail = (queue == (num_queues - 1));
		config.s.s_tail = (queue == static_priority_end);
		config.s.static_p = (static_priority_base >= 0);
		config.s.static_q = (queue <= static_priority_end);
		config.s.qos_mask = 0xff;

		cmd_res = cvmx_cmd_queue_initialize(
				CVMX_CMD_QUEUE_PKO(base_queue + queue),
				CVMX_PKO_MAX_QUEUE_DEPTH,
				CVMX_FPA_OUTPUT_BUFFER_POOL,
				(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));

		WARN(cmd_res,
		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
		     __func__, (int)cmd_res, pko_port, base_queue,
		     num_queues, queue);

		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
				CVMX_CMD_QUEUE_PKO(base_queue + queue));
		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
		CVMX_SYNCWS;
		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
	}
}

static void __cvmx_pko_queue_alloc_o68(void)
{
	int port;

	for (port = 0; port < 48; port++)
		__cvmx_pko_iport_config(port);
}

static void __cvmx_pko_port_map_o68(void)
{
	int port;
	int interface, index;
	cvmx_helper_interface_mode_t mode;
	union cvmx_pko_mem_iport_ptrs config;

	/*
	 * Initialize every iport with the invalid eid.
	 */
	config.u64 = 0;
	config.s.eid = 31; /* Invalid */
	for (port = 0; port < 128; port++) {
		config.s.ipid = port;
		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}

	/*
	 * Set up PKO_MEM_IPORT_PTRS
	 */
	for (port = 0; port < 48; port++) {
		interface = cvmx_helper_get_interface_num(port);
		index = cvmx_helper_get_interface_index_num(port);
		mode = cvmx_helper_interface_get_mode(interface);
		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
			continue;

		config.s.ipid = port;
		config.s.qos_mask = 0xff;
		config.s.crc = 1;
		config.s.min_pkt = 1;
		config.s.intr = __cvmx_pko_int(interface, index);
		config.s.eid = config.s.intr;
		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
			index : port;
		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}
}

static void __cvmx_pko_chip_init(void)
{
	int i;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		__cvmx_pko_port_map_o68();
		__cvmx_pko_queue_alloc_o68();
		return;
	}

	/*
	 * Initialize queues
	 */
	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
		const uint64_t priority = 8;

		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
				     &priority);
	}
}

/**
 * Call before any other calls to initialize the packet
 * output system. This does chip-global config, and should only be
 * done by one core.
 */

void cvmx_pko_initialize_global(void)
{
	union cvmx_pko_reg_cmd_buf config;

	/*
	 * Set the size of the PKO command buffers to an odd number of
	 * 64bit words. This allows the normal two word send to stay
	 * aligned and never span a command word buffer.
	 */
	config.u64 = 0;
	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;

	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

	/*
	 * Chip-specific setup.
	 */
	__cvmx_pko_chip_init();

	/*
	 * If we aren't using all of the queues, optimize PKO's
	 * internal memory.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		int num_interfaces = cvmx_helper_get_number_of_interfaces();
		int last_port =
		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
		int max_queues =
		    cvmx_pko_get_base_queue(last_port) +
		    cvmx_pko_get_num_queues(last_port);
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
			if (max_queues <= 32)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		} else {
			if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 128)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		}
	}
}

/**
 * This function does per-core initialization required by the PKO routines.
 * This must be called on all cores that will do packet output, and must
 * be called after the FPA has been initialized and filled with pages.
 *
 * Returns 0 on success
 *         !0 on failure
 */
int cvmx_pko_initialize_local(void)
{
	/* Nothing to do */
	return 0;
}
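
/*
 * Illustrative bring-up sketch (not part of the original source; error
 * handling is omitted and "priorities" stands for a caller-supplied
 * array with num_queues entries).  Per the comments above and below,
 * the FPA must already be initialized and filled with pages:
 *
 *	cvmx_pko_initialize_global();	(once, by a single core)
 *	cvmx_pko_initialize_local();	(on every core that sends packets)
 *	cvmx_pko_config_port(port, base_queue, num_queues, priorities);
 *	cvmx_pko_enable();		(only after ports are configured)
 */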

/**
 * Enables the packet output hardware. It must already be
 * configured.
 */
void cvmx_pko_enable(void)
{
	union cvmx_pko_reg_flags flags;

	flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
	if (flags.s.ena_pko)
		cvmx_dprintf
		    ("Warning: Enabling PKO when PKO already enabled.\n");

	flags.s.ena_dwb = 1;
	flags.s.ena_pko = 1;
	/*
	 * Always enable big endian for 3-word commands. Does nothing
	 * for 2-word commands.
	 */
	flags.s.store_be = 1;
	cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
}

/**
 * Disables the packet output. Does not affect any configuration.
 */
void cvmx_pko_disable(void)
{
	union cvmx_pko_reg_flags pko_reg_flags;
	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
	pko_reg_flags.s.ena_pko = 0;
	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
}
EXPORT_SYMBOL_GPL(cvmx_pko_disable);

/**
 * Reset the packet output.
 */
static void __cvmx_pko_reset(void)
{
	union cvmx_pko_reg_flags pko_reg_flags;
	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
	pko_reg_flags.s.reset = 1;
	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
}

/**
 * Shutdown and free resources required by packet output.
 */
void cvmx_pko_shutdown(void)
{
	union cvmx_pko_mem_queue_ptrs config;
	int queue;

	cvmx_pko_disable();

	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
		config.u64 = 0;
		config.s.tail = 1;
		config.s.index = 0;
		config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
		config.s.queue = queue & 0x7f;
		config.s.qos_mask = 0;
		config.s.buf_ptr = 0;
		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_reg_queue_ptrs1 config1;
			config1.u64 = 0;
			config1.s.qid7 = queue >> 7;
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		}
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
	}
	__cvmx_pko_reset();
}
EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
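
/*
 * Illustrative note (an inference drawn from the code above, not text
 * from the original source): cvmx_pko_shutdown() unbinds every queue,
 * shuts down the backing command queues and leaves the unit in reset,
 * so a caller that wants to use PKO again afterwards presumably has to
 * repeat the full bring-up sequence (cvmx_pko_initialize_global(),
 * cvmx_pko_config_port() for each port, then cvmx_pko_enable()) rather
 * than simply calling cvmx_pko_enable().
 */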

/**
 * Configure an output port and the associated queues for use.
 *
 * @port:       Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port.
 * @priority:   Array of priority levels for each queue. Values are
 *              allowed to be 0-8. A value of 8 gets 8 times the traffic
 *              of a value of 1. A value of 0 means the queue does not
 *              participate in any rounds. These priorities can be
 *              changed on the fly while the PKO is enabled. A priority
 *              of 9 indicates that static priority should be used. If
 *              static priority is used, all queues with static priority
 *              must be contiguous starting at the base_queue, and lower
 *              numbered queues have higher priority than higher numbered
 *              queues. There must be num_queues elements in the array.
 *              (See the illustrative usage sketch following this
 *              function.)
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	int static_priority_base = -1;
	int static_priority_end = -1;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return CVMX_PKO_SUCCESS;

	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous. Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				     static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			     static_priority_base, static_priority_end);
#endif
	}
	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue
	 * numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packets don't
		 * get bunched together.
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr =
			    (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
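
/*
 * Illustrative usage sketch for cvmx_pko_config_port(); the port and
 * queue numbers are invented for the example and not taken from the
 * code above.  This binds three queues to output port 16: queue 48 at
 * static priority, followed by two weighted round-robin queues where
 * queue 49 gets four scheduling rounds for every one round of queue 50:
 *
 *	const uint64_t prio[3] = {
 *		CVMX_PKO_QUEUE_STATIC_PRIORITY,
 *		4,	(qos_mask 0x55: 4 of 8 rounds)
 *		1,	(qos_mask 0x01: 1 of 8 rounds)
 *	};
 *
 *	if (cvmx_pko_config_port(16, 48, 3, prio) != CVMX_PKO_SUCCESS)
 *		(handle the error)
 */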

#ifdef PKO_DEBUG
/**
 * Show map of ports -> queues for different cores.
 */
void cvmx_pko_show_queue_map(void)
{
	int core, port;
	int pko_output_ports = 36;

	cvmx_dprintf("port");
	for (port = 0; port < pko_output_ports; port++)
		cvmx_dprintf("%3d ", port);
	cvmx_dprintf("\n");

	for (core = 0; core < CVMX_MAX_CORES; core++) {
		cvmx_dprintf("\n%2d: ", core);
		for (port = 0; port < pko_output_ports; port++) {
			cvmx_dprintf("%3d ",
				     cvmx_pko_get_base_queue_per_core(port,
								      core));
		}
	}
	cvmx_dprintf("\n");
}
#endif

/**
 * Rate limit a PKO port to a max packets/sec. This function is only
 * supported on CN51XX and higher, excluding CN58XX.
 *
 * @port:      Port to rate limit
 * @packets_s: Maximum packets/sec
 * @burst:     Maximum number of packets to burst in a row before rate
 *             limiting cuts in.
 *
 * Returns Zero on success, negative on failure
 */
int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
{
	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;

	pko_mem_port_rate0.u64 = 0;
	pko_mem_port_rate0.s.pid = port;
	pko_mem_port_rate0.s.rate_pkt =
	    cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
	/* No cost per word since we are limited by packets/sec, not bits/sec */
	pko_mem_port_rate0.s.rate_word = 0;

	pko_mem_port_rate1.u64 = 0;
	pko_mem_port_rate1.s.pid = port;
	pko_mem_port_rate1.s.rate_lim =
	    ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;

	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
	return 0;
}

/**
 * Rate limit a PKO port to a max bits/sec. This function is only
 * supported on CN51XX and higher, excluding CN58XX.
 *
 * @port:   Port to rate limit
 * @bits_s: PKO rate limit in bits/sec
 * @burst:  Maximum number of bits to burst before rate
 *          limiting cuts in.
 *
 * Returns Zero on success, negative on failure
 */
int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
{
	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
	uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
	uint64_t tokens_per_bit = clock_rate * 16 / bits_s;

	pko_mem_port_rate0.u64 = 0;
	pko_mem_port_rate0.s.pid = port;
	/*
	 * Each packet has 12 bytes of interframe gap, an 8 byte
	 * preamble, and a 4 byte CRC. These are not included in the
	 * per word count. Multiply by 8 to convert to bits and divide
	 * by 256 for limit granularity.
	 */
	pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
	/* Each 8 byte word has 64 bits */
	pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;

	pko_mem_port_rate1.u64 = 0;
	pko_mem_port_rate1.s.pid = port;
	pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;

	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
	return 0;
}
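
/*
 * Worked example for cvmx_pko_rate_limit_bits(), illustrative only: the
 * 800 MHz core clock and 1 Gbit/s target below are assumed values, not
 * taken from the code above.  With cpu_clock_hz = 800,000,000 and
 * bits_s = 1,000,000,000 the integer math works out to
 *
 *	tokens_per_bit = 800000000 * 16 / 1000000000 = 12
 *	rate_pkt       = (12 + 8 + 4) * 8 * 12 / 256 = 9
 *	rate_word      = 64 * 12 = 768
 *
 * and a burst of 64000 bits gives rate_lim = 12 * 64000 / 256 = 3000,
 * e.g.:
 *
 *	cvmx_pko_rate_limit_bits(port, 1000000000ULL, 64000);
 */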