/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

/** Default configuration for CN66XX OCTEON models. */
static struct octeon_config default_cn66xx_conf = {
	.card_type = LIO_210SV,
	.card_name = LIO_210SV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: port cfg attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};

/** Default configuration for the CN68XX OCTEON model (LIO_410NV). */
static struct octeon_config default_cn68xx_conf = {
	.card_type = LIO_410NV,
	.card_name = LIO_410NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	.nic_if_cfg[2] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 2,
	},

	.nic_if_cfg[3] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 3,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};

/** Default configuration for the CN68XX OCTEON model (LIO_210NV). */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type = LIO_210NV,
	.card_name = LIO_210NV_NAME,

	/** IQ attributes */
	.iq = {
		.max_iqs = CN6XXX_CFG_IO_QUEUES,
		.pending_list_size =
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN6XXX_DB_MIN,
		.db_timeout = CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN6XXX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
		.oq_intr_time = CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,

	.nic_if_cfg[0] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN6XXX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	/** Miscellaneous attributes */
	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	},
};
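/* Note: the three CN6XXX-family tables above use identical IQ/OQ and
 * per-interface attributes; they differ only in card identity, in
 * num_nic_ports, and in how many nic_if_cfg[] entries are populated.
 */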
static struct octeon_config default_cn23xx_conf = {
	.card_type = LIO_23XX,
	.card_name = LIO_23XX_NAME,
	/** IQ attributes */
	.iq = {
		.max_iqs = CN23XX_CFG_IO_QUEUES,
		.pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
				      CN23XX_CFG_IO_QUEUES),
		.instr_type = OCTEON_64BYTE_INSTR,
		.db_min = CN23XX_DB_MIN,
		.db_timeout = CN23XX_DB_TIMEOUT,
		.iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
	},

	/** OQ attributes */
	.oq = {
		.max_oqs = CN23XX_CFG_IO_QUEUES,
		.info_ptr = OCTEON_OQ_INFOPTR_MODE,
		.pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
		.refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt = CN23XX_OQ_INTR_PKT,
		.oq_intr_time = CN23XX_OQ_INTR_TIME,
	},

	.num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
	.num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size = CN23XX_OQ_BUF_SIZE,

	/* For ethernet interface 0: port cfg attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN23XX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 0,
	},

	.nic_if_cfg[1] = {
		/* Max Txqs: half of max_iq for each of the two ports (max_iq/2) */
		.max_txqs = MAX_TXQS_PER_INTF,

		/* Actual configured value; range is 1...max_txqs */
		.num_txqs = DEF_TXQS_PER_INTF,

		/* Max Rxqs: half of max_oq for each of the two ports (max_oq/2) */
		.max_rxqs = MAX_RXQS_PER_INTF,

		/* Actual configured value; range is 1...max_rxqs */
		.num_rxqs = DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,

		/* SKB size: the buf size need not change even for jumbo
		 * frames, since Octeon can send a jumbo frame in 4
		 * consecutive descriptors.
		 */
		.rx_buf_size = CN23XX_OQ_BUF_SIZE,

		.base_queue = BASE_QUEUE_NOT_REQUESTED,

		.gmx_port_id = 1,
	},

	.misc = {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp = 0,

		/* Control queue group */
		.ctrlq_grp = 1,
	}
};

static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	},
};

static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};

static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;

static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];

static void oct_set_config_info(int oct_id, int conf_type)
{
	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
	oct_conf_info[oct_id].conf_type = conf_type;
}

void octeon_init_device_list(int conf_type)
{
	int i;

	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		oct_set_config_info(i, conf_type);
}
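/* Usage sketch (illustrative only, not code from this file): a module
 * init/probe path would typically seed the device list once and later
 * fetch the per-chip configuration, roughly:
 *
 *	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
 *	...
 *	conf = oct_get_config_info(oct, LIO_210SV);
 *	if (!conf)
 *		return -EINVAL;
 *
 * where "oct" is assumed to be an already-allocated octeon_device and
 * the card type matches the probed board.
 */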
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
					   u16 card_type)
{
	u32 oct_id = oct->octeon_id;
	void *ret = NULL;

	switch (oct_conf_info[oct_id].conf_type) {
	case OCTEON_CONFIG_TYPE_DEFAULT:
		if (oct->chip_id == OCTEON_CN66XX) {
			ret = &default_cn66xx_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_210NV)) {
			ret = &default_cn68xx_210nv_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_410NV)) {
			ret = &default_cn68xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			ret = &default_cn23xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
			ret = &default_cn23xx_conf;
		}
		break;
	default:
		break;
	}
	return ret;
}

static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX:
		return lio_validate_cn6xxx_config_info(oct, conf);
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return 0;
	default:
		break;
	}

	return 1;
}

void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
{
	void *conf = NULL;

	conf = __retrieve_octeon_config_info(oct, card_type);
	if (!conf)
		return NULL;

	if (__verify_octeon_config_info(oct, conf)) {
		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
		return NULL;
	}

	return conf;
}

char *lio_get_state_string(atomic_t *state_ptr)
{
	s32 istate = (s32)atomic_read(state_ptr);

	if (istate > OCT_DEV_STATES || istate < 0)
		return oct_dev_state_str[OCT_DEV_STATE_INVALID];
	return oct_dev_state_str[istate];
}

static char *get_oct_app_string(u32 app_mode)
{
	if (app_mode <= CVM_DRV_APP_END)
		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}

void octeon_free_device_mem(struct octeon_device *oct)
{
	int i;

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (oct->io_qmask.oq & BIT_ULL(i))
			vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (oct->io_qmask.iq & BIT_ULL(i))
			vfree(oct->instr_queue[i]);
	}

	i = oct->octeon_id;
	vfree(oct);

	octeon_device[i] = NULL;
	octeon_device_count--;
}
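/* octeon_allocate_device_mem() below carves the device structure, the
 * caller's private area, the chip-specific config, and the dispatch
 * list out of a single vmalloc'ed buffer, with each component rounded
 * up to an 8-byte boundary:
 *
 *	buf                                  -> struct octeon_device
 *	buf + octdevsize                     -> oct->priv
 *	buf + octdevsize + priv_size         -> oct->chip
 *	buf + octdevsize + priv_size
 *	    + configsize                     -> oct->dispatch.dlist
 *
 * The single vfree(oct) in octeon_free_device_mem() above therefore
 * releases all four regions at once.
 */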
static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
							u32 priv_size)
{
	struct octeon_device *oct;
	u8 *buf = NULL;
	u32 octdevsize = 0, configsize = 0, size;

	switch (pci_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		configsize = sizeof(struct octeon_cn6xxx);
		break;

	case OCTEON_CN23XX_PF_VID:
		configsize = sizeof(struct octeon_cn23xx_pf);
		break;
	case OCTEON_CN23XX_VF_VID:
		configsize = sizeof(struct octeon_cn23xx_vf);
		break;
	default:
		pr_err("%s: Unknown PCI Device: 0x%x\n",
		       __func__,
		       pci_id);
		return NULL;
	}

	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	octdevsize = sizeof(struct octeon_device);
	if (octdevsize & 0x7)
		octdevsize += (8 - (octdevsize & 0x7));

	if (priv_size & 0x7)
		priv_size += (8 - (priv_size & 0x7));

	size = octdevsize + priv_size + configsize +
		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);

	buf = vmalloc(size);
	if (!buf)
		return NULL;

	memset(buf, 0, size);

	oct = (struct octeon_device *)buf;
	oct->priv = (void *)(buf + octdevsize);
	oct->chip = (void *)(buf + octdevsize + priv_size);
	oct->dispatch.dlist = (struct octeon_dispatch *)
		(buf + octdevsize + priv_size + configsize);

	return oct;
}

struct octeon_device *octeon_allocate_device(u32 pci_id,
					     u32 priv_size)
{
	u32 oct_idx = 0;
	struct octeon_device *oct = NULL;

	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx == MAX_OCTEON_DEVICES)
		return NULL;

	oct = octeon_allocate_device_mem(pci_id, priv_size);
	if (!oct)
		return NULL;

	spin_lock_init(&oct->pci_win_lock);
	spin_lock_init(&oct->mem_access_lock);

	octeon_device_count++;
	octeon_device[oct_idx] = oct;

	oct->octeon_id = oct_idx;
	snprintf(oct->device_name, sizeof(oct->device_name),
		 "LiquidIO%d", (oct->octeon_id));

	return oct;
}

int
octeon_allocate_ioq_vector(struct octeon_device *oct)
{
	int i, num_ioqs = 0;
	struct octeon_ioq_vector *ioq_vector;
	int cpu_num;
	int size;

	if (OCTEON_CN23XX_PF(oct))
		num_ioqs = oct->sriov_info.num_pf_rings;
	else if (OCTEON_CN23XX_VF(oct))
		num_ioqs = oct->sriov_info.rings_per_vf;

	size = sizeof(struct octeon_ioq_vector) * num_ioqs;

	oct->ioq_vector = vmalloc(size);
	if (!oct->ioq_vector)
		return 1;
	memset(oct->ioq_vector, 0, size);
	for (i = 0; i < num_ioqs; i++) {
		ioq_vector = &oct->ioq_vector[i];
		ioq_vector->oct_dev = oct;
		ioq_vector->iq_index = i;
		ioq_vector->droq_index = i;
		ioq_vector->mbox = oct->mbox[i];

		cpu_num = i % num_online_cpus();
		cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);

		if (oct->chip_id == OCTEON_CN23XX_PF_VID)
			ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
		else
			ioq_vector->ioq_num = i;
	}
	return 0;
}

void
octeon_free_ioq_vector(struct octeon_device *oct)
{
	vfree(oct->ioq_vector);
}

/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 iq_no = 0;
	union oct_txpciq txpciq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));

	oct->num_iqs = 0;

	oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
					   numa_node);
	if (!oct->instr_queue[0])
		oct->instr_queue[0] =
			vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[0])
		return 1;
	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
	oct->instr_queue[0]->q_index = 0;
	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
	oct->instr_queue[0]->ifidx = 0;
	txpciq.u64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = oct->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;
	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		/* prevent memory leak */
		vfree(oct->instr_queue[0]);
		oct->instr_queue[0] = NULL;
		return 1;
	}

	oct->num_iqs++;
	return 0;
}
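/* Illustrative call order (an assumed caller, not code from this file):
 * queue 0 in each direction is brought up back to back during device
 * init, IQ first, then OQ:
 *
 *	if (octeon_setup_instr_queues(oct))
 *		return -ENOMEM;
 *	if (octeon_setup_output_queues(oct))
 *		return -ENOMEM;
 */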
int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 desc_size = 0;
	u32 oq_no = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct)) {
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
		desc_size =
			CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
	}
	oct->num_oqs = 0;
	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
	if (!oct->droq[0])
		oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
	if (!oct->droq[0])
		return 1;

	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
		vfree(oct->droq[oq_no]);
		oct->droq[oq_no] = NULL;
		return 1;
	}
	oct->num_oqs++;

	return 0;
}

int octeon_set_io_queues_off(struct octeon_device *oct)
{
	int loop = BUSY_READING_REG_VF_LOOP_COUNT;

	if (OCTEON_CN6XXX(oct)) {
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
		u32 q_no;

		/* IOQs will already be in reset.
		 * If the RST bit is set, wait for the QUIET bit to be set.
		 * Once the QUIET bit is set, clear the RST bit.
		 */
		for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
			u64 reg_val = octeon_read_csr64(
				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

			while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
			       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
			       loop) {
				reg_val = octeon_read_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				loop--;
			}
			if (!loop) {
				dev_err(&oct->pci_dev->dev,
					"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
					q_no);
				return -1;
			}

			reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
			octeon_write_csr64(oct,
					   CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					   reg_val);

			reg_val = octeon_read_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
				dev_err(&oct->pci_dev->dev,
					"unable to reset qno %u\n", q_no);
				return -1;
			}
		}
	}
	return 0;
}

void octeon_set_droq_pkt_op(struct octeon_device *oct,
			    u32 q_no,
			    u32 enable)
{
	u32 reg_val = 0;

	/* Disable the i/p and o/p queues for this Octeon. */
	if (OCTEON_CN6XXX(oct)) {
		reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);

		if (enable)
			reg_val = reg_val | (1 << q_no);
		else
			reg_val = reg_val & (~(1 << q_no));

		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
	}
}

int octeon_init_dispatch_list(struct octeon_device *oct)
{
	u32 i;

	oct->dispatch.count = 0;

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
	}

	for (i = 0; i <= REQTYPE_LAST; i++)
		octeon_register_reqtype_free_fn(oct, i, NULL);

	spin_lock_init(&oct->dispatch.lock);

	return 0;
}

void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_del(temp);
			list_add_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		vfree(temp);
	}
}
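/* The dispatch list is a small hash table: OPCODE_SUBCODE() folds the
 * opcode/subcode pair into one value, the low bits select a slot, the
 * first registration for a slot is stored inline in the slot itself,
 * and later collisions are chained on the slot's list.  The lookup key
 * used by octeon_get_dispatch() below is simply:
 *
 *	u16 combined = OPCODE_SUBCODE(opcode, subcode);
 *	u32 idx = combined & OCTEON_OPCODE_MASK;
 */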
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
		    u16 subcode)
{
	u32 idx;
	struct list_head *dispatch;
	octeon_dispatch_fn_t fn = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct octeon_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn;
}

/* octeon_register_dispatch_fn
 * Parameters:
 *   oct     - pointer to the octeon device.
 *   opcode  - opcode for which the driver should call the registered function.
 *   subcode - subcode for which the driver should call the registered function.
 *   fn      - the function to call when a packet with "opcode" arrives in
 *             the octeon output queues.
 *   fn_arg  - the argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in the Octeon output queues with "opcode".
 * Returns:
 *   Success: 0
 *   Failure: 1
 * Locks:
 *   No locks are held.
 */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
			    u16 opcode,
			    u16 subcode,
			    octeon_dispatch_fn_t fn, void *fn_arg)
{
	u32 idx;
	octeon_dispatch_fn_t pfn;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);
	/* Add dispatch function to first level of lookup table */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
		return 0;
	}

	spin_unlock_bh(&oct->dispatch.lock);

	/* Check if there was a function already registered for this
	 * opcode/subcode.
	 */
	pfn = octeon_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct octeon_dispatch *dispatch;

		dev_dbg(&oct->pci_dev->dev,
			"Adding opcode to dispatch list linked list\n");
		dispatch = (struct octeon_dispatch *)
			   vmalloc(sizeof(struct octeon_dispatch));
		if (!dispatch) {
			dev_err(&oct->pci_dev->dev,
				"No memory to add dispatch function\n");
			return 1;
		}
		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/* Add dispatch function to linked list of fn ptrs
		 * at the hashed index.
		 */
		spin_lock_bh(&oct->dispatch.lock);
		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);

	} else {
		dev_err(&oct->pci_dev->dev,
			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			opcode, subcode);
		return 1;
	}

	return 0;
}
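/* Usage sketch: the NIC module registers octeon_core_drv_init() (below)
 * for the firmware's "core driver active" message roughly like this
 * (shown for illustration; the actual call lives in the module code):
 *
 *	octeon_register_dispatch_fn(oct, OPCODE_NIC,
 *				    OPCODE_NIC_CORE_DRV_ACTIVE,
 *				    octeon_core_drv_init, oct);
 */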
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
	u32 i;
	char app_name[16];
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct octeon_core_setup *cs = NULL;
	u32 num_nic_ports = 0;

	if (OCTEON_CN6XXX(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));

	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
			atomic_read(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		get_oct_app_string(
			(u32)recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
		oct->fw_info.max_nic_ports =
			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
		oct->fw_info.num_gmx_ports =
			(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
	}

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		dev_err(&oct->pci_dev->dev,
			"Config has more ports than firmware allows (%d > %d).\n",
			num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}
	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;

	oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;

	for (i = 0; i < oct->num_iqs; i++)
		oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
			(u32)sizeof(*cs),
			recv_pkt->buffer_size[0]);
	}

	memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		OCT_SERIAL_LEN);

	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	dev_info(&oct->pci_dev->dev,
		 "Running %s (%llu Hz)\n",
		 app_name, CVM_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
	    (oct->io_qmask.iq & BIT_ULL(q_no)))
		return oct->instr_queue[q_no]->max_count;

	return -1;
}
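/* These qsize helpers (tx above, rx below) return -1 for an unknown
 * device or an inactive queue, so a caller should treat a negative
 * value as "no such queue", e.g. (illustrative):
 *
 *	int qsize = octeon_get_tx_qsize(oct, q_no);
 *
 *	if (qsize < 0)
 *		return -EINVAL;
 */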
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
	    (oct->io_qmask.oq & BIT_ULL(q_no)))
		return oct->droq[q_no]->max_count;
	return -1;
}

/* Returns the OCTEON-specific configuration established during the
 * host/firmware handshake.
 */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
{
	struct octeon_config *default_oct_conf = NULL;

	/* check the OCTEON device model & return the corresponding octeon
	 * configuration
	 */
	if (OCTEON_CN6XXX(oct)) {
		default_oct_conf =
			(struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		default_oct_conf = (struct octeon_config *)
				   (CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		default_oct_conf = (struct octeon_config *)
				   (CHIP_CONF(oct, cn23xx_vf));
	}
	return default_oct_conf;
}

/* The scratch register address is the same in all OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1	0x3C0

/** Get the octeon device pointer.
 * @param octeon_id - the id for which the octeon device pointer is required.
 * @return Success: octeon device pointer.
 * @return Failure: NULL.
 */
struct octeon_device *lio_get_device(u32 octeon_id)
{
	if (octeon_id >= MAX_OCTEON_DEVICES)
		return NULL;
	else
		return octeon_device[octeon_id];
}

u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write the MSB first.
	 */
	addrhi = (addr >> 32);
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX) ||
	    (oct->chip_id == OCTEON_CN23XX_PF_VID))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);

	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}

void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write the MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
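/* Windowed-access sketch (illustrative): 64-bit CSRs outside the
 * directly mapped BAR are reached through the window registers above,
 * e.g. the LMC reset control polled by octeon_mem_access_ok() below:
 *
 *	u64 val = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
 *
 * The readl() of the just-written half in both helpers flushes the
 * posted 32-bit writes so the two halves reach the window in order.
 */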
int octeon_mem_access_ok(struct octeon_device *oct)
{
	u64 access_okay = 0;
	u64 lmc0_reset_ctl;

	/* Check to make sure a DDR interface is enabled */
	if (OCTEON_CN23XX_PF(oct)) {
		lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
	} else {
		lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
	}

	return access_okay ? 0 : 1;
}

int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}

/** Get the octeon id assigned to the octeon device passed as argument.
 * This function is exported to other modules.
 * @param dev - octeon device pointer passed as a void *.
 * @return octeon device id
 */
int lio_get_device_id(void *dev)
{
	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
	u32 i;

	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return octeon_dev->octeon_id;
	return -1;
}

void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
	u64 instr_cnt;
	struct octeon_device *oct = NULL;

	/* the whole thing needs to be atomic, ideally */
	if (droq) {
		spin_lock_bh(&droq->lock);
		writel(droq->pkt_count, droq->pkts_sent_reg);
		droq->pkt_count = 0;
		/* this write needs to be flushed before we release the lock */
		mmiowb();
		spin_unlock_bh(&droq->lock);
		oct = droq->oct_dev;
	}
	if (iq) {
		spin_lock_bh(&iq->lock);
		writel(iq->pkt_in_done, iq->inst_cnt_reg);
		iq->pkt_in_done = 0;
		/* this write needs to be flushed before we release the lock */
		mmiowb();
		spin_unlock_bh(&iq->lock);
		oct = iq->oct_dev;
	}
	/* Write RESEND: writing RESEND in SLI_PKTX_CNTS should be enough
	 * to trigger tx interrupts as well, if they are pending.
	 */
	if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
		if (droq) {
			writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
		} else if (iq) {
			/* We race with firmware here: read and write back
			 * the IN_DONE_CNTS.
			 */
			instr_cnt = readq(iq->inst_cnt_reg);
			writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
				CN23XX_INTR_RESEND),
			       iq->inst_cnt_reg);
		}
	}
}
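/* Usage sketch (illustrative): a NAPI poll routine would re-arm the
 * interrupts for its queue pair once it has finished its work, roughly:
 *
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		lio_enable_irq(droq, iq);
 *	}
 */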