// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"

static void ionic_watchdog_cb(struct timer_list *t)
{
	struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
	int hb;

	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	hb = ionic_heartbeat_check(ionic);

	if (hb >= 0 && ionic->master_lif)
		ionic_link_status_check_request(ionic->master_lif);
}

void ionic_init_devinfo(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type);
	idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev);

	memcpy_fromio(idev->dev_info.fw_version,
		      idev->dev_info_regs->fw_version,
		      IONIC_DEVINFO_FWVERS_BUFLEN);

	memcpy_fromio(idev->dev_info.serial_num,
		      idev->dev_info_regs->serial_num,
		      IONIC_DEVINFO_SERIAL_BUFLEN);

	idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0;
	idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0;

	dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}

int ionic_dev_setup(struct ionic *ionic)
{
	struct ionic_dev_bar *bar = ionic->bars;
	unsigned int num_bars = ionic->num_bars;
	struct ionic_dev *idev = &ionic->idev;
	struct device *dev = ionic->dev;
	u32 sig;

	/* BAR0: dev_cmd and interrupts */
	if (num_bars < 1) {
		dev_err(dev, "No bars found, aborting\n");
		return -EFAULT;
	}

	if (bar->len < IONIC_BAR0_SIZE) {
		dev_err(dev, "Resource bar size %lu too small, aborting\n",
			bar->len);
		return -EFAULT;
	}

	idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
	idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
	idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
	idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;

	sig = ioread32(&idev->dev_info_regs->signature);
	if (sig != IONIC_DEV_INFO_SIGNATURE) {
		dev_err(dev, "Incompatible firmware signature %x\n", sig);
		return -EFAULT;
	}

	ionic_init_devinfo(ionic);

	/* BAR1: doorbells */
	bar++;
	if (num_bars < 2) {
		dev_err(dev, "Doorbell bar missing, aborting\n");
		return -EFAULT;
	}

	idev->last_fw_status = 0xff;
	timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
	ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	idev->db_pages = bar->vaddr;
	idev->phy_db_pages = bar->bus_addr;

	return 0;
}

void ionic_dev_teardown(struct ionic *ionic)
{
	del_timer_sync(&ionic->watchdog_timer);
}
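
/* Rough lifecycle sketch (illustrative only, not an exact call trace): the
 * bus probe path is expected to map the PCI bars and then call
 * ionic_dev_setup(), which validates BAR0, reads the device info registers,
 * and starts the watchdog timer; ionic_watchdog_cb() then runs
 * ionic_heartbeat_check() roughly once per watchdog period.
 * ionic_dev_teardown() stops the watchdog on the way out:
 *
 *	err = ionic_dev_setup(ionic);	// after the bars are mapped
 *	...
 *	ionic_dev_teardown(ionic);	// before the bars are unmapped
 */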

/* Devcmd Interface */
int ionic_heartbeat_check(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;
	unsigned long hb_time;
	u8 fw_status;
	u32 hb;

	/* wait a little more than one second before testing again */
	hb_time = jiffies;
	if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
		return 0;

	/* firmware is useful only if the running bit is set and
	 * fw_status != 0xff (bad PCI read)
	 */
	fw_status = ioread8(&idev->dev_info_regs->fw_status);
	if (fw_status != 0xff)
		fw_status &= IONIC_FW_STS_F_RUNNING;	/* use only the run bit */

	/* is this a transition? */
	if (fw_status != idev->last_fw_status &&
	    idev->last_fw_status != 0xff) {
		struct ionic_lif *lif = ionic->master_lif;
		bool trigger = false;

		if (!fw_status || fw_status == 0xff) {
			dev_info(ionic->dev, "FW stopped %u\n", fw_status);
			if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
				trigger = true;
		} else {
			dev_info(ionic->dev, "FW running %u\n", fw_status);
			if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
				trigger = true;
		}

		if (trigger) {
			struct ionic_deferred_work *work;

			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				dev_err(ionic->dev, "%s OOM\n", __func__);
			} else {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				if (fw_status & IONIC_FW_STS_F_RUNNING &&
				    fw_status != 0xff)
					work->fw_status = 1;
				ionic_lif_deferred_enqueue(&lif->deferred, work);
			}
		}
	}
	idev->last_fw_status = fw_status;

	if (!fw_status || fw_status == 0xff)
		return -ENXIO;

	/* early FW has no heartbeat, else FW will return non-zero */
	hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
	if (!hb)
		return 0;

	/* are we stalled? */
	if (hb == idev->last_hb) {
		/* only complain once for each stall seen */
		if (idev->last_hb_time != 1) {
			dev_info(ionic->dev, "FW heartbeat stalled at %d\n",
				 idev->last_hb);
			idev->last_hb_time = 1;
		}

		return -ENXIO;
	}

	if (idev->last_hb_time == 1)
		dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb);

	idev->last_hb = hb;
	idev->last_hb_time = hb_time;

	return 0;
}

u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
	return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}

bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
	return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}

void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
	memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}

void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
	memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
	iowrite32(0, &idev->dev_cmd_regs->done);
	iowrite32(1, &idev->dev_cmd_regs->doorbell);
}
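
/* Typical dev command usage, as in ionic_set_vf_config() below: the caller
 * serializes on ionic->dev_cmd_lock, posts the command through one of the
 * helpers that wrap ionic_dev_cmd_go(), then polls for completion with
 * ionic_dev_cmd_wait() (defined elsewhere in the driver). A rough sketch:
 *
 *	mutex_lock(&ionic->dev_cmd_lock);
 *	ionic_dev_cmd_port_state(&ionic->idev, state);
 *	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
 *	mutex_unlock(&ionic->dev_cmd_lock);
 */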

/* Device commands */
void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.identify.opcode = IONIC_CMD_IDENTIFY,
		.identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.init.opcode = IONIC_CMD_INIT,
		.init.type = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.reset.opcode = IONIC_CMD_RESET,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

/* Port commands */
void ionic_dev_cmd_port_identify(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_init.opcode = IONIC_CMD_PORT_IDENTIFY,
		.port_init.index = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_init.opcode = IONIC_CMD_PORT_INIT,
		.port_init.index = 0,
		.port_init.info_pa = cpu_to_le64(idev->port_info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_reset.opcode = IONIC_CMD_PORT_RESET,
		.port_reset.index = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_STATE,
		.port_setattr.state = state,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_SPEED,
		.port_setattr.speed = cpu_to_le32(speed),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_AUTONEG,
		.port_setattr.an_enable = an_enable,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_FEC,
		.port_setattr.fec_type = fec_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_PAUSE,
		.port_setattr.pause_type = pause_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
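
/* Note: each of the port setattr helpers above changes exactly one attribute
 * per IONIC_CMD_PORT_SETATTR command on port index 0, so a caller updating
 * several link settings (state, speed, autoneg, FEC, pause) would presumably
 * issue one command per attribute, each under dev_cmd_lock as sketched
 * earlier.
 */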

/* VF commands */
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
{
	union ionic_dev_cmd cmd = {
		.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
		.vf_setattr.attr = attr,
		.vf_setattr.vf_index = vf,
	};
	int err;

	switch (attr) {
	case IONIC_VF_ATTR_SPOOFCHK:
		cmd.vf_setattr.spoofchk = *data;
		dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_TRUST:
		cmd.vf_setattr.trust = *data;
		dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_LINKSTATE:
		cmd.vf_setattr.linkstate = *data;
		dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_MAC:
		ether_addr_copy(cmd.vf_setattr.macaddr, data);
		dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
			__func__, vf, data);
		break;
	case IONIC_VF_ATTR_VLAN:
		cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
		dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
			__func__, vf, *(u16 *)data);
		break;
	case IONIC_VF_ATTR_RATE:
		cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
		dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
			__func__, vf, *(u32 *)data);
		break;
	case IONIC_VF_ATTR_STATSADDR:
		cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
		dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
			__func__, vf, *(u64 *)data);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_go(&ionic->idev, &cmd);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&ionic->dev_cmd_lock);

	return err;
}

/* LIF commands */
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
				  u16 lif_type, u8 qtype, u8 qver)
{
	union ionic_dev_cmd cmd = {
		.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
		.q_identify.lif_type = lif_type,
		.q_identify.type = qtype,
		.q_identify.ver = qver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY,
		.lif_identify.type = type,
		.lif_identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
			    dma_addr_t info_pa)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_INIT,
		.lif_init.index = cpu_to_le16(lif_index),
		.lif_init.info_pa = cpu_to_le64(info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_RESET,
		.lif_init.index = cpu_to_le16(lif_index),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
			       u16 lif_index, u16 intr_index)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;

	union ionic_dev_cmd cmd = {
		.q_init.opcode = IONIC_CMD_Q_INIT,
		.q_init.lif_index = cpu_to_le16(lif_index),
		.q_init.type = q->type,
		.q_init.ver = qcq->q.lif->qtype_info[q->type].version,
		.q_init.index = cpu_to_le32(q->index),
		.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					    IONIC_QINIT_F_ENA),
		.q_init.pid = cpu_to_le16(q->pid),
		.q_init.intr_index = cpu_to_le16(intr_index),
		.q_init.ring_size = ilog2(q->num_descs),
		.q_init.ring_base = cpu_to_le64(q->base_pa),
		.q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
	return (lif->hw_index * lif->dbid_count) + pid;
}
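
/* The doorbell page numbering above is a simple linear layout: each LIF owns
 * a block of dbid_count pages, and the process id (pid) indexes into that
 * block. As an illustrative example (values chosen arbitrarily), a LIF with
 * hw_index 2 and dbid_count 8 owns pages 16..23, and pid 3 maps to doorbell
 * page 19.
 */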

int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
		  struct ionic_intr_info *intr,
		  unsigned int num_descs, size_t desc_size)
{
	struct ionic_cq_info *cur;
	unsigned int ring_size;
	unsigned int i;

	if (desc_size == 0 || !is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = ilog2(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	cq->lif = lif;
	cq->bound_intr = intr;
	cq->num_descs = num_descs;
	cq->desc_size = desc_size;
	cq->tail = cq->info;
	cq->done_color = 1;

	cur = cq->info;

	for (i = 0; i < num_descs; i++) {
		if (i + 1 == num_descs) {
			cur->next = cq->info;
			cur->last = true;
		} else {
			cur->next = cur + 1;
		}
		cur->index = i;
		cur++;
	}

	return 0;
}

void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
{
	struct ionic_cq_info *cur;
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
		cur->cq_desc = base + (i * cq->desc_size);
}

void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
{
	cq->bound_q = q;
}

unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
			      ionic_cq_cb cb, ionic_cq_done_cb done_cb,
			      void *done_arg)
{
	unsigned int work_done = 0;

	if (work_to_do == 0)
		return 0;

	while (cb(cq, cq->tail)) {
		if (cq->tail->last)
			cq->done_color = !cq->done_color;
		cq->tail = cq->tail->next;
		DEBUG_STATS_CQE_CNT(cq);

		if (++work_done >= work_to_do)
			break;
	}

	if (work_done && done_cb)
		done_cb(done_arg);

	return work_done;
}
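
/* Rough sketch of how a poll routine might drive the CQ, assuming a
 * per-completion callback in the style of the driver's rx/tx service
 * routines (the callback returns false once it sees a completion whose
 * color does not match cq->done_color, i.e. nothing more to do):
 *
 *	static bool my_service_one(struct ionic_cq *cq,
 *				   struct ionic_cq_info *cq_info)
 *	{
 *		// hypothetical callback: handle one completion entry,
 *		// return false when the entry is not yet done
 *		...
 *	}
 *
 *	work_done = ionic_cq_service(cq, budget, my_service_one, NULL, NULL);
 */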

int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
		 struct ionic_queue *q, unsigned int index, const char *name,
		 unsigned int num_descs, size_t desc_size,
		 size_t sg_desc_size, unsigned int pid)
{
	struct ionic_desc_info *cur;
	unsigned int ring_size;
	unsigned int i;

	if (desc_size == 0 || !is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = ilog2(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->lif = lif;
	q->idev = idev;
	q->index = index;
	q->num_descs = num_descs;
	q->desc_size = desc_size;
	q->sg_desc_size = sg_desc_size;
	q->tail = q->info;
	q->head = q->tail;
	q->pid = pid;

	snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);

	cur = q->info;

	for (i = 0; i < num_descs; i++) {
		if (i + 1 == num_descs)
			cur->next = q->info;
		else
			cur->next = cur + 1;
		cur->index = i;
		cur->left = num_descs - i;
		cur++;
	}

	return 0;
}

void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
	struct ionic_desc_info *cur;
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->desc = base + (i * q->desc_size);
}

void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
	struct ionic_desc_info *cur;
	unsigned int i;

	q->sg_base = base;
	q->sg_base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->sg_desc = base + (i * q->sg_desc_size);
}

void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
		  void *cb_arg)
{
	struct device *dev = q->lif->ionic->dev;
	struct ionic_lif *lif = q->lif;

	q->head->cb = cb;
	q->head->cb_arg = cb_arg;
	q->head = q->head->next;

	dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
		q->lif->index, q->name, q->hw_type, q->hw_index,
		q->head->index, ring_doorbell);

	if (ring_doorbell)
		ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head->index);
}

static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{
	unsigned int mask, tail, head;

	mask = q->num_descs - 1;
	tail = q->tail->index;
	head = q->head->index;

	return ((pos - tail) & mask) < ((head - tail) & mask);
}

void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
		     unsigned int stop_index)
{
	struct ionic_desc_info *desc_info;
	ionic_desc_cb cb;
	void *cb_arg;

	/* check for empty queue */
	if (q->tail->index == q->head->index)
		return;

	/* stop index must be for a descriptor that is not yet completed */
	if (unlikely(!ionic_q_is_posted(q, stop_index)))
		dev_err(q->lif->ionic->dev,
			"ionic stop is not posted %s stop %u tail %u head %u\n",
			q->name, stop_index, q->tail->index, q->head->index);

	do {
		desc_info = q->tail;
		q->tail = desc_info->next;

		cb = desc_info->cb;
		cb_arg = desc_info->cb_arg;

		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;

		if (cb)
			cb(q, desc_info, cq_info, cb_arg);
	} while (desc_info->index != stop_index);
}