Lines matching refs: snf

Cross-reference listing of every line that references snf, the struct mtk_snand driver context of the MediaTek SPI-NAND flash interface (SNFI) controller driver. Matches are grouped by function; the number before each line is its line number in the source file, so gaps in the numbering are simply lines that did not match.

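snand_prepare_bouncebuf() grows the driver's DMA bounce buffer on demand: a buffer that is already large enough is kept, otherwise the old one is freed, a new one is allocated, and the whole buffer is filled with 0xff (the erased-flash pattern). The elided line after 334 presumably returns -ENOMEM on allocation failure.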
snand_prepare_bouncebuf():
  328  static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
  330  if (snf->buf_len >= size)
  332  kfree(snf->buf);
  333  snf->buf = kmalloc(size, GFP_KERNEL);
  334  if (!snf->buf)
  336  snf->buf_len = size;
  337  memset(snf->buf, 0xff, snf->buf_len);
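
nfi_read32(), nfi_write32() and nfi_write16() are thin MMIO accessors over the NFI register block at snf->nfi_base.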
nfi_read32():
  341  static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
  343  return readl(snf->nfi_base + reg);

nfi_write32():
  346  static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
  348  writel(val, snf->nfi_base + reg);

nfi_write16():
  351  static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
  353  writew(val, snf->nfi_base + reg);
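
nfi_rmw32() is a read-modify-write helper: it clears the bits in clr and sets the bits in set within one NFI register. Only its readl()/writel() lines match below; a reconstruction follows the listing.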
nfi_rmw32():
  356  static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
  360  val = readl(snf->nfi_base + reg);
  363  writel(val, snf->nfi_base + reg);
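
The statements between lines 360 and 363 are elided by the match filter; given the helper's (reg, clr, set) signature, they are almost certainly the classic clear-then-set sequence. A hedged reconstruction (the middle two lines are inferred, not part of the listing):

    static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
    {
            u32 val;

            val = readl(snf->nfi_base + reg);
            val &= ~clr;    /* inferred: drop the bits requested in clr */
            val |= set;     /* inferred: raise the bits requested in set */
            writel(val, snf->nfi_base + reg);
    }

nfi_read_data() copies len bytes out of a register window (used below for SNF_GPRAM) while issuing only aligned 32-bit reads; a sketch of the loop follows its listing.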
nfi_read_data():
  366  static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
  372  val = nfi_read32(snf, i & ~(es - 1));
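
Line 372 shows the aligned fetch: es is sizeof(u32), and i & ~(es - 1) rounds the byte address down to a word boundary. One plausible shape for the surrounding loop (the byte-extraction line is inferred from the visible fetch, not shown in the listing):

    static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
    {
            u32 i, val = 0, es = sizeof(val);

            for (i = reg; i < reg + len; i++) {
                    /* fetch a fresh word at the start and at every word boundary */
                    if (i == reg || i % es == 0)
                            val = nfi_read32(snf, i & ~(es - 1));
                    /* inferred: extract the addressed byte from that word */
                    *data++ = (val >> ((i % es) * 8)) & 0xff;
            }
    }

mtk_nfi_reset() flushes the NFI FIFO, resets the NFI state machine, and then polls three status registers in turn: NFI_MASTERSTA until the bus master goes idle, NFI_STA until the FSM bits clear, and NFI_FIFOSTA until the read and write FIFO counters drain.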
mtk_nfi_reset():
  378  static int mtk_nfi_reset(struct mtk_snand *snf)
  383  nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
  385  ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
  386  !(val & snf->caps->mastersta_mask), 0,
  389  dev_err(snf->dev, "NFI master is still busy after reset\n");
  393  ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
  394  !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
  397  dev_err(snf->dev, "Failed to reset NFI\n");
  401  fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
  402  ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
  403  ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
  406  dev_err(snf->dev, "NFI FIFOs are not empty\n");
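
mtk_snand_mac_reset() soft-resets the SNFI MAC through the SW_RST bit and polls SNF_STA_CTL1 for the reset to take effect; the final SNF_MISC_CTL write (arguments elided at 425) appears to restore the controller's timing defaults.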
mtk_snand_mac_reset():
  413  static int mtk_snand_mac_reset(struct mtk_snand *snf)
  418  nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
  420  ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
  423  dev_err(snf->dev, "Failed to reset SNFI MAC\n");
  425  nfi_write32(snf, SNF_MISC_CTL,
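
mtk_snand_mac_trigger() runs one MAC-mode (manually composed) SPI transfer: enable the MAC, program the output and input byte counts, pulse SF_TRIG, then poll SNF_MAC_CTL first for WIP_READY and then for WIP to clear before switching the MAC off again.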
mtk_snand_mac_trigger():
  431  static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
  436  nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
  437  nfi_write32(snf, SNF_MAC_OUTL, outlen);
  438  nfi_write32(snf, SNF_MAC_INL, inlen);
  440  nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
  442  ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
  445  dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
  449  ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
  452  dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
  455  nfi_write32(snf, SNF_MAC_CTL, 0);
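
mtk_snand_mac_io() is the spi_mem exec path for plain commands: after a MAC reset it serializes the op's opcode, address, dummy and TX bytes into the SNF_GPRAM window (the repeated writes at 483-515 flush each partially filled 32-bit word), triggers the transfer with reg_offs TX bytes and rx_len RX bytes, and copies any response back out of GPRAM with nfi_read_data().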
mtk_snand_mac_io():
  460  static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
  477  mtk_snand_mac_reset(snf);
  483  nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
  492  nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
  499  nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
  508  nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
  515  nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
  518  dev_dbg(snf->dev, "%d: %08X", i,
  519  nfi_read32(snf, SNF_GPRAM + i));
  521  dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
  523  ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
  530  nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
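
mtk_snand_setup_pagefmt() validates a page/OOB geometry and programs NFI_PAGEFMT. It short-circuits if the format is unchanged, derives the sector count from the controller's sector size, and walks the supported spare sizes from largest to smallest for the best fit; the sector_size == 1024 checks around 595/601 appear to halve and then re-double the spare size around the table lookup, since the spare-size table is kept per 512 data bytes. The result is cached in snf->nfi_cfg and the bounce buffer sized to page_size + oob_size.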
mtk_snand_setup_pagefmt():
  534  static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
  544  if (snf->nfi_cfg.page_size == page_size &&
  545  snf->nfi_cfg.oob_size == oob_size)
  548  nsectors = page_size / snf->caps->sector_size;
  549  if (nsectors > snf->caps->max_sectors) {
  550  dev_err(snf->dev, "too many sectors required.\n");
  554  if (snf->caps->sector_size == 512) {
  567  if (snf->caps->sector_size == 512)
  573  if (snf->caps->sector_size == 512)
  579  if (snf->caps->sector_size == 512)
  588  dev_err(snf->dev, "unsupported page size.\n");
  595  if (snf->caps->sector_size == 1024)
  598  for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
  599  if (snf->caps->spare_sizes[i] <= spare_size) {
  600  spare_size = snf->caps->spare_sizes[i];
  601  if (snf->caps->sector_size == 1024)
  609  dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
  613  nfi_write32(snf, NFI_PAGEFMT,
  614  (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
  615  (snf->caps->fdm_size << NFI_FDM_NUM_S) |
  620  snf->nfi_cfg.page_size = page_size;
  621  snf->nfi_cfg.oob_size = oob_size;
  622  snf->nfi_cfg.nsectors = nsectors;
  623  snf->nfi_cfg.spare_size = spare_size;
  625  dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
  626  snf->caps->sector_size, spare_size, nsectors);
  627  return snand_prepare_bouncebuf(snf, page_size + oob_size);
  629  dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
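
mtk_snand_ecc_init_ctx() derives the hardware ECC setup: the per-sector strength target is the requested correction divided across the page's sectors, then clamped both by mtk_ecc_adjust_strength() (what the ECC engine supports) and by the spare bytes left after the FDM region; the chosen strength and the per-sector step size are reported through the NAND ECC conf, with a warning if the request could not be met.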
mtk_snand_ecc_init_ctx():
  662  struct mtk_snand *snf = nand_to_mtk_snand(nand);
  673  ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
  696  strength = desired_correction / snf->nfi_cfg.nsectors;
  700  ecc_cfg->sectors = snf->nfi_cfg.nsectors;
  701  ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
  704  parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
  705  max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
  707  mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
  716  mtk_ecc_adjust_strength(snf->ecc, &s_next);
  727  conf->step_size = snf->caps->sector_size;
  731  dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
  733  dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
  734  ecc_cfg->strength, snf->caps->sector_size);
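
mtk_snand_ecc_prepare_io_req() re-applies the page format and arms the per-request state consumed by the page I/O paths: snf->autofmt and snf->ecc_cfg.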
mtk_snand_ecc_prepare_io_req():
  749  struct mtk_snand *snf = nand_to_mtk_snand(nand);
  753  ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
  757  snf->autofmt = true;
  758  snf->ecc_cfg = ecc_cfg;
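
mtk_snand_ecc_finish_io_req() clears that state again and, for reads, folds the controller's ECC statistics into mtd->ecc_stats, returning -EBADMSG if any sector was uncorrectable and the maximum bitflip count otherwise.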
mtk_snand_ecc_finish_io_req():
  765  struct mtk_snand *snf = nand_to_mtk_snand(nand);
  768  snf->ecc_cfg = NULL;
  769  snf->autofmt = false;
  773  if (snf->ecc_stats.failed)
  774  mtd->ecc_stats.failed += snf->ecc_stats.failed;
  775  mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
  776  return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
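
mtk_snand_read_fdm() copies each sector's FDM bytes (the free OOB metadata) out of the per-sector NFI_FDML/NFI_FDMM register pair; a sketch of the elided extraction follows the listing.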
mtk_snand_read_fdm():
  786  static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
  792  for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
  793  vall = nfi_read32(snf, NFI_FDML(i));
  794  valm = nfi_read32(snf, NFI_FDMM(i));
  796  for (j = 0; j < snf->caps->fdm_size; j++)
  799  oobptr += snf->caps->fdm_size;
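
vall and valm hold a sector's low and high four FDM bytes. The body of the inner loop at 796 is elided; an inferred extraction consistent with the visible reads would be:

    for (j = 0; j < snf->caps->fdm_size; j++)
            /* inferred: bytes 0-3 come from vall, bytes 4-7 from valm */
            oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

mtk_snand_write_fdm() is the inverse: it packs fdm_size OOB bytes per sector into vall/valm (the packing lines are elided) and writes them back through NFI_FDML(i)/NFI_FDMM(i).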
mtk_snand_write_fdm():
  803  static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
  805  u32 fdm_size = snf->caps->fdm_size;
  810  for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
  823  nfi_write32(snf, NFI_FDML(i), vall);
  824  nfi_write32(snf, NFI_FDMM(i), valm);
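
mtk_snand_bm_swap() keeps the bad-block marker visible where NAND convention expects it: it swaps the byte at the marker's raw position inside the serialized page (which, with per-sector spare areas interleaved, falls short of page_size) with the corresponding byte in the FDM region of the bounce buffer. It is a no-op without the bbm_swap capability or on single-sector pages.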
mtk_snand_bm_swap():
  830  static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
  834  if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
  839  buf_bbm_pos = snf->nfi_cfg.page_size -
  840  (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
  841  fdm_bbm_pos = snf->nfi_cfg.page_size +
  842  (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
  844  swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
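
mtk_snand_fdm_bm_swap() is the companion swap entirely inside the FDM area of the bounce buffer: the first FDM byte of the first sector trades places with the first FDM byte of the last sector.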
mtk_snand_fdm_bm_swap():
  847  static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
  851  if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
  855  fdm_bbm_pos1 = snf->nfi_cfg.page_size;
  856  fdm_bbm_pos2 = snf->nfi_cfg.page_size +
  857  (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
  858  swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
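
mtk_snand_read_page_cache() is the full read-from-cache pipeline built around the bounce buffer. The fls() at 890 splits the op address into an in-page offset and the remaining address bits; the controller is then programmed (read command and address in SNF_RD_CTL2/3, I/O mode in SNF_MISC_CTL, a burst of (sector + spare) * nsectors bytes in SNF_MISC_CTL2, DMA and sector count in NFI_CNFG/NFI_CON), the buffer is DMA-mapped, the ECC decoder enabled, and the transfer kicked off with a dummy read command plus CON_BRD/STR_DATA. Completion is signalled by the custom-read interrupt through snf->op_done and then confirmed by polling the sector counter and bus-idle status; afterwards ECC statistics are collected, FDM bytes read back, the bad-block-marker swaps undone, and the requested window copied to op->data.buf.in. A page that reads back erased (READ_EMPTY in NFI_STA) zeroes the ECC statistics instead of reporting failures, and the cleanup path unconditionally clears NFI_CON/NFI_CNFG, acknowledges CUS_READ_DONE, masks interrupts, and drops DATARD_CUSTOM_EN.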
mtk_snand_read_page_cache():
   861  static int mtk_snand_read_page_cache(struct mtk_snand *snf,
   864  u8 *buf = snf->buf;
   865  u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
   872  u32 dma_len = snf->buf_len;
   877  if (snf->autofmt) {
   881  dma_len = snf->nfi_cfg.page_size;
   890  last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
   896  if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
   899  mtk_snand_mac_reset(snf);
   900  mtk_nfi_reset(snf);
   903  nfi_write32(snf, SNF_RD_CTL2,
   908  nfi_write32(snf, SNF_RD_CTL3, op_addr);
   920  nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
   924  rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
   925  snf->nfi_cfg.nsectors;
   926  nfi_write32(snf, SNF_MISC_CTL2,
   930  nfi_write16(snf, NFI_CNFG,
   934  nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
   936  buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
   937  ret = dma_mapping_error(snf->dev, buf_dma);
   939  dev_err(snf->dev, "DMA mapping failed.\n");
   942  nfi_write32(snf, NFI_STRADDR, buf_dma);
   944  snf->ecc_cfg->op = ECC_DECODE;
   945  ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
   950  nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
   951  reinit_completion(&snf->op_done);
   954  nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
   957  nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
   958  nfi_write16(snf, NFI_STRDATA, STR_DATA);
   961  &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
   962  dev_err(snf->dev, "DMA timed out for reading from cache.\n");
   968  ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
   969  BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
   972  dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
   977  ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
   978  !(val & snf->caps->mastersta_mask), 0,
   981  dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
   986  ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
   988  dev_err(snf->dev, "wait ecc done timeout\n");
   992  mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
   993  snf->nfi_cfg.nsectors);
   996  dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
   998  if (snf->autofmt) {
   999  mtk_snand_read_fdm(snf, buf_fdm);
  1000  if (snf->caps->bbm_swap) {
  1001  mtk_snand_bm_swap(snf, buf);
  1002  mtk_snand_fdm_bm_swap(snf);
  1007  if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
  1009  snf->ecc_stats.bitflips = 0;
  1010  snf->ecc_stats.failed = 0;
  1011  snf->ecc_stats.corrected = 0;
  1014  u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
  1015  u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
  1018  memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
  1021  } else if (rd_offset < snf->buf_len) {
  1022  u32 cap_len = snf->buf_len - rd_offset;
  1026  memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
  1031  mtk_ecc_disable(snf->ecc);
  1036  dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
  1039  nfi_write32(snf, NFI_CON, 0);
  1040  nfi_write16(snf, NFI_CNFG, 0);
  1043  nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
  1044  nfi_write32(snf, SNF_STA_CTL1, 0);
  1047  nfi_read32(snf, NFI_INTR_STA);
  1048  nfi_write32(snf, NFI_INTR_EN, 0);
  1050  nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
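
mtk_snand_write_page_cache() mirrors the read path for program load: the bounce buffer is padded with 0xff up to the write offset, filled with the caller's data, bad-block-marker-swapped, and its OOB packed into the FDM registers; the load command and address go into SNF_PG_CTL1/2 and the burst length into SNF_MISC_CTL2. The buffer is DMA-mapped for output with the ECC encoder enabled, the transfer starts with a dummy write command plus CON_BWR/STR_DATA, completion waits on the custom-program interrupt and the NFI_ADDRCNTR sector counter, and cleanup acknowledges CUS_PG_DONE and clears PG_LOAD_CUSTOM_EN.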
mtk_snand_write_page_cache():
  1054  static int mtk_snand_write_page_cache(struct mtk_snand *snf,
  1064  u32 dma_len = snf->buf_len;
  1069  if (snf->autofmt) {
  1073  dma_len = snf->nfi_cfg.page_size;
  1078  last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
  1083  mtk_snand_mac_reset(snf);
  1084  mtk_nfi_reset(snf);
  1087  memset(snf->buf, 0xff, wr_offset);
  1089  cap_len = snf->buf_len - wr_offset;
  1092  memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
  1093  if (snf->autofmt) {
  1094  if (snf->caps->bbm_swap) {
  1095  mtk_snand_fdm_bm_swap(snf);
  1096  mtk_snand_bm_swap(snf, snf->buf);
  1098  mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
  1102  nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
  1105  nfi_write32(snf, SNF_PG_CTL2, op_addr);
  1111  nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
  1115  wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
  1116  snf->nfi_cfg.nsectors;
  1117  nfi_write32(snf, SNF_MISC_CTL2,
  1121  nfi_write16(snf, NFI_CNFG,
  1125  nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
  1126  buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
  1127  ret = dma_mapping_error(snf->dev, buf_dma);
  1129  dev_err(snf->dev, "DMA mapping failed.\n");
  1132  nfi_write32(snf, NFI_STRADDR, buf_dma);
  1134  snf->ecc_cfg->op = ECC_ENCODE;
  1135  ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
  1140  nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
  1141  reinit_completion(&snf->op_done);
  1145  nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
  1148  nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
  1149  nfi_write16(snf, NFI_STRDATA, STR_DATA);
  1152  &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
  1153  dev_err(snf->dev, "DMA timed out for program load.\n");
  1159  ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
  1160  NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
  1163  dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
  1167  mtk_ecc_disable(snf->ecc);
  1169  dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
  1172  nfi_write32(snf, NFI_CON, 0);
  1173  nfi_write16(snf, NFI_CNFG, 0);
  1176  nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
  1177  nfi_write32(snf, SNF_STA_CTL1, 0);
  1180  nfi_read32(snf, NFI_INTR_STA);
  1181  nfi_write32(snf, NFI_INTR_EN, 0);
  1183  nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
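
mtk_snand_irq() reads the interrupt status and enable mask, disables further NFI interrupts, and completes snf->op_done for whichever page operation is waiting. The elided lines between 1316 and 1321 presumably return IRQ_NONE when no enabled interrupt is pending.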
mtk_snand_irq():
  1312  struct mtk_snand *snf = id;
  1315  sta = nfi_read32(snf, NFI_INTR_STA);
  1316  ien = nfi_read32(snf, NFI_INTR_EN);
  1321  nfi_write32(snf, NFI_INTR_EN, 0);
  1322  complete(&snf->op_done);