@@ -534,10 +534,20 @@ static int spinand_erase_op(struct spinand_device *spinand,
534534 return spi_mem_exec_op (spinand -> spimem , & op );
535535}
536536
537- static int spinand_wait (struct spinand_device * spinand ,
538- unsigned long initial_delay_us ,
539- unsigned long poll_delay_us ,
540- u8 * s )
537+ /**
538+ * spinand_wait() - Poll memory device status
539+ * @spinand: the spinand device
540+ * @initial_delay_us: delay in us before starting to poll
541+ * @poll_delay_us: time to sleep between reads in us
542+ * @s: the pointer to variable to store the value of REG_STATUS
543+ *
544+ * This function polls a status register (REG_STATUS) and returns when
545+ * the STATUS_READY bit is 0 or when the timeout has expired.
546+ *
547+ * Return: 0 on success, a negative error code otherwise.
548+ */
549+ int spinand_wait (struct spinand_device * spinand , unsigned long initial_delay_us ,
550+ unsigned long poll_delay_us , u8 * s )
541551{
542552 struct spi_mem_op op = SPINAND_GET_FEATURE_OP (REG_STATUS ,
543553 spinand -> scratchbuf );
@@ -604,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
604614 return spinand_write_reg_op (spinand , REG_BLOCK_LOCK , lock );
605615}
606616
607- static int spinand_read_page (struct spinand_device * spinand ,
608- const struct nand_page_io_req * req )
617+ /**
618+ * spinand_read_page() - Read a page
619+ * @spinand: the spinand device
620+ * @req: the I/O request
621+ *
622+ * Return: 0 or a positive number of bitflips corrected on success.
623+ * A negative error code otherwise.
624+ */
625+ int spinand_read_page (struct spinand_device * spinand ,
626+ const struct nand_page_io_req * req )
609627{
610628 struct nand_device * nand = spinand_to_nand (spinand );
611629 u8 status ;
@@ -635,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand,
635653 return nand_ecc_finish_io_req (nand , (struct nand_page_io_req * )req );
636654}
637655
638- static int spinand_write_page (struct spinand_device * spinand ,
639- const struct nand_page_io_req * req )
656+ /**
657+ * spinand_write_page() - Write a page
658+ * @spinand: the spinand device
659+ * @req: the I/O request
660+ *
661+ * Return: 0 or a positive number of bitflips corrected on success.
662+ * A negative error code otherwise.
663+ */
664+ int spinand_write_page (struct spinand_device * spinand ,
665+ const struct nand_page_io_req * req )
640666{
641667 struct nand_device * nand = spinand_to_nand (spinand );
642668 u8 status ;
@@ -674,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
674700{
675701 struct spinand_device * spinand = mtd_to_spinand (mtd );
676702 struct nand_device * nand = mtd_to_nanddev (mtd );
703+ struct mtd_ecc_stats old_stats ;
677704 struct nand_io_iter iter ;
678705 bool disable_ecc = false;
679706 bool ecc_failed = false;
707+ unsigned int retry_mode = 0 ;
680708 int ret ;
681709
710+ old_stats = mtd -> ecc_stats ;
711+
682712 if (ops -> mode == MTD_OPS_RAW || !mtd -> ooblayout )
683713 disable_ecc = true;
684714
@@ -690,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
690720 if (ret )
691721 break ;
692722
723+ read_retry :
693724 ret = spinand_read_page (spinand , & iter .req );
694725 if (ret < 0 && ret != - EBADMSG )
695726 break ;
696727
697- if (ret == - EBADMSG )
728+ if (ret == - EBADMSG && spinand -> set_read_retry ) {
729+ if (spinand -> read_retries && (++ retry_mode <= spinand -> read_retries )) {
730+ ret = spinand -> set_read_retry (spinand , retry_mode );
731+ if (ret < 0 ) {
732+ spinand -> set_read_retry (spinand , 0 );
733+ return ret ;
734+ }
735+
736+ /* Reset ecc_stats; retry */
737+ mtd -> ecc_stats = old_stats ;
738+ goto read_retry ;
739+ } else {
740+ /* No more retry modes; real failure */
741+ ecc_failed = true;
742+ }
743+ } else if (ret == - EBADMSG ) {
698744 ecc_failed = true;
699- else
745+ } else {
700746 * max_bitflips = max_t (unsigned int , * max_bitflips , ret );
747+ }
701748
702749 ret = 0 ;
703750 ops -> retlen += iter .req .datalen ;
704751 ops -> oobretlen += iter .req .ooblen ;
752+
753+ /* Reset to retry mode 0 */
754+ if (retry_mode ) {
755+ retry_mode = 0 ;
756+ ret = spinand -> set_read_retry (spinand , retry_mode );
757+ if (ret < 0 )
758+ return ret ;
759+ }
705760 }
706761
707762 if (ecc_failed && !ret )
@@ -1292,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand,
12921347 spinand -> id .len = 1 + table [i ].devid .len ;
12931348 spinand -> select_target = table [i ].select_target ;
12941349 spinand -> set_cont_read = table [i ].set_cont_read ;
1350+ spinand -> fact_otp = & table [i ].fact_otp ;
1351+ spinand -> user_otp = & table [i ].user_otp ;
1352+ spinand -> read_retries = table [i ].read_retries ;
1353+ spinand -> set_read_retry = table [i ].set_read_retry ;
12951354
12961355 op = spinand_select_op_variant (spinand ,
12971356 info -> op_variants .read_cache );
@@ -1478,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand)
14781537 mtd -> _max_bad_blocks = nanddev_mtd_max_bad_blocks ;
14791538 mtd -> _resume = spinand_mtd_resume ;
14801539
1540+ if (spinand_user_otp_size (spinand ) || spinand_fact_otp_size (spinand )) {
1541+ ret = spinand_set_mtd_otp_ops (spinand );
1542+ if (ret )
1543+ goto err_cleanup_ecc_engine ;
1544+ }
1545+
14811546 if (nand -> ecc .engine ) {
14821547 ret = mtd_ooblayout_count_freebytes (mtd );
14831548 if (ret < 0 )