diff --git a/hw/femu/zns/zftl.c b/hw/femu/zns/zftl.c
index 2a4f932d28..9b14e4648c 100644
--- a/hw/femu/zns/zftl.c
+++ b/hw/femu/zns/zftl.c
@@ -167,6 +167,125 @@ static int zns_get_wcidx(struct zns_ssd* zns)
     return -1;
 }
 
+static void zns_invalidate_zone_cache(struct zns_ssd *zns, uint32_t zone_idx)
+{
+    int i;
+
+    for (i = 0; i < zns->cache.num_wc; i++) {
+        if (zns->cache.write_cache[i].sblk == zone_idx) {
+            #ifdef FEMU_DEBUG_ZFTL
+            uint64_t discarded_entries = zns->cache.write_cache[i].used;
+            #endif
+
+            zns->cache.write_cache[i].used = 0;
+            zns->cache.write_cache[i].sblk = INVALID_SBLK;
+
+            ftl_debug("Invalidated write_cache[%d] for zone %u (%lu entries discarded)\n",
+                      i, zone_idx, discarded_entries);
+            return;
+        }
+    }
+}
+
+static void zns_invalidate_zone_mappings(struct zns_ssd *zns, uint32_t zone_idx,
+                                         uint64_t zone_size_lbas, uint64_t lbasz)
+{
+    uint64_t zone_start_lba = zone_idx * zone_size_lbas;
+    uint64_t zone_end_lba = (zone_idx + 1) * zone_size_lbas;
+    uint64_t secs_per_pg = LOGICAL_PAGE_SIZE / lbasz;
+    uint64_t start_lpn = zone_start_lba / secs_per_pg;
+    uint64_t end_lpn = zone_end_lba / secs_per_pg;
+    uint64_t lpn;
+    uint64_t invalidated_count = 0;
+
+    for (lpn = start_lpn; lpn < end_lpn && lpn < zns->l2p_sz; lpn++) {
+        if (zns->maptbl[lpn].ppa != UNMAPPED_PPA) {
+            zns->maptbl[lpn].ppa = UNMAPPED_PPA;
+            invalidated_count++;
+        }
+    }
+
+    ftl_debug("Invalidated %lu LPN mappings for zone %u (LPN %lu-%lu)\n",
+              invalidated_count, zone_idx, start_lpn, end_lpn - 1);
+}
+
+static void zns_reset_block_state(struct zns_ssd *zns, uint32_t zone_idx)
+{
+    int ch, lun, pl;
+    struct ppa ppa;
+    struct zns_blk *blk;
+
+    for (ch = 0; ch < zns->num_ch; ch++) {
+        for (lun = 0; lun < zns->num_lun; lun++) {
+            for (pl = 0; pl < zns->num_plane; pl++) {
+                ppa.ppa = 0; /* zero all bits first, as in zns_zone_reset(); avoids reading indeterminate ppa fields */
+                ppa.g.ch = ch;
+                ppa.g.fc = lun;
+                ppa.g.pl = pl;
+                ppa.g.blk = zone_idx;
+                ppa.g.pg = 0;
+                ppa.g.spg = 0;
+
+                blk = get_blk(zns, &ppa);
+                blk->page_wp = 0;
+            }
+        }
+    }
+
+    ftl_debug("Reset block state for zone %u (all page_wp = 0)\n", zone_idx);
+}
+
+uint64_t zns_zone_reset(struct zns_ssd *zns, uint32_t zone_idx,
+                        uint64_t zone_size_lbas, uint64_t lbasz, uint64_t stime)
+{
+    int ch, lun, pl;
+    struct ppa ppa;
+    struct nand_cmd erase_cmd;
+    uint64_t sublat, maxlat = 0;
+    uint64_t total_blocks_erased = 0;
+
+    ftl_debug("=== Zone Reset Started for Zone %u ===\n", zone_idx);
+
+    /* Step 1: Invalidate write cache (instant) */
+    zns_invalidate_zone_cache(zns, zone_idx);
+
+    /* Step 2: Invalidate L2P mappings (instant) */
+    zns_invalidate_zone_mappings(zns, zone_idx, zone_size_lbas, lbasz);
+
+    /* Step 3: Reset block state (instant) */
+    zns_reset_block_state(zns, zone_idx);
+
+    /* Step 4: Simulate physical erase across all channels/LUNs/planes (parallel) */
+    erase_cmd.type = USER_IO;
+    erase_cmd.cmd = NAND_ERASE;
+    erase_cmd.stime = stime;
+
+    for (ch = 0; ch < zns->num_ch; ch++) {
+        for (lun = 0; lun < zns->num_lun; lun++) {
+            for (pl = 0; pl < zns->num_plane; pl++) {
+                ppa.ppa = 0;
+                ppa.g.ch = ch;
+                ppa.g.fc = lun;
+                ppa.g.pl = pl;
+                ppa.g.blk = zone_idx;
+                ppa.g.pg = 0;
+                ppa.g.spg = 0;
+
+                sublat = zns_advance_status(zns, &ppa, &erase_cmd);
+                maxlat = (sublat > maxlat) ? sublat : maxlat;
+                total_blocks_erased++;
+            }
+        }
+    }
+
+    ftl_debug("Zone %u reset complete: erased %lu blocks across %d ch * %d lun * %d planes\n",
+              zone_idx, total_blocks_erased, (int)zns->num_ch, (int)zns->num_lun, (int)zns->num_plane);
+    ftl_debug("Maximum erase latency: %lu ns (%.2f ms)\n", maxlat, maxlat / 1000000.0);
+    ftl_debug("=== Zone Reset Finished ===\n\n");
+
+    return maxlat;
+}
+
 static uint64_t zns_read(struct zns_ssd *zns, NvmeRequest *req)
 {
     uint64_t lba = req->slba;
@@ -192,7 +311,7 @@ static uint64_t zns_read(struct zns_ssd *zns, NvmeRequest *req)
         srd.stime = req->stime;
 
         sublat = zns_advance_status(zns, &ppa, &srd);
-        femu_log("[R] lpn:\t%lu\t<--ch:\t%u\tlun:\t%u\tpl:\t%u\tblk:\t%u\tpg:\t%u\tsubpg:\t%u\tlat\t%lu\n",lpn,ppa.g.ch,ppa.g.fc,ppa.g.pl,ppa.g.blk,ppa.g.pg,ppa.g.spg,sublat);
+        ftl_debug("[R] lpn:\t%lu\t<--ch:\t%u\tlun:\t%u\tpl:\t%u\tblk:\t%u\tpg:\t%u\tsubpg:\t%u\tlat\t%lu\n",lpn,ppa.g.ch,ppa.g.fc,ppa.g.pl,ppa.g.blk,ppa.g.pg,ppa.g.spg,sublat);
 
         maxlat = (sublat > maxlat) ? sublat : maxlat;
     }
@@ -234,7 +353,7 @@ static uint64_t zns_wc_flush(struct zns_ssd* zns, int wcidx, int type,uint64_t stime)
             ppa.g.spg = subpage;
             /* update maptbl */
             set_maptbl_ent(zns, lpn, &ppa);
-            //femu_log("[F] lpn:\t%lu\t-->ch:\t%u\tlun:\t%u\tpl:\t%u\tblk:\t%u\tpg:\t%u\tsubpg:\t%u\tlat\t%lu\n",lpn,ppa.g.ch,ppa.g.fc,ppa.g.pl,ppa.g.blk,ppa.g.pg,ppa.g.spg,sublat);
+            // ftl_debug("[F] lpn:\t%lu\t-->ch:\t%u\tlun:\t%u\tpl:\t%u\tblk:\t%u\tpg:\t%u\tsubpg:\t%u\tlat\t%lu\n",lpn,ppa.g.ch,ppa.g.fc,ppa.g.pl,ppa.g.blk,ppa.g.pg,ppa.g.spg,sublat);
         }
         i+=ZNS_PAGE_SIZE/LOGICAL_PAGE_SIZE;
     }
@@ -295,16 +414,16 @@ static uint64_t zns_write(struct zns_ssd *zns, NvmeRequest *req)
     for (lpn = start_lpn; lpn <= end_lpn; lpn++) {
         if(zns->cache.write_cache[wcidx].used==zns->cache.write_cache[wcidx].cap)
         {
-            femu_log("[W] flush wc %d (%u/%u)\n",wcidx,(int)zns->cache.write_cache[wcidx].used,(int)zns->cache.write_cache[wcidx].cap);
+            ftl_debug("[W] flush wc %d (%u/%u)\n",wcidx,(int)zns->cache.write_cache[wcidx].used,(int)zns->cache.write_cache[wcidx].cap);
             sublat = zns_wc_flush(zns,wcidx,USER_IO,req->stime);
-            femu_log("[W] flush lat: %u\n", (int)sublat);
+            ftl_debug("[W] flush lat: %u\n", (int)sublat);
             maxlat = (sublat > maxlat) ? sublat : maxlat;
             sublat = 0;
         }
         zns->cache.write_cache[wcidx].lpns[zns->cache.write_cache[wcidx].used++]=lpn;
         sublat += SRAM_WRITE_LATENCY_NS; //Simplified timing emulation
         maxlat = (sublat > maxlat) ? sublat : maxlat;
-        femu_log("[W] lpn:\t%lu\t-->wc cache:%u, used:%u\n",lpn,(int)wcidx,(int)zns->cache.write_cache[wcidx].used);
+        ftl_debug("[W] lpn:\t%lu\t-->wc cache:%u, used:%u\n",lpn,(int)wcidx,(int)zns->cache.write_cache[wcidx].used);
     }
     return maxlat;
 }
diff --git a/hw/femu/zns/zftl.h b/hw/femu/zns/zftl.h
index f7521ba3df..a2b4418f43 100644
--- a/hw/femu/zns/zftl.h
+++ b/hw/femu/zns/zftl.h
@@ -9,6 +9,9 @@
 
 void zftl_init(FemuCtrl *n);
 
+uint64_t zns_zone_reset(struct zns_ssd *zns, uint32_t zone_idx,
+                        uint64_t zone_size_lbas, uint64_t lbasz, uint64_t stime);
+
 #ifdef FEMU_DEBUG_ZFTL
 #define ftl_debug(fmt, ...) \
     do { printf("[Misao] ZFTL-Dbg: " fmt, ## __VA_ARGS__); } while (0)
diff --git a/hw/femu/zns/zns.c b/hw/femu/zns/zns.c
index 396b71bdf0..f2a316f140 100644
--- a/hw/femu/zns/zns.c
+++ b/hw/femu/zns/zns.c
@@ -502,11 +502,14 @@ struct zns_zone_reset_ctx {
     NvmeZone *zone;
 };
 
-static void zns_aio_zone_reset_cb(NvmeRequest *req, NvmeZone *zone)
+static uint64_t zns_aio_zone_reset_cb(NvmeRequest *req, NvmeZone *zone)
 {
     NvmeNamespace *ns = req->ns;
+    FemuCtrl *n = ns->ctrl;
+    struct zns_ssd *zns = n->zns;
+    uint32_t zone_idx = zns_zone_idx(ns, zone->d.zslba);
+    uint64_t erase_latency = 0;
 
-    /* FIXME, We always assume reset SUCCESS */
     switch (zns_get_zone_state(zone)) {
     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
         /* fall through */
@@ -520,27 +523,20 @@
         zone->w_ptr = zone->d.zslba;
         zone->d.wp = zone->w_ptr;
         zns_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY);
+        break;
     default:
         break;
     }
 
-#if 0
-    FemuCtrl *n = ns->ctrl;
-    int ch, lun;
-    struct zns_ssd *zns = n->zns;
-    uint64_t num_ch = zns->num_ch;
-    uint64_t num_lun = zns->num_lun;
-
-    struct ppa ppa;
-    for (ch = 0; ch < num_ch; ch++) {
-        for (lun = 0; lun < num_lun; lun++) {
-            ppa.g.ch = ch;
-            ppa.g.fc = lun;
-            ppa.g.blk = zns_zone_idx(ns, zone->d.zslba);
-            //FIXME: no erase
-        }
+    erase_latency = zns_zone_reset(zns, zone_idx, n->zone_size, zns->lbasz, req->stime);
+
+    /* Reset write pointer if this was the active zone */
+    if (zns->active_zone == zone_idx) {
+        zns->wp.ch = 0;
+        zns->wp.lun = 0;
     }
-#endif
+
+    return erase_latency;
 }
 
 typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,
@@ -631,6 +627,8 @@ static uint16_t zns_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
 static uint16_t zns_reset_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
 {
+    uint64_t erase_lat = 0;
+
     switch (state) {
     case NVME_ZONE_STATE_EMPTY:
         return NVME_SUCCESS;
@@ -643,7 +641,10 @@ static uint16_t zns_reset_zone(NvmeNamespace *ns, NvmeZone *zone,
         return NVME_ZONE_INVAL_TRANSITION;
     }
 
-    zns_aio_zone_reset_cb(req, zone);
+    erase_lat = zns_aio_zone_reset_cb(req, zone);
+
+    req->reqlat = erase_lat;
+    req->expire_time += erase_lat;
 
     return NVME_SUCCESS;
 }