@@ -15,7 +15,9 @@ import { CustomProgressEvent } from 'progress-events'
 async function walkDAG (blockstore: ReadableStorage, node: dagPb.PBNode | Uint8Array, queue: Pushable<Uint8Array>, streamPosition: bigint, start: bigint, end: bigint, options: ExporterOptions): Promise<void> {
   // a `raw` node
   if (node instanceof Uint8Array) {
-    queue.push(extractDataFromBlock(node, streamPosition, start, end))
+    const buf = extractDataFromBlock(node, streamPosition, start, end)
+
+    queue.push(buf)
 
     return
   }
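
For context, queue is a pushable async iterable (presumably from the it-pushable package; the import sits outside this hunk): the producer pushes chunks and calls end(), or end(err) to propagate a failure, while the consumer drains it with a for await loop. A minimal sketch of that pattern, not taken from this diff:

import { pushable } from 'it-pushable'

const queue = pushable<Uint8Array>()

// producer side: push chunks, then close the stream
queue.push(Uint8Array.from([1, 2, 3]))
queue.end() // queue.end(err) would make the consumer's loop throw

// consumer side: an ordinary for await loop, as in yieldFileContent below
for await (const chunk of queue) {
  console.log(chunk.byteLength) // 3
}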
@@ -123,6 +125,10 @@ async function walkDAG (blockstore: ReadableStorage, node: dagPb.PBNode | Uint8A
       }
     }
   )
+
+  if (streamPosition >= end) {
+    queue.end()
+  }
 }
 
 const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth, blockstore) => {
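
The new early exit above assumes the walk can tell when it has passed the requested range. That relies on extractDataFromBlock clipping each block to the absolute byte range [start, end); its implementation is outside this diff, but a sketch of the assumed behaviour (clipBlock is a hypothetical name) looks like:

// Hypothetical helper illustrating the assumed clipping behaviour
function clipBlock (block: Uint8Array, blockStart: bigint, start: bigint, end: bigint): Uint8Array {
  const blockEnd = blockStart + BigInt(block.byteLength)

  // the block lies entirely outside the requested range
  if (start >= blockEnd || end <= blockStart) {
    return new Uint8Array(0)
  }

  // trim the tail if the range ends inside this block
  if (end < blockEnd) {
    block = block.subarray(0, Number(end - blockStart))
  }

  // trim the head if the range starts inside this block
  if (start > blockStart) {
    block = block.subarray(Number(start - blockStart))
  }

  return block
}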
@@ -134,34 +140,23 @@ const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth,
     }
 
     const {
-      offset,
-      length
+      start,
+      end
     } = validateOffsetAndLength(fileSize, options.offset, options.length)
 
-    if (length === 0n) {
+    if (end === 0n) {
       return
     }
 
     let read = 0n
-    const wanted = length - offset
+    const wanted = end - start
     const queue = pushable()
 
     options.onProgress?.(new CustomProgressEvent<ExportWalk>('unixfs:exporter:walk:file', {
       cid
     }))
 
-    void walkDAG(blockstore, node, queue, 0n, offset, offset + length, options)
-      .then(() => {
-        if (read < wanted) {
-          throw errCode(new Error('Traversed entire DAG but did not read enough bytes'), 'ERR_UNDER_READ')
-        }
-
-        if (read > wanted) {
-          throw errCode(new Error('Read too many bytes - the file size reported by the UnixFS data in the root node may be incorrect'), 'ERR_OVER_READ')
-        }
-
-        queue.end()
-      })
+    void walkDAG(blockstore, node, queue, 0n, start, end, options)
       .catch(err => {
         queue.end(err)
       })
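
The rename from offset/length to start/end means validateOffsetAndLength now hands back an absolute byte range instead of an offset plus a count, so the caller no longer computes offset + length itself, and the over/under-read checks move out of the .then() into the consumer loop in the hunks below. A hypothetical sketch of that conversion, under the assumption that the helper clamps the range to the file size:

// Hypothetical stand-in for validateOffsetAndLength's assumed contract
function toByteRange (fileSize: bigint, offset: bigint = 0n, length?: bigint): { start: bigint, end: bigint } {
  if (offset < 0n || offset > fileSize) {
    throw new Error('offset out of bounds')
  }

  const start = offset
  const requestedEnd = length == null ? fileSize : start + length

  // clamp so the range never extends past the end of the file
  const end = requestedEnd > fileSize ? fileSize : requestedEnd

  return { start, end }
}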
@@ -173,7 +168,12 @@ const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth,
 
       read += BigInt(buf.byteLength)
 
-      if (read === length) {
+      if (read > wanted) {
+        queue.end()
+        throw errCode(new Error('Read too many bytes - the file size reported by the UnixFS data in the root node may be incorrect'), 'ERR_OVER_READ')
+      }
+
+      if (read === wanted) {
         queue.end()
       }
 
@@ -185,6 +185,10 @@ const fileContent: UnixfsV1Resolver = (cid, node, unixfs, path, resolve, depth,
 
       yield buf
     }
+
+    if (read < wanted) {
+      throw errCode(new Error('Traversed entire DAG but did not read enough bytes'), 'ERR_UNDER_READ')
+    }
   }
 
   return yieldFileContent
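
Taken together, the consumer loop now ends the queue as soon as wanted bytes have arrived, throws ERR_OVER_READ immediately on excess data, and raises ERR_UNDER_READ only after the queue drains short. A sketch of how this resolver is typically driven through the public exporter API (readRange is a hypothetical helper; entry.content() takes the standard offset/length options):

import { exporter } from 'ipfs-unixfs-exporter'
import type { CID } from 'multiformats/cid'
import type { Blockstore } from 'interface-blockstore'

// Hypothetical helper: read the first `length` bytes of a UnixFS file
async function readRange (cid: CID, blockstore: Blockstore, length: number): Promise<Uint8Array[]> {
  const entry = await exporter(cid, blockstore)

  if (entry.type !== 'file') {
    throw new Error('not a UnixFS file')
  }

  const chunks: Uint8Array[] = []

  // offset/length are validated into the start/end range used above
  for await (const chunk of entry.content({ offset: 0, length })) {
    chunks.push(chunk)
  }

  return chunks
}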