 * Operations include:
 * - writing data obtained from an InputStream
 * - getting an OutputStream to stream the data out
- * 
+ *
 * @author Eliot Horowitz and Guy K. Kloss
 */
public class GridFSInputFile extends GridFSFile {
-    
+
    /**
     * Default constructor setting the GridFS file name and providing an input
     * stream containing data to be written to the file.
-     * 
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param in
     *            Stream used for reading data from.
     * @param filename
     *            Name of the file to be created.
-     * @param closeStreamOnPersist 
+     * @param closeStreamOnPersist
     *            Whether the passed-in input stream should be closed once the data chunks have been persisted.
     */
    GridFSInputFile( GridFS fs , InputStream in , String filename , boolean closeStreamOnPersist ) {
        _fs = fs;
        _in = in;
        _filename = filename;
        _closeStreamOnPersist = closeStreamOnPersist;
-        
+
        _id = new ObjectId();
        _chunkSize = GridFS.DEFAULT_CHUNKSIZE;
        _uploadDate = new Date();
        _messageDigester = _md5Pool.get();
        _messageDigester.reset();
        _buffer = new byte[(int) _chunkSize];
    }
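For orientation, a rough sketch of the two usage paths named in the class Javadoc: handing the driver an InputStream, or streaming the data out through an OutputStream. The constructors above are package-private, so the sketch assumes the driver's GridFS.createFile(...) factory methods; gridFs, someBytes and the file names are made up, and imports and exception handling are omitted.

    // Path 1: the driver pulls the data from an InputStream.
    GridFSInputFile fromStream = gridFs.createFile(new FileInputStream("report.pdf"), "report.pdf");
    fromStream.setContentType("application/pdf");
    fromStream.save();               // reads the stream, writes the chunks, stores the file entry

    // Path 2: no InputStream up front; push the data through an OutputStream instead.
    GridFSInputFile toStream = gridFs.createFile("events.log");
    OutputStream out = toStream.getOutputStream();
    out.write(someBytes);
    out.close();                     // per the getOutputStream() Javadoc below, closing is what persists the file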
-    
+
    /**
     * Default constructor setting the GridFS file name and providing an input
     * stream containing data to be written to the file.
-     * 
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param in
@@ -83,13 +83,13 @@ public class GridFSInputFile extends GridFSFile {
    GridFSInputFile( GridFS fs , InputStream in , String filename ) {
        this( fs , in , filename , false );
    }
-    
+
    /**
     * Constructor that only provides a file name, but does not rely on the
     * presence of an {@link java.io.InputStream}. An
     * {@link java.io.OutputStream} can later be obtained for writing using the
     * {@link #getOutputStream()} method.
-     * 
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param filename
@@ -98,32 +98,32 @@ public class GridFSInputFile extends GridFSFile {
    GridFSInputFile( GridFS fs , String filename ) {
        this( fs , null , filename );
    }
-    
+
    /**
     * Minimal constructor that does not rely on the presence of an
     * {@link java.io.InputStream}. An {@link java.io.OutputStream} can later be
     * obtained for writing using the {@link #getOutputStream()} method.
-     * 
+     *
     * @param fs
     *            The GridFS connection handle.
     */
    GridFSInputFile( GridFS fs ) {
        this( fs , null , null );
    }
-    
+
    /**
     * Sets the file name on the GridFS entry.
-     * 
+     *
     * @param fn
     *            File name.
     */
    public void setFilename( String fn ) {
        _filename = fn;
    }
-    
+
    /**
     * Sets the content type (MIME type) on the GridFS entry.
-     * 
+     *
     * @param ct
     *            Content type.
     */
@@ -148,11 +148,11 @@ public void setChunkSize(long _chunkSize) {
    public void save() {
        save( _chunkSize );
    }
-    
+
    /**
     * This method first calls saveChunks(long) if the file data has not been saved yet.
     * Then it persists the file entry to GridFS.
-     * 
+     *
     * @param chunkSize
     *            Size of chunks for file in bytes.
     */
@@ -169,13 +169,13 @@ public void save( long chunkSize ) {
                throw new MongoException( "couldn't save chunks" , ioe );
            }
        }
-        
+
        super.save();
    }
-    
+
    /**
     * @see com.mongodb.gridfs.GridFSInputFile#saveChunks(long)
-     * 
+     *
     * @return Number of the next chunk.
     * @throws IOException
     *             on problems reading the new entry's
@@ -184,12 +184,12 @@ public void save( long chunkSize ) {
    public int saveChunks() throws IOException {
        return saveChunks( _chunkSize );
    }
-    
+
    /**
     * Saves all data from the configured {@link java.io.InputStream} into chunks in
     * GridFS. A non-default chunk size can be specified.
     * This method does NOT save the file object itself; one must call save() to do so.
-     * 
+     *
     * @param chunkSize
     *            Size of chunks for file in bytes.
     * @return Number of the next chunk.
@@ -207,11 +207,11 @@ public int saveChunks( long chunkSize ) throws IOException {
            _chunkSize = chunkSize;
            _buffer = new byte[(int) _chunkSize];
        }
-        
+
        if ( chunkSize > 3.5 * 1000 * 1000 ) {
            throw new MongoException( "chunkSize must be less than 3.5MiB!" );
        }
-        
+
        int bytesRead = 0;
        while ( bytesRead >= 0 ) {
            _currentBufferPosition = 0;
@@ -223,14 +223,14 @@ public int saveChunks( long chunkSize ) throws IOException {
        _finishData();
        return _currentChunkNumber;
    }
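Two practical points about the chunking path above: the guard in saveChunks(long) rejects chunk sizes above 3.5 * 1000 * 1000 bytes (3.5 decimal megabytes, despite the "MiB" wording of the message), and saveChunks only writes the chunk documents, so save() must still be called to persist the file entry. A sketch, reusing the hypothetical gridFs handle from the earlier note:

    GridFSInputFile gridFile = gridFs.createFile(new FileInputStream("big.bin"), "big.bin");
    gridFile.save(1024 * 1024);          // 1 MB chunks; save(long) runs saveChunks(long) first, then stores the entry

    // Equivalent two-step form, handy when metadata should be set after the chunks are written:
    // gridFile.saveChunks(1024 * 1024);
    // gridFile.setContentType("application/octet-stream");
    // gridFile.save();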
-    
+
    /**
     * After retrieving this {@link java.io.OutputStream}, this object will be
     * capable of accepting successively written data to the output stream.
     * To completely persist this GridFS object, you must finally call the {@link java.io.OutputStream#close()}
     * method on the output stream. Note that calling the save() and saveChunks()
     * methods will throw Exceptions once you have obtained the OutputStream.
-     * 
+     *
     * @return Writable stream object.
     */
    public OutputStream getOutputStream() {
@@ -239,11 +239,11 @@ public OutputStream getOutputStream() {
        }
        return _outputStream;
    }
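The Javadoc above describes a write-then-close contract: once getOutputStream() has been called, save() and saveChunks() throw, and only closing the stream finalizes the file. A minimal sketch with hypothetical names (gridFs, header, payload; exception handling omitted):

    GridFSInputFile gridFile = gridFs.createFile("audit.log");   // factory method assumed, as in the earlier sketches
    OutputStream out = gridFile.getOutputStream();
    out.write(header);
    out.write(payload);
    out.close();   // per the Javadoc, closing is what completely persists the GridFS object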
-    
+
    /**
     * Dumps a new chunk into the chunks collection. Depending on the flag,
     * partial buffers (at the end of the data) are also written immediately.
-     * 
+     *
     * @param writePartial
@@ -264,7 +264,7 @@ private void _dumpBuffer( boolean writePartial ) {
            writeBuffer = new byte[_currentBufferPosition];
            System.arraycopy( _buffer , 0 , writeBuffer , 0 , _currentBufferPosition );
        }
-        
+
        DBObject chunk = BasicDBObjectBuilder.start()
                .add( "files_id" , _id )
                .add( "n" , _currentChunkNumber )
@@ -275,10 +275,10 @@ private void _dumpBuffer( boolean writePartial ) {
        _messageDigester.update( writeBuffer );
        _currentBufferPosition = 0;
    }
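_dumpBuffer(boolean) above stores each chunk as its own document, keyed by the owning file's _id ("files_id") and a running chunk index ("n"); the binary payload is appended in the builder calls elided from this hunk (conventionally a "data" field in GridFS). A sketch of the resulting document shape, with fileId and chunkBytes as placeholders:

    DBObject exampleChunk = BasicDBObjectBuilder.start()
            .add("files_id", fileId)     // ObjectId of the owning file entry
            .add("n", 0)                 // zero-based chunk index
            .add("data", chunkBytes)     // byte[] payload; field name assumed, not shown in this excerpt
            .get();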
-    
+
    /**
     * Reads a buffer's worth of data from the {@link java.io.InputStream}.
-     * 
+     *
     * @return Number of bytes read from the stream.
     * @throws IOException
     *             if reading from the stream fails.
@@ -296,7 +296,7 @@ private int _readStream2Buffer() throws IOException {
        }
        return bytesRead;
    }
-    
+
    /**
     * Marks the data as fully written. This needs to be called before super.save().
     */
@@ -316,7 +316,7 @@ private void _finishData() {
            }
        }
    }
-    
+
    private final InputStream _in;
    private boolean _closeStreamOnPersist;
    private boolean _savedChunks = false;
@@ -326,15 +326,15 @@ private void _finishData() {
    private long _totalBytes = 0;
    private MessageDigest _messageDigester = null;
    private OutputStream _outputStream = null;
-    
+
    /**
     * A pool of {@link java.security.MessageDigest} objects.
     */
    static SimplePool<MessageDigest> _md5Pool
            = new SimplePool<MessageDigest>( "md5" , 10 , -1 , false , false ) {
        /**
         * {@inheritDoc}
-         * 
+         *
         * @see com.mongodb.util.SimplePool#createNew()
         */
        protected MessageDigest createNew() {
@@ -345,18 +345,18 @@ protected MessageDigest createNew() {
            }
        }
    };
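The pool above avoids creating a fresh MD5 digester for every file: each GridFSInputFile borrows one in its constructor and updates it with every chunk it writes (see _messageDigester.update(...) earlier). Assuming this driver version exposes the finished digest through GridFSFile's getMD5() accessor, a caller could read it back after saving; names here are hypothetical:

    gridFile.save();
    String md5 = gridFile.getMD5();   // hex digest accumulated chunk by chunk during the save
    System.out.println("stored " + gridFile.getFilename() + " as " + md5);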
-    
+
    /**
     * An output stream implementation that can be used to successively write to
     * a GridFS file.
-     * 
+     *
     * @author Guy K. Kloss
     */
    class MyOutputStream extends OutputStream {
-        
+
        /**
         * {@inheritDoc}
-         * 
+         *
         * @see java.io.OutputStream#write(int)
         */
        @Override
@@ -365,10 +365,10 @@ public void write( int b ) throws IOException {
            byteArray[0] = (byte) (b & 0xff);
            write( byteArray , 0 , 1 );
        }
-        
+
        /**
         * {@inheritDoc}
-         * 
+         *
         * @see java.io.OutputStream#write(byte[], int, int)
         */
        @Override
@@ -390,7 +390,7 @@ public void write( byte[] b , int off , int len ) throws IOException {
                }
            }
        }
-        
+
        /**
         * Processes/saves all data from the {@link java.io.InputStream} and closes
         * the potentially present {@link java.io.OutputStream}. The GridFS file