 from splunklib.searchcommands.internals import MetadataDecoder, MetadataEncoder, Recorder, RecordWriterV2
 from splunklib.searchcommands import SearchMetric
-from collections import deque, OrderedDict
+from collections import deque, namedtuple, OrderedDict
 from cStringIO import StringIO
 from functools import wraps
 from glob import iglob
@@ -63,7 +63,7 @@ def random_dict():
     # contain utf-8 encoded byte strings or--better still--unicode strings. This is because the json package
     # converts all byte strings to unicode strings before serializing them.

-    return {'a': random_float(), 'b': random_unicode(), '福 酒吧': {'fu': random_float(), 'bar': random_float()}}
+    return OrderedDict((('a', random_float()), ('b', random_unicode()), ('福 酒吧', OrderedDict((('fu', random_float()), ('bar', random_float()))))))


 def random_float():
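
Note: the switch to OrderedDict makes random_dict() yield its keys in insertion order, so the JSON written into a recording is byte-for-byte reproducible; a plain Python 2 dict iterates its keys in hash-dependent order. A minimal sketch of the property being relied on, stdlib only and not part of this commit:

import json
from collections import OrderedDict

# Insertion order is preserved, so the serialized form is stable across runs.
d = OrderedDict((('a', 1.0), ('b', u'x')))
assert json.dumps(d) == '{"a": 1.0, "b": "x"}'
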
@@ -260,8 +260,8 @@ def test_record_writer_with_recordings(self):

         for input_file in iglob(base_path + '*.input.gz'):

-            with gzip.open(input_file, 'rb') as f:
-                test_data = pickle.load(f)
+            with gzip.open(input_file, 'rb') as ifile:
+                test_data = pickle.load(ifile)

             writer = RecordWriterV2(StringIO(), maxresultrows=10)  # small for the purposes of this unit test
             write_record = writer.write_record
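
The loop above loads each *.input.gz fixture by ungzipping and unpickling it. For orientation, a fixture in that container format can be produced with the stdlib alone; the path and payload below are illustrative only, not the SDK's recording machinery:

import gzip
import pickle

# Hypothetical payload; the real recordings pickle richer test structures.
test_data = {'metrics': {}, 'records': []}
with gzip.open('example.input.gz', 'wb') as ofile:
    pickle.dump(test_data, ofile)
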
@@ -282,13 +282,76 @@ def test_record_writer_with_recordings(self):

             writer.flush(finished=True)

-            with io.open(os.path.splitext(os.path.splitext(input_file)[0])[0] + '.output', 'rb') as f:
-                expected = f.read()
+            # Read expected data

-            self.assertMultiLineEqual(writer._ofile.getvalue(), expected)
+            expected_path = os.path.splitext(os.path.splitext(input_file)[0])[0] + '.output'
+
+            with io.open(expected_path, 'rb') as ifile:
+                expected = ifile.read()
+
+            expected = self._load_chunks(StringIO(expected))
+
+            # Read observed data
+
+            ifile = writer._ofile
+            ifile.seek(0)
+
+            observed = self._load_chunks(ifile)
+
+            # Write observed data (as an aid to diagnostics)
+
+            observed_path = expected_path + '.observed'
+            observed_value = ifile.getvalue()
+
+            with io.open(observed_path, 'wb') as ifile:
+                ifile.write(observed_value)
+
+            self._compare_chunks(observed, expected)

         return

+    def _compare_chunks(self, chunks_1, chunks_2):
+        self.assertEqual(len(chunks_1), len(chunks_2))
+        n = 0
+        for chunk_1, chunk_2 in izip(chunks_1, chunks_2):
+            self.assertDictEqual(
+                chunk_1.metadata, chunk_2.metadata,
+                'Chunk {0}: metadata error: "{1}" != "{2}"'.format(n, chunk_1.metadata, chunk_2.metadata))
+            self.assertMultiLineEqual(chunk_1.body, chunk_2.body, 'Chunk {0}: data error'.format(n))
+            n += 1
+        return
+
+    def _load_chunks(self, ifile):
+        import re
+
+        pattern = re.compile(r'chunked 1.0,(?P<metadata_length>\d+),(?P<body_length>\d+)\n')
+        decoder = json.JSONDecoder()
+
+        chunks = []
+
+        while True:
+
+            line = ifile.readline()
+
+            if len(line) == 0:
+                break
+
+            match = pattern.match(line)
+            self.assertIsNotNone(match)
+
+            metadata_length = int(match.group('metadata_length'))
+            metadata = ifile.read(metadata_length)
+            metadata = decoder.decode(metadata)
+
+            body_length = int(match.group('body_length'))
+            body = ifile.read(body_length) if body_length > 0 else ''
+
+            chunks.append(TestInternals._Chunk(metadata, body))
+
+        return chunks
+
+    _Chunk = namedtuple('Chunk', (b'metadata', b'body'))
+
     _dictionary = {
         'a': 1,
         'b': 2,
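
The _load_chunks helper added above parses the framing that RecordWriterV2 emits: a header line of the form chunked 1.0,<metadata_length>,<body_length> followed by exactly metadata_length bytes of JSON metadata and body_length bytes of body. A minimal sketch of producing one such chunk by hand (write_chunk is a hypothetical helper, not part of the SDK):

import json
from cStringIO import StringIO

def write_chunk(ofile, metadata, body=''):
    # Frame one chunk the way _load_chunks expects to read it back.
    metadata = json.dumps(metadata)
    ofile.write('chunked 1.0,%d,%d\n' % (len(metadata), len(body)))
    ofile.write(metadata)
    ofile.write(body)

ofile = StringIO()
write_chunk(ofile, {'finished': True}, 'r1,r2\nv1,v2\n')
# Header emitted: chunked 1.0,18,12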