@@ -101,13 +101,15 @@ def test_standard_add_and_get(standard_history):
101101 "role" : "tool" ,
102102 "content" : "tool result 1" ,
103103 "tool_call_id" : "tool call one" ,
104+ "metadata" : {"tool call params" : "abc 123" },
104105 }
105106 )
106107 standard_history .add_message (
107108 {
108109 "role" : "tool" ,
109110 "content" : "tool result 2" ,
110111 "tool_call_id" : "tool call two" ,
112+ "metadata" : {"tool call params" : "abc 456" },
111113 }
112114 )
113115 standard_history .add_message ({"role" : "user" , "content" : "third prompt" })
@@ -121,7 +123,12 @@ def test_standard_add_and_get(standard_history):
     partial_context = standard_history.get_recent(top_k=3)
     assert len(partial_context) == 3
     assert partial_context == [
-        {"role": "tool", "content": "tool result 2", "tool_call_id": "tool call two"},
+        {
+            "role": "tool",
+            "content": "tool result 2",
+            "tool_call_id": "tool call two",
+            "metadata": {"tool call params": "abc 456"},
+        },
         {"role": "user", "content": "third prompt"},
         {"role": "llm", "content": "third response"},
     ]
@@ -133,8 +140,18 @@ def test_standard_add_and_get(standard_history):
133140 {"role" : "llm" , "content" : "first response" },
134141 {"role" : "user" , "content" : "second prompt" },
135142 {"role" : "llm" , "content" : "second response" },
136- {"role" : "tool" , "content" : "tool result 1" , "tool_call_id" : "tool call one" },
137- {"role" : "tool" , "content" : "tool result 2" , "tool_call_id" : "tool call two" },
143+ {
144+ "role" : "tool" ,
145+ "content" : "tool result 1" ,
146+ "tool_call_id" : "tool call one" ,
147+ "metadata" : {"tool call params" : "abc 123" },
148+ },
149+ {
150+ "role" : "tool" ,
151+ "content" : "tool result 2" ,
152+ "tool_call_id" : "tool call two" ,
153+ "metadata" : {"tool call params" : "abc 456" },
154+ },
138155 {"role" : "user" , "content" : "third prompt" },
139156 {"role" : "llm" , "content" : "third response" },
140157 ]
@@ -160,7 +177,11 @@ def test_standard_add_messages(standard_history):
     standard_history.add_messages(
         [
             {"role": "user", "content": "first prompt"},
-            {"role": "llm", "content": "first response"},
+            {
+                "role": "llm",
+                "content": "first response",
+                "metadata": {"llm provider": "openai"},
+            },
             {"role": "user", "content": "second prompt"},
             {"role": "llm", "content": "second response"},
             {
@@ -182,7 +203,11 @@ def test_standard_add_messages(standard_history):
     assert len(full_context) == 8
     assert full_context == [
         {"role": "user", "content": "first prompt"},
-        {"role": "llm", "content": "first response"},
+        {
+            "role": "llm",
+            "content": "first response",
+            "metadata": {"llm provider": "openai"},
+        },
         {"role": "user", "content": "second prompt"},
         {"role": "llm", "content": "second response"},
         {"role": "tool", "content": "tool result 1", "tool_call_id": "tool call one"},
@@ -198,17 +223,21 @@ def test_standard_messages_property(standard_history):
198223 {"role" : "user" , "content" : "first prompt" },
199224 {"role" : "llm" , "content" : "first response" },
200225 {"role" : "user" , "content" : "second prompt" },
201- {"role" : "llm" , "content" : "second response" },
202- {"role" : "user" , "content" : "third prompt" },
226+ {
227+ "role" : "llm" ,
228+ "content" : "second response" ,
229+ "metadata" : {"params" : "abc" },
230+ },
231+ {"role" : "user" , "content" : "third prompt" , "metadata" : 42 },
203232 ]
204233 )
205234
206235 assert standard_history .messages == [
207236 {"role" : "user" , "content" : "first prompt" },
208237 {"role" : "llm" , "content" : "first response" },
209238 {"role" : "user" , "content" : "second prompt" },
210- {"role" : "llm" , "content" : "second response" },
211- {"role" : "user" , "content" : "third prompt" },
239+ {"role" : "llm" , "content" : "second response" , "metadata" : { "params" : "abc" } },
240+ {"role" : "user" , "content" : "third prompt" , "metadata" : 42 },
212241 ]
213242
214243
@@ -357,7 +386,14 @@ def test_semantic_store_and_get_recent(semantic_history):
     semantic_history.add_message(
         {"role": "tool", "content": "tool result", "tool_call_id": "tool id"}
     )
-    # test default context history size
+    semantic_history.add_message(
+        {
+            "role": "tool",
+            "content": "tool result",
+            "tool_call_id": "tool id",
+            "metadata": "return value from tool",
+        }
+    )  # test default context history size
     default_context = semantic_history.get_recent()
     assert len(default_context) == 5  # 5 is default
 
@@ -367,10 +403,10 @@ def test_semantic_store_and_get_recent(semantic_history):
 
     # test larger context history returns full history
     too_large_context = semantic_history.get_recent(top_k=100)
-    assert len(too_large_context) == 9
+    assert len(too_large_context) == 10
 
     # test that order is maintained
-    full_context = semantic_history.get_recent(top_k=9)
+    full_context = semantic_history.get_recent(top_k=10)
     assert full_context == [
         {"role": "user", "content": "first prompt"},
         {"role": "llm", "content": "first response"},
@@ -381,15 +417,26 @@ def test_semantic_store_and_get_recent(semantic_history):
381417 {"role" : "user" , "content" : "fourth prompt" },
382418 {"role" : "llm" , "content" : "fourth response" },
383419 {"role" : "tool" , "content" : "tool result" , "tool_call_id" : "tool id" },
420+ {
421+ "role" : "tool" ,
422+ "content" : "tool result" ,
423+ "tool_call_id" : "tool id" ,
424+ "metadata" : "return value from tool" ,
425+ },
384426 ]
385427
386428 # test that more recent entries are returned
387429 context = semantic_history .get_recent (top_k = 4 )
388430 assert context == [
389- {"role" : "llm" , "content" : "third response" },
390431 {"role" : "user" , "content" : "fourth prompt" },
391432 {"role" : "llm" , "content" : "fourth response" },
392433 {"role" : "tool" , "content" : "tool result" , "tool_call_id" : "tool id" },
434+ {
435+ "role" : "tool" ,
436+ "content" : "tool result" ,
437+ "tool_call_id" : "tool id" ,
438+ "metadata" : "return value from tool" ,
439+ },
393440 ]
394441
395442 # test no entries are returned and no error is raised if top_k == 0
@@ -422,11 +469,13 @@ def test_semantic_messages_property(semantic_history):
422469 "role" : "tool" ,
423470 "content" : "tool result 1" ,
424471 "tool_call_id" : "tool call one" ,
472+ "metadata" : 42 ,
425473 },
426474 {
427475 "role" : "tool" ,
428476 "content" : "tool result 2" ,
429477 "tool_call_id" : "tool call two" ,
478+ "metadata" : [1 , 2 , 3 ],
430479 },
431480 {"role" : "user" , "content" : "second prompt" },
432481 {"role" : "llm" , "content" : "second response" },
@@ -437,8 +486,18 @@ def test_semantic_messages_property(semantic_history):
     assert semantic_history.messages == [
         {"role": "user", "content": "first prompt"},
         {"role": "llm", "content": "first response"},
-        {"role": "tool", "content": "tool result 1", "tool_call_id": "tool call one"},
-        {"role": "tool", "content": "tool result 2", "tool_call_id": "tool call two"},
+        {
+            "role": "tool",
+            "content": "tool result 1",
+            "tool_call_id": "tool call one",
+            "metadata": 42,
+        },
+        {
+            "role": "tool",
+            "content": "tool result 2",
+            "tool_call_id": "tool call two",
+            "metadata": [1, 2, 3],
+        },
         {"role": "user", "content": "second prompt"},
         {"role": "llm", "content": "second response"},
         {"role": "user", "content": "third prompt"},