class OpenlayerHandler(BaseCallbackHandler):
    """LangChain callback handler that logs to Openlayer."""

    def __init__(self, **kwargs: Any) -> None:
        super().__init__()

        self.start_time: float = None
        # ... (other attribute initializations elided)
        self.output: str = ""  # appended to with += in on_llm_end, so start empty
        self.metadata: Dict[str, Any] = kwargs or {}  # extra kwargs become trace metadata

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> Any:  # noqa: ARG002
        """Run when LLM starts running."""

    def on_chat_model_start(
        self,
        serialized: Dict[str, Any],  # noqa: ARG002
        messages: List[List[langchain_schema.BaseMessage]],
        **kwargs: Any,
    ) -> Any:
        # ... (method body elided, along with the _langchain_messages_to_prompt helper)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""

    def on_llm_end(self, response: langchain_schema.LLMResult, **kwargs: Any) -> Any:  # noqa: ARG002, E501
        """Run when LLM ends running."""
        self.end_time = time.time()
        self.latency = (self.end_time - self.start_time) * 1000  # in milliseconds

        if response.llm_output and "token_usage" in response.llm_output:
            self.prompt_tokens = response.llm_output["token_usage"].get("prompt_tokens", 0)
            self.completion_tokens = response.llm_output["token_usage"].get("completion_tokens", 0)
            self.cost = self._get_cost_estimate(
                num_input_tokens=self.prompt_tokens,
                num_output_tokens=self.completion_tokens,
            )
            self.total_tokens = response.llm_output["token_usage"].get("total_tokens", 0)

        # Concatenate every generation into a single-line output string.
        for generations in response.generations:
            for generation in generations:
                self.output += generation.text.replace("\n", " ")

        self._add_to_trace()

    def _get_cost_estimate(self, num_input_tokens: int, num_output_tokens: int) -> float:
        """Returns the cost estimate for a given model and number of tokens
        (None when the model is missing from the cost table)."""
        if self.model not in constants.OPENAI_COST_PER_TOKEN:
            return None
        cost_per_token = constants.OPENAI_COST_PER_TOKEN[self.model]
        return cost_per_token["input"] * num_input_tokens + cost_per_token["output"] * num_output_tokens

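    # Worked example for _get_cost_estimate (hypothetical rates, not the real
    # constants.OPENAI_COST_PER_TOKEN values): with
    # cost_per_token = {"input": 0.5e-6, "output": 1.5e-6} in USD per token,
    # 1_000 input tokens and 500 output tokens cost
    # 0.5e-6 * 1_000 + 1.5e-6 * 500 = 0.00125 USD.
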
    def _add_to_trace(self) -> None:
        """Adds to the trace."""
        # ... (trace call elided; only its final keyword argument is shown below)
            metadata=self.metadata,
        )

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
        """Run when LLM errors."""

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when chain starts running."""

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when chain ends running."""

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
        """Run when chain errors."""

    def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> Any:
        """Run when tool starts running."""

    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
        """Run when tool ends running."""

    def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> Any:
        """Run when tool errors."""

    def on_text(self, text: str, **kwargs: Any) -> Any:
        """Run on arbitrary text."""

    def on_agent_action(self, action: langchain_schema.AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""

    def on_agent_finish(self, finish: langchain_schema.AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
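
# A minimal usage sketch, not part of the original file. It assumes this
# module is importable as-is and that the langchain-openai package is
# installed; the "environment" kwarg is an arbitrary example, since any
# extra kwargs passed to the handler simply land in its metadata dict.
if __name__ == "__main__":
    from langchain_openai import ChatOpenAI

    handler = OpenlayerHandler(environment="development")
    chat = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[handler])
    chat.invoke("What is the meaning of life?")
    # on_chat_model_start records the prompt and start time; on_llm_end then
    # computes latency, token counts, and the cost estimate before sending
    # everything to the trace via _add_to_trace().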