# See the License for the specific language governing permissions and
# limitations under the License.

+import logging
import re
from typing import Any, Dict, List, Optional, Sequence, Union

from nemoguardrails.logging.callbacks import logging_callbacks
from nemoguardrails.logging.explain import LLMCallInfo

+log = logging.getLogger(__name__)
+


class LLMCallException(Exception):
    """A wrapper around the LLM call invocation exception.
@@ -46,7 +49,7 @@ def __init__(self, inner_exception: Any):
        self.inner_exception = inner_exception


-def _infer_model_name(llm: BaseLanguageModel):
+def _infer_model_name(llm: Union[BaseLanguageModel, Runnable]) -> str:
5053 """Helper to infer the model name based from an LLM instance.
5154
5255 Because not all models implement correctly _identifying_params from LangChain, we have to
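
For context, the docstring above refers to fallback probing because LangChain's `_identifying_params` is not populated consistently across model wrappers. A minimal sketch of that kind of probing, assuming the function checks a params mapping first and then common instance attributes (the helper name and exact lookup order are illustrative, not the actual body, which lies outside this hunk):

    def _infer_model_name_sketch(llm) -> str:
        # Prefer LangChain's _identifying_params when the wrapper populates it.
        params = getattr(llm, "_identifying_params", None) or {}
        for key in ("model_name", "model"):
            if params.get(key):
                return str(params[key])
        # Fall back to common instance attributes.
        for attr in ("model_name", "model"):
            value = getattr(llm, attr, None)
            if value:
                return str(value)
        return "unknown"
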
@@ -142,13 +145,45 @@ def _prepare_callbacks(
    return logging_callbacks


+def _log_model_and_base_url(llm: Union[BaseLanguageModel, Runnable]) -> None:
+    """Extract and log the model and base URL from an LLM instance."""
+    model_name = _infer_model_name(llm)
+    base_url = None
+
+    # If llm is a `ChatNIM` instance, we expect its `client` to be an `OpenAI` client with a `base_url` attribute.
+    if hasattr(llm, "client"):
+        client = getattr(llm, "client")
+        if hasattr(client, "base_url"):
+            base_url = str(client.base_url)
+    else:
+        # If llm is a `ChatNVIDIA` instance or other provider, check common attribute names that store the base URL.
+        for attr in [
+            "base_url",
+            "openai_api_base",
+            "azure_endpoint",
+            "api_base",
+            "endpoint",
+        ]:
+            if hasattr(llm, attr):
+                value = getattr(llm, attr, None)
+                if value:
+                    base_url = str(value)
+                    break
+
+    if base_url:
+        log.info(f"Invoking LLM: model={model_name}, url={base_url}")
+    else:
+        log.info(f"Invoking LLM: model={model_name}")
+
+
async def _invoke_with_string_prompt(
    llm: Union[BaseLanguageModel, Runnable],
    prompt: str,
    callbacks: BaseCallbackManager,
):
    """Invoke LLM with string prompt."""
    try:
+        _log_model_and_base_url(llm)
        return await llm.ainvoke(prompt, config=RunnableConfig(callbacks=callbacks))
    except Exception as e:
        raise LLMCallException(e)
@@ -163,6 +198,7 @@ async def _invoke_with_message_list(
    messages = _convert_messages_to_langchain_format(prompt)

    try:
+        _log_model_and_base_url(llm)
        return await llm.ainvoke(messages, config=RunnableConfig(callbacks=callbacks))
    except Exception as e:
        raise LLMCallException(e)
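
A quick illustration of the `client.base_url` branch of `_log_model_and_base_url`, using hypothetical stand-in classes (the exact model string logged depends on what `_infer_model_name` recovers from the object):

    import logging

    logging.basicConfig(level=logging.INFO)

    class _FakeClient:
        # Mimics an OpenAI-style client exposing `base_url`.
        base_url = "http://localhost:8000/v1"

    class _FakeLLM:
        client = _FakeClient()
        model_name = "meta/llama-3.1-8b-instruct"

    _log_model_and_base_url(_FakeLLM())
    # Expected log line, roughly:
    # INFO:...:Invoking LLM: model=..., url=http://localhost:8000/v1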