Skip to content

Commit 54d2f96

Browse files
committed
chore(logging): Log model name and base URL before invoking LLMs
1 parent bab0026 commit 54d2f96

File tree

1 file changed

+37
-1
lines changed

1 file changed

+37
-1
lines changed

nemoguardrails/actions/llm/utils.py

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
1515

16+
import logging
1617
import re
1718
from typing import Any, Dict, List, Optional, Sequence, Union
1819

@@ -33,6 +34,8 @@
3334
from nemoguardrails.logging.callbacks import logging_callbacks
3435
from nemoguardrails.logging.explain import LLMCallInfo
3536

37+
log = logging.getLogger(__name__)
38+
3639

3740
class LLMCallException(Exception):
3841
"""A wrapper around the LLM call invocation exception.
@@ -46,7 +49,7 @@ def __init__(self, inner_exception: Any):
4649
self.inner_exception = inner_exception
4750

4851

49-
def _infer_model_name(llm: BaseLanguageModel):
52+
def _infer_model_name(llm: Union[BaseLanguageModel, Runnable]) -> str:
5053
"""Helper to infer the model name based from an LLM instance.
5154
5255
Because not all models implement correctly _identifying_params from LangChain, we have to
@@ -142,13 +145,45 @@ def _prepare_callbacks(
142145
return logging_callbacks
143146

144147

148+
def _log_model_and_base_url(llm: Union[BaseLanguageModel, Runnable]) -> None:
    """Extract and log the model name and base URL from an LLM instance.

    Args:
        llm: The LLM instance about to be invoked.

    The base URL is looked up first on ``llm.client`` (OpenAI-style clients
    expose a ``base_url`` attribute there); otherwise a set of common
    provider attribute names on the LLM object itself is checked.
    """
    model_name = _infer_model_name(llm)
    base_url = None

    # If llm is a `ChatNIM` instance, we expect its `client` to be an
    # `OpenAI` client with a `base_url` attribute.
    if hasattr(llm, "client"):
        client = llm.client
        if hasattr(client, "base_url"):
            base_url = str(client.base_url)
    else:
        # If llm is a `ChatNVIDIA` instance or other provider, check common
        # attribute names that store the base URL.
        for attr in (
            "base_url",
            "openai_api_base",
            "azure_endpoint",
            "api_base",
            "endpoint",
        ):
            value = getattr(llm, attr, None)
            if value:
                base_url = str(value)
                break

    # Use lazy %-style arguments so the message is only formatted when the
    # INFO level is actually enabled (avoids eager f-string evaluation).
    if base_url:
        log.info("Invoking LLM: model=%s, url=%s", model_name, base_url)
    else:
        log.info("Invoking LLM: model=%s", model_name)
177+
178+
145179
async def _invoke_with_string_prompt(
    llm: Union[BaseLanguageModel, Runnable],
    prompt: str,
    callbacks: BaseCallbackManager,
):
    """Invoke LLM with string prompt.

    Args:
        llm: The LLM or runnable to invoke.
        prompt: The plain-string prompt to send.
        callbacks: Callback manager attached to the invocation.

    Returns:
        The LLM response from ``ainvoke``.

    Raises:
        LLMCallException: Wrapping any exception raised during invocation.
    """
    try:
        _log_model_and_base_url(llm)
        return await llm.ainvoke(prompt, config=RunnableConfig(callbacks=callbacks))
    except Exception as e:
        # Chain the original exception so tracebacks show the root cause
        # explicitly (__cause__), not just the implicit context.
        raise LLMCallException(e) from e
@@ -163,6 +198,7 @@ async def _invoke_with_message_list(
163198
messages = _convert_messages_to_langchain_format(prompt)
164199

165200
try:
201+
_log_model_and_base_url(llm)
166202
return await llm.ainvoke(messages, config=RunnableConfig(callbacks=callbacks))
167203
except Exception as e:
168204
raise LLMCallException(e)

0 commit comments

Comments
 (0)