diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt
index 809335e3d..28b51f9f9 100644
--- a/THIRD_PARTY_LICENSES.txt
+++ b/THIRD_PARTY_LICENSES.txt
@@ -24,6 +24,12 @@ autots
* Source code: https://github.com/winedarksea/AutoTS
* Project home: https://winedarksea.github.io/AutoTS/build/html/index.html
+autogen
+* Copyright (c) 2024 Microsoft Corporation.
+* License: MIT License
+* Source code: https://github.com/microsoft/autogen
+* Project home: https://microsoft.github.io/autogen/
+
bokeh
 * Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors
* License: BSD 3-Clause "New" or "Revised" License
diff --git a/ads/llm/autogen/__init__.py b/ads/llm/autogen/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ads/llm/autogen/client_v02.py b/ads/llm/autogen/client_v02.py
new file mode 100644
index 000000000..8dd9b6c9e
--- /dev/null
+++ b/ads/llm/autogen/client_v02.py
@@ -0,0 +1,282 @@
+# coding: utf-8
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+
+"""This module contains the custom LLM client for AutoGen v0.2 to use LangChain chat models.
+https://microsoft.github.io/autogen/0.2/blog/2024/01/26/Custom-Models/
+
+To use the custom client:
+1. Prepare the LLM config, including the parameters for initializing the LangChain client.
+2. Register the custom LLM client with ADS.
+
+The LLM config should contain the following keys:
+* model_client_cls: Required by AutoGen to identify the custom client. It should be "LangChainModelClient".
+* langchain_cls: The LangChain chat model class, specified with its full import path.
+* model: Name of the model to be used by AutoGen.
+* client_params: A dictionary containing the parameters for initializing the LangChain chat model.
+
+Although `LangChainModelClient` is designed to be generic and can potentially support any LangChain chat model,
+the invocation depends on the server API spec, so it may not be compatible with some implementations.
+
+Following is an example config for the OCI Generative AI service:
+{
+ "model_client_cls": "LangChainModelClient",
+ "langchain_cls": "langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI",
+ "model": "cohere.command-r-plus",
+ # client_params will be used to initialize the LangChain ChatOCIGenAI class.
+ "client_params": {
+ "model_id": "cohere.command-r-plus",
+ "compartment_id": COMPARTMENT_OCID,
+ "model_kwargs": {"temperature": 0, "max_tokens": 2048},
+ # Update the authentication method as needed
+ "auth_type": "SECURITY_TOKEN",
+ "auth_profile": "DEFAULT",
+ # You may need to specify `service_endpoint` if the service is in a different region.
+ },
+}
+
+Following is an example config for OCI Data Science Model Deployment:
+{
+ "model_client_cls": "LangChainModelClient",
+ "langchain_cls": "ads.llm.ChatOCIModelDeploymentVLLM",
+ "model": "odsc-llm",
+ "endpoint": "https://MODEL_DEPLOYMENT_URL/predict",
+ "model_kwargs": {"temperature": 0.1, "max_tokens": 2048},
+    # function_call_params will only be added to the API call when functions/tools are added.
+ "function_call_params": {
+ "tool_choice": "auto",
+ "chat_template": ChatTemplates.mistral(),
+ },
+}
+
+Note that if `client_params` is not specified in the config, all arguments from the config except
+`model_client_cls`, `langchain_cls`, `function_call_params`, and `invoke_params` will be used to initialize
+the LangChain chat model.
+
+The `function_call_params` will only be used for function/tool calling when tools are specified.
+
+To register the custom client:
+
+from ads.llm.autogen.client_v02 import LangChainModelClient, register_custom_client
+register_custom_client(LangChainModelClient)
+
+Once registered with ADS, the custom LLM class will be auto-registered for all new agents.
+There is no need to call `register_model_client()` on each agent.
+
+References:
+https://microsoft.github.io/autogen/0.2/docs/notebooks/agentchat_huggingface_langchain/
+https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_custom_model.ipynb
+
+"""
+import copy
+import importlib
+import json
+import logging
+from typing import Any, Dict, List, Union
+from types import SimpleNamespace
+
+from autogen import ModelClient
+from autogen.oai.client import OpenAIWrapper, PlaceHolderClient
+from langchain_core.messages import AIMessage
+
+
+logger = logging.getLogger(__name__)
+
+# custom_clients is a dictionary mapping the name of the class to the actual class
+custom_clients = {}
+
+# There is a bug in GroupChat when using custom client:
+# https://github.com/microsoft/autogen/issues/2956
+# Here we will be patching the OpenAIWrapper to fix the issue.
+# With this patch, you only need to register the client once with ADS.
+# For example:
+#
+# from ads.llm.autogen.client_v02 import LangChainModelClient, register_custom_client
+# register_custom_client(LangChainModelClient)
+#
+# This patch will auto-register the custom LLM to all new agents.
+# So there is no need to call `register_model_client()` on each agent.
+OpenAIWrapper._original_register_default_client = OpenAIWrapper._register_default_client
+
+
+def _new_register_default_client(
+ self: OpenAIWrapper, config: Dict[str, Any], openai_config: Dict[str, Any]
+) -> None:
+ """This is a patched version of the _register_default_client() method
+    to automatically register custom clients for agents.
+ """
+ model_client_cls_name = config.get("model_client_cls")
+ if model_client_cls_name in custom_clients:
+ self._clients.append(PlaceHolderClient(config))
+ self.register_model_client(custom_clients[model_client_cls_name])
+ else:
+ self._original_register_default_client(
+ config=config, openai_config=openai_config
+ )
+
+
+# Patch the _register_default_client() method
+OpenAIWrapper._register_default_client = _new_register_default_client
+
+
+def register_custom_client(client_class):
+ """Registers custom client for AutoGen."""
+ if client_class.__name__ not in custom_clients:
+ custom_clients[client_class.__name__] = client_class
+
+
+def _convert_to_langchain_tool(tool):
+ """Converts the OpenAI tool spec to LangChain tool spec."""
+ if tool["type"] == "function":
+ tool = tool["function"]
+ required = tool["parameters"].get("required", [])
+ properties = copy.deepcopy(tool["parameters"]["properties"])
+ for key in properties.keys():
+ val = properties[key]
+ val["default"] = key in required
+ return {
+ "title": tool["name"],
+ "description": tool["description"],
+ "properties": properties,
+ }
+ raise NotImplementedError(f"Type {tool['type']} is not supported.")
+
+
+def _convert_to_openai_tool_call(tool_call):
+ """Converts the LangChain tool call in AI message to OpenAI tool call."""
+ return {
+ "id": tool_call.get("id"),
+ "function": {
+ "name": tool_call.get("name"),
+ "arguments": (
+ ""
+ if tool_call.get("args") is None
+ else json.dumps(tool_call.get("args"))
+ ),
+ },
+ "type": "function",
+ }
+
+
+class Message(AIMessage):
+ """Represents message returned from the LLM."""
+
+ @classmethod
+ def from_message(cls, message: AIMessage):
+ """Converts from LangChain AIMessage."""
+ message = copy.deepcopy(message)
+ message.__class__ = cls
+ message.tool_calls = [
+ _convert_to_openai_tool_call(tool) for tool in message.tool_calls
+ ]
+ return message
+
+ @property
+ def function_call(self):
+ """Function calls."""
+ return self.tool_calls
+
+
+class LangChainModelClient(ModelClient):
+ """Represents a model client wrapping a LangChain chat model."""
+
+ def __init__(self, config: dict, **kwargs) -> None:
+ super().__init__()
+ logger.info("LangChain model client config: %s", str(config))
+ # Make a copy of the config since we are popping some keys
+ config = copy.deepcopy(config)
+ # model_client_cls will always be LangChainModelClient
+ self.client_class = config.pop("model_client_cls")
+
+ # model_name is used in constructing the response.
+ self.model_name = config.get("model", "")
+
+        # If the config specifies function_call_params,
+        # pop the params and use them only for tool calling.
+ self.function_call_params = config.pop("function_call_params", {})
+
+        # If the config specifies invoke_params,
+        # pop the params and use them only when invoking the model.
+ self.invoke_params = config.pop("invoke_params", {})
+
+ # Import the LangChain class
+ if "langchain_cls" not in config:
+ raise ValueError("Missing langchain_cls in LangChain Model Client config.")
+ module_cls = config.pop("langchain_cls")
+ module_name, cls_name = str(module_cls).rsplit(".", 1)
+ langchain_module = importlib.import_module(module_name)
+ langchain_cls = getattr(langchain_module, cls_name)
+
+        # If the config specifies client_params,
+        # use only the client_params to initialize the LangChain model.
+        # Otherwise, use the remaining config.
+ self.client_params = config.get("client_params", config)
+
+ # Initialize the LangChain client
+ self.model = langchain_cls(**self.client_params)
+
+ def create(self, params) -> ModelClient.ModelClientResponseProtocol:
+ """Creates a LLM completion for a given config.
+
+ Parameters
+ ----------
+ params : dict
+ OpenAI API compatible parameters, including all the keys from llm_config.
+
+ Returns
+ -------
+ ModelClientResponseProtocol
+ Response from LLM
+
+ """
+ streaming = params.get("stream", False)
+ # TODO: num_of_responses
+ num_of_responses = params.get("n", 1)
+ messages = params.pop("messages", [])
+
+ invoke_params = copy.deepcopy(self.invoke_params)
+
+ tools = params.get("tools")
+ if tools:
+ model = self.model.bind_tools(
+ [_convert_to_langchain_tool(tool) for tool in tools]
+ )
+ # invoke_params["tools"] = tools
+ invoke_params.update(self.function_call_params)
+ else:
+ model = self.model
+
+ response = SimpleNamespace()
+ response.choices = []
+ response.model = self.model_name
+
+        if streaming and messages:
+            # Streaming responses are not supported yet; iterating over the
+            # chunks of the response would be implemented here.
+            raise NotImplementedError()
+ else:
+ # If streaming is not enabled, send a regular chat completion request
+ ai_message = model.invoke(messages, **invoke_params)
+ choice = SimpleNamespace()
+ choice.message = Message.from_message(ai_message)
+ response.choices.append(choice)
+ return response
+
+ def message_retrieval(
+ self, response: ModelClient.ModelClientResponseProtocol
+ ) -> Union[List[str], List[ModelClient.ModelClientResponseProtocol.Choice.Message]]:
+ """
+ Retrieve and return a list of strings or a list of Choice.Message from the response.
+
+ NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
+ since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
+ """
+ return [choice.message for choice in response.choices]
+
+ def cost(self, response: ModelClient.ModelClientResponseProtocol) -> float:
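+        """Returns the cost of the response. Cost tracking is not implemented, so this always returns 0."""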
+ response.cost = 0
+ return 0
+
+ @staticmethod
+ def get_usage(response: ModelClient.ModelClientResponseProtocol) -> Dict:
+ """Return usage summary of the response using RESPONSE_USAGE_KEYS."""
+ return {}
diff --git a/docs/source/user_guide/large_language_model/autogen_integration.rst b/docs/source/user_guide/large_language_model/autogen_integration.rst
new file mode 100644
index 000000000..e21a8bd3e
--- /dev/null
+++ b/docs/source/user_guide/large_language_model/autogen_integration.rst
@@ -0,0 +1,106 @@
+AutoGen Integration
+*******************
+
+ADS provides a custom LLM client for `AutoGen <https://microsoft.github.io/autogen/>`_. This client allows you to use LangChain chat models with AutoGen.
+
+.. admonition:: Requirements
+ :class: note
+
+   The AutoGen integration requires ``python>=3.9``, ``autogen-agentchat~=0.2``, ``langchain-community>=0.3``, and ``langchain-openai``.
+
+ .. code-block:: bash
+
+      pip install "autogen-agentchat~=0.2" "langchain-community>=0.3" langchain-openai
+
+
+Custom Client Registration
+==========================
+
+AutoGen requires custom clients to be registered with each agent after the agent is created. To simplify this process, ADS provides the ``register_custom_client()`` function to register the client globally. Once registered with ADS, every agent created afterwards will have the custom client registered automatically.
+
+The following code shows how you can import the custom client and register it with AutoGen.
+
+.. code-block:: python3
+
+ from ads.llm.autogen.client_v02 import LangChainModelClient, register_custom_client
+
+ # Register the custom LLM globally
+ register_custom_client(LangChainModelClient)
+
+If you don't want the custom client to be registered for all agents, you may skip the code above and instead call the ``register_model_client()`` method on each agent, as shown in the sketch below.
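+
+The following is a minimal sketch of per-agent registration, assuming ``llm_config`` is one of the LLM configs described below; the agent name is a placeholder:
+
+.. code-block:: python3
+
+    import autogen
+
+    from ads.llm.autogen.client_v02 import LangChainModelClient
+
+    # Create the agent first, then register the custom client with it.
+    assistant = autogen.AssistantAgent(
+        name="assistant",
+        llm_config={"config_list": [llm_config]},
+    )
+    assistant.register_model_client(model_client_cls=LangChainModelClient)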
+
+
+LLM Config
+==========
+
+The LLM config for the ``LangChainModelClient`` should have the following keys:
+
+* ``model_client_cls``, the name of the client class, which should always be ``LangChainModelClient``.
+* ``langchain_cls``, the LangChain chat model class with the full path.
+* ``model``, the model name for AutoGen to identify the model.
+* ``client_params``, the parameters for initializing the LangChain client.
+
+The following keys are optional:
+
+* ``invoke_params``, the parameters for invoking the chat model.
+* ``function_call_params``, the parameters for invoking the chat model with functions/tools.
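+
+Neither example below uses ``invoke_params``, so here is a minimal sketch of a config that does; the model name and stop sequence are placeholders:
+
+.. code-block:: python3
+
+    {
+        "model_client_cls": "LangChainModelClient",
+        "langchain_cls": "ads.llm.ChatOCIModelDeploymentVLLM",
+        "model": "odsc-llm",
+        "endpoint": "",
+        # invoke_params are passed as keyword arguments to the chat model's invoke() call.
+        "invoke_params": {"stop": ["\n\n"]},
+    }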
+
+Data Science Model Deployment
+-----------------------------
+
+Following is an example LLM config for an LLM deployed with AI Quick Actions on OCI Data Science Model Deployment:
+
+.. code-block:: python3
+
+ import ads
+ from ads.llm.chat_template import ChatTemplates
+
+    # You may use ADS to configure authentication globally
+ ads.set_auth("security_token", profile="DEFAULT")
+
+ {
+ "model_client_cls": "LangChainModelClient",
+ "langchain_cls": "ads.llm.ChatOCIModelDeploymentVLLM",
+        # Note that the `model` here is used by AutoGen and may differ from the `model` in `client_params`.
+ "model": "Mistral-7B",
+ # client_params will be used to initialize the LangChain ChatOCIModelDeploymentVLLM class.
+ "client_params": {
+ "model": "odsc-llm",
+ "endpoint": "",
+ "model_kwargs": {
+ "temperature": 0,
+ "max_tokens": 500
+ },
+        },
+        # function_call_params will only be added to the API call when functions/tools are added.
+        "function_call_params": {
+ "tool_choice": "auto",
+ "chat_template": ChatTemplates.hermes()
+ }
+ }
+
+
+OCI Generative AI
+-----------------
+
+Following is an example LLM config for the OCI Generative AI service:
+
+.. code-block:: python3
+
+ {
+ "model_client_cls": "LangChainModelClient",
+ "langchain_cls": "langchain_community.chat_models.oci_generative_ai.ChatOCIGenAI",
+ "model": "cohere.command-r-plus",
+ # client_params will be used to initialize the LangChain ChatOCIGenAI class.
+ "client_params": {
+ "model_id": "cohere.command-r-plus",
+ "compartment_id": "",
+ "model_kwargs": {
+ "temperature": 0,
+ "max_tokens": 4000
+ },
+            "service_endpoint": "https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
+ "auth_type": "SECURITY_TOKEN",
+ "auth_profile": "DEFAULT",
+ },
+ }
+
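+Using the Config
+================
+
+Once the custom client is registered, agents can be created with the config as usual. The following is a minimal end-to-end sketch, assuming ``llm_config`` holds one of the example configs above; the agent names and message are placeholders:
+
+.. code-block:: python3
+
+    import autogen
+
+    from ads.llm.autogen.client_v02 import LangChainModelClient, register_custom_client
+
+    # Register the custom client globally so that all new agents pick it up.
+    register_custom_client(LangChainModelClient)
+
+    assistant = autogen.AssistantAgent(
+        name="assistant",
+        llm_config={"config_list": [llm_config]},
+    )
+    user_proxy = autogen.UserProxyAgent(
+        name="user_proxy",
+        human_input_mode="NEVER",
+        code_execution_config=False,
+    )
+
+    # Start a two-agent conversation backed by the LangChain chat model.
+    user_proxy.initiate_chat(assistant, message="Hello!")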
diff --git a/docs/source/user_guide/large_language_model/index.rst b/docs/source/user_guide/large_language_model/index.rst
index 53959af3f..9b9a2d07a 100644
--- a/docs/source/user_guide/large_language_model/index.rst
+++ b/docs/source/user_guide/large_language_model/index.rst
@@ -39,6 +39,7 @@ ADS is designed to work with LangChain, enabling developers to incorporate vario
training_llm
langchain_models
+ autogen_integration
deploy_langchain_application
retrieval
guardrails
diff --git a/pyproject.toml b/pyproject.toml
index c77206338..8cf2a63f9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -212,6 +212,7 @@ aqua = ["jupyter_server"]
# Revisit this section continuously and update to recent version of libraries. focus on pyt3.9/3.10 versions.
testsuite = [
"arff",
+ "autogen-agentchat~=0.2",
"category_encoders==2.6.3", # set version to avoid backtracking
"cohere==4.53", # set version to avoid backtracking
"faiss-cpu",
diff --git a/tests/unitary/with_extras/autogen/__init__.py b/tests/unitary/with_extras/autogen/__init__.py
new file mode 100644
index 000000000..8b5902cd5
--- /dev/null
+++ b/tests/unitary/with_extras/autogen/__init__.py
@@ -0,0 +1,3 @@
+# coding: utf-8
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
diff --git a/tests/unitary/with_extras/autogen/test_autogen_client.py b/tests/unitary/with_extras/autogen/test_autogen_client.py
new file mode 100644
index 000000000..c8cce9121
--- /dev/null
+++ b/tests/unitary/with_extras/autogen/test_autogen_client.py
@@ -0,0 +1,104 @@
+# coding: utf-8
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+import sys
+from unittest import TestCase, mock
+
+import pytest
+
+if sys.version_info < (3, 9):
+    pytest.skip("The AutoGen integration requires Python 3.9 or newer.", allow_module_level=True)
+
+import autogen
+from langchain_core.messages import AIMessage, ToolCall
+from ads.llm.autogen.client_v02 import (
+ LangChainModelClient,
+ register_custom_client,
+ custom_clients,
+)
+from ads.llm import ChatOCIModelDeploymentVLLM
+
+
+ODSC_LLM_CONFIG = {
+ "model_client_cls": "LangChainModelClient",
+ "langchain_cls": "ads.llm.ChatOCIModelDeploymentVLLM",
+ "model": "Mistral",
+ "client_params": {
+ "model": "odsc-llm",
+ "endpoint": "",
+ "model_kwargs": {"temperature": 0, "max_tokens": 500},
+ },
+}
+
+TEST_PAYLOAD = {
+ "messages": ["hello", "hi"],
+ "tool": {
+ "type": "function",
+ "function": {
+ "name": "my_tool",
+ "description": "my_desc",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "order_id": {
+ "type": "string",
+ "description": "The customer's order ID.",
+ }
+ },
+ "required": ["order_id"],
+ },
+ },
+ },
+}
+
+MOCKED_RESPONSE_CONTENT = "hello"
+MOCKED_AI_MESSAGE = AIMessage(
+ content=MOCKED_RESPONSE_CONTENT,
+ tool_calls=[ToolCall(name="my_tool", args={"arg": "val"}, id="a")],
+)
+MOCKED_TOOL_CALL = [
+ {
+ "id": "a",
+ "function": {
+ "name": "my_tool",
+ "arguments": '{"arg": "val"}',
+ },
+ "type": "function",
+ }
+]
+
+
+class AutoGenTestCase(TestCase):
+ @mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
+ def test_register_client(self, signer):
+ # There should be no custom client before registration.
+ self.assertEqual(custom_clients, {})
+ register_custom_client(LangChainModelClient)
+ self.assertEqual(custom_clients, {"LangChainModelClient": LangChainModelClient})
+ # Test LLM config without custom LLM
+ config_list = [
+ {
+ "model": "llama-7B",
+ "api_key": "123",
+ }
+ ]
+ wrapper = autogen.oai.client.OpenAIWrapper(config_list=config_list)
+ self.assertEqual(type(wrapper._clients[0]), autogen.oai.client.OpenAIClient)
+ # Test LLM config with custom LLM
+ config_list = [ODSC_LLM_CONFIG]
+ wrapper = autogen.oai.client.OpenAIWrapper(config_list=config_list)
+ self.assertEqual(type(wrapper._clients[0]), LangChainModelClient)
+
+ @mock.patch("ads.common.auth.default_signer", return_value=dict(signer=None))
+ @mock.patch(
+ "ads.llm.ChatOCIModelDeploymentVLLM.invoke", return_value=MOCKED_AI_MESSAGE
+ )
+ def test_create_completion(self, mocked_invoke, *args):
+ client = LangChainModelClient(config=ODSC_LLM_CONFIG)
+ self.assertEqual(client.model_name, "Mistral")
+ self.assertEqual(type(client.model), ChatOCIModelDeploymentVLLM)
+ self.assertEqual(client.model._invocation_params(stop=None)["max_tokens"], 500)
+ response = client.create(TEST_PAYLOAD)
+ message = response.choices[0].message
+ self.assertEqual(message.content, MOCKED_RESPONSE_CONTENT)
+ self.assertEqual(message.tool_calls, MOCKED_TOOL_CALL)