Skip to content

Commit 44342ad

Browse files
pjoshi30Preetam Joshi
and authored
Simplifying Detect decorator. Added more documentation. (#20)
Adding the Aimon Rely README, images, the postman collection, a simple client and examples. A few small changes for error handling in the client and the example application. Getting the Aimon API key from the streamlit app updating README Updating langchain example gif Updating API endpoint Adding V2 API with support for conciseness, completeness and toxicity checks (#1) * Adding V2 API with support for conciseness, completeness and toxicity checks. * Removing prints and updating config for the example application. * Updating README --------- Updating postman collection Fixed the simple aimon client's handling of batch requests. Updated postman collection. Added support for a user_query parameter in the input data dictionary. Updating readme Fixed bug in the example app Uploading client code Adding more convenience APIs Fixing bug in create_dataset Added Github actions config to publish to PyPI. Cleaned up dependencies and updated documentation. Fixing langchain example Fixing doc links Formatting changes Changes for aimon-rely * Adding instruction adherence and hallucination v0.2 to the client Updating git ignore Adding more to gitignore Removing .idea files * Fixing doc string * Updating documentation * Updating Client to use V3 API * Fixing test * Updating tests * Updating documentation in the client * Adding .streamlit dir to .gitignore * initial version of decorators for syntactic sugar * A few more changes * updating analyze and detect decorators * Adding new notebooks * Fixing bug in analyze decorator * Updating Detect decorator to make it simpler. Adding Metaflow example. Adding documentation for the chatbot. --------- Co-authored-by: Preetam Joshi <preetam@aimon.ai>
1 parent 642c883 commit 44342ad

File tree

10 files changed

+181
-109
lines changed

10 files changed

+181
-109
lines changed

.gitignore

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,4 +23,8 @@ lib64
2323
# Installer logs
2424
pip-log.txt
2525

26-
.streamlit/*
26+
.streamlit
27+
.streamlit/
28+
.metaflow
29+
.metaflow/
30+
.ipynb_checkpoints

aimon/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,5 +80,5 @@
8080
# Some of our exported symbols are builtins which we can't set attributes for.
8181
pass
8282

83-
from .decorators.detect import DetectWithContextQuery, DetectWithContextQueryInstructions, DetectWithQueryFuncReturningContext, DetectWithQueryInstructionsFuncReturningContext
83+
from .decorators.detect import Detect
8484
from .decorators.analyze import Analyze, Application, Model

aimon/decorators/detect.py

Lines changed: 27 additions & 87 deletions
Original file line numberDiff line numberDiff line change
@@ -3,105 +3,45 @@
33
from .common import AimonClientSingleton
44

55

6-
class DetectWithQueryFuncReturningContext(object):
6+
class Detect:
77
DEFAULT_CONFIG = {'hallucination': {'detector_name': 'default'}}
88

9-
def __init__(self, api_key=None, config=None):
9+
def __init__(self, values_returned, api_key=None, config=None):
10+
"""
11+
:param values_returned: A list of values in the order returned by the decorated function
12+
Acceptable values are 'generated_text', 'context', 'user_query', 'instructions'
13+
"""
1014
self.client = AimonClientSingleton.get_instance(api_key)
1115
self.config = config if config else self.DEFAULT_CONFIG
16+
self.values_returned = values_returned
17+
if self.values_returned is None or len(self.values_returned) == 0:
18+
raise ValueError("Values returned by the decorated function must be specified")
1219

1320
def __call__(self, func):
1421
@wraps(func)
15-
def wrapper(user_query, *args, **kwargs):
16-
result, context = func(user_query, *args, **kwargs)
22+
def wrapper(*args, **kwargs):
23+
result = func(*args, **kwargs)
1724

18-
if result is None or context is None:
19-
raise ValueError("Result and context must be returned by the decorated function")
25+
# Handle the case where the result is a single value
26+
if not isinstance(result, tuple):
27+
result = (result,)
2028

21-
data_to_send = [{
22-
"user_query": user_query,
23-
"context": context,
24-
"generated_text": result,
25-
"config": self.config
26-
}]
29+
# Create a dictionary mapping output names to results
30+
result_dict = {name: value for name, value in zip(self.values_returned, result)}
2731

28-
aimon_response = self.client.inference.detect(body=data_to_send)[0]
29-
return result, context, aimon_response
30-
31-
return wrapper
32-
33-
34-
class DetectWithQueryInstructionsFuncReturningContext(DetectWithQueryFuncReturningContext):
35-
def __call__(self, func):
36-
@wraps(func)
37-
def wrapper(user_query, instructions, *args, **kwargs):
38-
result, context = func(user_query, instructions, *args, **kwargs)
39-
40-
if result is None or context is None:
41-
raise ValueError("Result and context must be returned by the decorated function")
42-
43-
data_to_send = [{
44-
"user_query": user_query,
45-
"context": context,
46-
"generated_text": result,
47-
"instructions": instructions,
48-
"config": self.config
49-
}]
50-
51-
aimon_response = self.client.inference.detect(body=data_to_send)[0]
52-
return result, context, aimon_response
53-
54-
return wrapper
55-
56-
57-
# Another class but does not include instructions in the wrapper call
58-
class DetectWithContextQuery(object):
59-
DEFAULT_CONFIG = {'hallucination': {'detector_name': 'default'}}
60-
61-
def __init__(self, api_key=None, config=None):
62-
self.client = AimonClientSingleton.get_instance(api_key)
63-
self.config = config if config else self.DEFAULT_CONFIG
64-
65-
def __call__(self, func):
66-
@wraps(func)
67-
def wrapper(context, user_query, *args, **kwargs):
68-
result = func(context, user_query, *args, **kwargs)
32+
aimon_payload = {}
33+
if 'generated_text' in result_dict:
34+
aimon_payload['generated_text'] = result_dict['generated_text']
35+
if 'context' in result_dict:
36+
aimon_payload['context'] = result_dict['context']
37+
if 'user_query' in result_dict:
38+
aimon_payload['user_query'] = result_dict['user_query']
39+
if 'instructions' in result_dict:
40+
aimon_payload['instructions'] = result_dict['instructions']
6941

70-
if result is None:
71-
raise ValueError("Result must be returned by the decorated function")
72-
73-
data_to_send = [{
74-
"context": context,
75-
"user_query": user_query,
76-
"generated_text": result,
77-
"config": self.config
78-
}]
42+
data_to_send = [aimon_payload]
7943

8044
aimon_response = self.client.inference.detect(body=data_to_send)[0]
81-
return result, aimon_response
45+
return result + (aimon_response,)
8246

8347
return wrapper
84-
85-
86-
class DetectWithContextQueryInstructions(DetectWithContextQuery):
87-
def __call__(self, func):
88-
@wraps(func)
89-
def wrapper(context, user_query, instructions, *args, **kwargs):
90-
result = func(context, user_query, instructions, *args, **kwargs)
91-
92-
if result is None:
93-
raise ValueError("Result must be returned by the decorated function")
94-
95-
data_to_send = [{
96-
"context": context,
97-
"user_query": user_query,
98-
"generated_text": result,
99-
"instructions": instructions,
100-
"config": self.config
101-
}]
102-
103-
aimon_response = self.client.inference.detect(body=data_to_send)[0]
104-
return result, aimon_response
105-
106-
return wrapper
107-

examples/chatbot/README.md

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# AIMon Chatbot Demo
2+
3+
This is a simple chatbot demo that uses AIMon to check responses to user queries.
4+
The chatbot is built using LLamaIndex. This chatbot application intentionally crawls a [single webpage](http://paulgraham.com/worked.html).
5+
This way we can demonstrate how AIMon's hallucination detector works when the chatbot is asked questions that are not
6+
related to the webpage, in which case it is likely to answer out of its own learned knowledge.
7+
8+
## Setup
9+
10+
### Installation
11+
12+
Install the required packages from the `requirements.txt` file specified in this directory.
13+
14+
```bash
15+
pip install -r requirements.txt
16+
```
17+
18+
### API Keys
19+
20+
You will need to specify AIMon and OpenAI API keys in a `secrets.toml` file inside the
21+
`.streamlit` directory.
22+
23+
```toml
24+
openai_key=YOUR_OPENAI_API
25+
aimon_api_key=YOUR_AIMON_API
26+
```
27+
28+
### Running the Chatbot
29+
30+
The chatbot is a streamlit app. You can run it using this command:
31+
32+
```bash
33+
streamlit run chatbot.py
34+
```
35+
36+

examples/aimon_chatbot_demo.py renamed to examples/chatbot/aimon_chatbot_demo.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,17 +5,17 @@
55
from llama_index.core.memory import ChatMemoryBuffer
66
from llama_index.core import StorageContext, load_index_from_storage
77
from llama_index.readers.web import SimpleWebPageReader
8-
from aimon import DetectWithQueryInstructionsFuncReturningContext
8+
from aimon import Detect
99
from aimon import AuthenticationError
1010
import logging
1111
import os
1212

1313
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
14-
st.set_page_config(page_title="AIMon Chatbot Demo", page_icon="🦙", layout="centered", initial_sidebar_state="auto",
14+
st.set_page_config(page_title="AIMon Chatbot Demo", layout="centered", initial_sidebar_state="auto",
1515
menu_items=None)
1616

1717
aimon_config = {'hallucination': {'detector_name': 'default'}, 'instruction_adherence': {'detector_name': 'default'}}
18-
detect = DetectWithQueryInstructionsFuncReturningContext(st.secrets.aimon_api_key, aimon_config)
18+
detect = Detect(values_returned=['context', 'user_query', 'instructions', 'generated_text'], api_key=st.secrets.aimon_api_key, config=aimon_config)
1919

2020

2121
@st.cache_resource(show_spinner=False)
@@ -85,7 +85,7 @@ def split_into_paragraphs(text):
8585
def am_chat(usr_prompt, instructions):
8686
response = st.session_state.chat_engine.chat(usr_prompt)
8787
context = get_source_docs(response)
88-
return response.response, context
88+
return context, usr_prompt, instructions, response.response
8989

9090

9191
def execute():
@@ -136,7 +136,7 @@ def execute():
136136
if st.session_state.messages[-1]["role"] != "assistant":
137137
with st.chat_message("assistant"):
138138
if cprompt:
139-
response, context, am_res = am_chat(cprompt, instructions)
139+
context, usr_prompt, instructions, response, am_res = am_chat(cprompt, instructions)
140140
message = {"role": "assistant", "content": response}
141141
am_res_json = am_res.to_json()
142142
st.write(response)
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
11
llama-index
22
llama-index-readers-web
33
streamlit
4+
openai
5+
aimon
46

examples/metaflow/README.md

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# AIMon Metaflow Demo
2+
3+
This is a simple Metaflow flow that generates a summary of an input document.
4+
It uses AIMon to check the quality of the generated summary.
5+
The summarizer is built using Langchain.
6+
7+
## Setup
8+
9+
### Installation
10+
11+
Install the required packages from the `requirements.txt` file specified in this directory.
12+
13+
```bash
14+
pip install -r requirements.txt
15+
```
16+
17+
### API Keys
18+
19+
You will need to specify AIMon and OpenAI API keys as part of their respective environment variables.
20+
21+
```bash
22+
export OPENAI_KEY=YOUR_OPENAI_API
23+
export AIMON_API_KEY=YOUR_AIMON_API
24+
```
25+
26+
### Running the flow
27+
28+
The flow can be run using the following command:
29+
30+
```bash
31+
python summarization_flow.py run
32+
```
33+
34+

examples/metaflow/requirements.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
langchain
2+
langchain-community
3+
metaflow
4+
aimon
5+
openai
6+
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
from metaflow import FlowSpec, step
2+
from langchain_community.llms import OpenAI
3+
from langchain.text_splitter import CharacterTextSplitter
4+
from langchain.docstore.document import Document
5+
from langchain.chains.summarize import load_summarize_chain
6+
from aimon import Detect
7+
import os
8+
9+
detect = Detect(values_returned=['context', 'generated_text'], config={"hallucination": {"detector_name": "default"}})
10+
11+
class SummarizeFlow(FlowSpec):
12+
13+
@step
14+
def start(self):
15+
# Load your document here
16+
self.document = """
17+
Your document text goes here. Replace this text with the actual content you want to summarize.
18+
"""
19+
20+
context, summary, aimon_res = self.summarize(self.document)
21+
22+
# Print the summary
23+
print("Summary:")
24+
print(summary)
25+
26+
27+
# Print the AIMon result
28+
print("AIMon hallucination detection:")
29+
print(aimon_res.hallucination)
30+
31+
self.next(self.end)
32+
33+
@detect
34+
def summarize(self, context):
35+
# Split the source text
36+
text_splitter = CharacterTextSplitter()
37+
texts = text_splitter.split_text(context)
38+
39+
# Create Document objects for the texts
40+
docs = [Document(page_content=t) for t in texts[:3]]
41+
42+
openai_key = os.getenv("OPENAI_KEY")
43+
44+
45+
# Initialize the OpenAI model
46+
llm = OpenAI(temperature=0, api_key=openai_key)
47+
48+
# Create the summarization chain
49+
summarize_chain = load_summarize_chain(llm)
50+
51+
# Summarize the document
52+
return context, summarize_chain.run(docs)
53+
54+
@step
55+
def end(self):
56+
print("Flow completed.")
57+
58+
if __name__ == "__main__":
59+
SummarizeFlow()
60+

examples/notebooks/aimon_decorators_langchain_summarization_0_5_0.ipynb

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -90,42 +90,32 @@
9090
},
9191
{
9292
"cell_type": "code",
93-
"execution_count": 6,
93+
"execution_count": 11,
9494
"id": "bcdddfa8-43c7-446a-9337-3ad0f16a015e",
9595
"metadata": {},
9696
"outputs": [
97-
{
98-
"name": "stderr",
99-
"output_type": "stream",
100-
"text": [
101-
"/Users/preetamjoshi/projects/aimon/pj_aimon_rely/examples/chbt/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The class `OpenAI` was deprecated in LangChain 0.0.10 and will be removed in 0.3.0. An updated version of the class exists in the langchain-openai package and should be used instead. To use it run `pip install -U langchain-openai` and import as `from langchain_openai import OpenAI`.\n",
102-
" warn_deprecated(\n",
103-
"/Users/preetamjoshi/projects/aimon/pj_aimon_rely/examples/chbt/lib/python3.9/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 0.3.0. Use invoke instead.\n",
104-
" warn_deprecated(\n"
105-
]
106-
},
10797
{
10898
"data": {
10999
"text/plain": [
110-
"[(' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be beneficial for developers. This new version includes features like async support and improved error handling.',\n",
100+
"[(' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be valuable for developers. This new version includes features like async support and improved error handling.',\n",
111101
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
112-
" ('\\n\\nTo configure the Acme python client, environment variables must be set up and dependencies must be installed. Detailed instructions for both basic and advanced setups can be found in the official documentation.',\n",
102+
" ('\\n\\nTo configure the Acme python client, follow the official documentation which includes setting up environment variables and installing dependencies for both basic and advanced setups.',\n",
113103
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
114104
" (' The Acme python client is compatible with Python 3.6+ and multiple databases, including MySQL, PostgreSQL, and MongoDB. It is also suitable for cross-language projects with Node.js.',\n",
115105
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
116106
" (' The Acme python client may have installation, package conflicts, and connectivity issues. Troubleshooting involves checking the Python environment, dependencies, and log files, with specific error resolutions available in the online help section.',\n",
117107
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
118108
" (' Acme recently launched version 2.1 of their Python library, which has deep integrations with the Python ecosystem and has been proven to be beneficial for developers. This new version includes features like async support and improved error handling.',\n",
119109
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
120-
" ('\\n\\nTo configure the Acme python client, environment variables must be set up and dependencies must be installed. Detailed instructions for both basic and advanced setups can be found in the official documentation.',\n",
110+
" ('\\n\\nTo configure the Acme python client, follow the official documentation which includes setting up environment variables and installing dependencies for both basic and advanced setups.',\n",
121111
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
122112
" (' The Acme python client is compatible with Python 3.6+ and multiple databases, including MySQL, PostgreSQL, and MongoDB. It is also suitable for cross-language projects with Node.js.',\n",
123113
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.')),\n",
124114
" (' The Acme python client may have installation, package conflicts, and connectivity issues. Troubleshooting involves checking the Python environment, dependencies, and log files, with specific error resolutions available in the online help section.',\n",
125115
" AnalyzeCreateResponse(message='Data successfully sent to AIMon.'))]"
126116
]
127117
},
128-
"execution_count": 6,
118+
"execution_count": 11,
129119
"metadata": {},
130120
"output_type": "execute_result"
131121
}

0 commit comments

Comments
 (0)