Skip to content

Commit 5911408

Browse files
authored
Merge pull request #16 from hideya/dev
Update to ver 0.3.0
2 parents 69fe174 + 6f340e2 commit 5911408

File tree

8 files changed

+821
-2137
lines changed

8 files changed

+821
-2137
lines changed

.env.template

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
1-
1+
# https://console.anthropic.com/settings/keys
22
ANTHROPIC_API_KEY=sk-ant-...
3+
# https://platform.openai.com/api-keys
34
OPENAI_API_KEY=sk-proj-...
5+
# https://aistudio.google.com/apikey
46
GOOGLE_API_KEY=AI...
5-
# GROQ_API_KEY=gsk_...
67

7-
# GITHUB_PERSONAL_ACCESS_TOKEN=github_pat_...
88
# BRAVE_API_KEY=BSA...
9+
# GITHUB_PERSONAL_ACCESS_TOKEN=github_pat_...
10+
# NOTION_INTEGRATION_SECRET=ntn_...

README.md

Lines changed: 15 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,17 @@
44
**Quickly test and explore MCP servers from the command line!**
55

66
A simple, text-based CLI client for [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) servers built with LangChain and TypeScript.
7+
This tool performs automatic schema adjustments for LLM compatibility.
78
Suitable for testing MCP servers, exploring their capabilities, and prototyping integrations.
89

910
Internally it uses [LangChain ReAct Agent](https://github.com/langchain-ai/react-agent-js) and
10-
a utility function `convertMcpToLangchainTools()` from [`@h1deya/langchain-mcp-tools`](https://www.npmjs.com/package/@h1deya/langchain-mcp-tools).
11+
a utility function `convertMcpToLangchainTools()` from
12+
[`@h1deya/langchain-mcp-tools`](https://www.npmjs.com/package/@h1deya/langchain-mcp-tools).
13+
This function performs the aforementioned MCP tools schema transformations for LLM compatibility.
14+
See [this page](https://github.com/hideya/langchain-mcp-tools-ts/blob/main/README.md#llm-provider-schema-compatibility)
15+
for details.
16+
17+
A Python equivalent of this utility is available [here](https://pypi.org/project/mcp-chat/)
1118

1219
## Prerequisites
1320

@@ -18,7 +25,7 @@ a utility function `convertMcpToLangchainTools()` from [`@h1deya/langchain-mcp-t
1825
[OpenAI](https://platform.openai.com/api-keys),
1926
[Anthropic](https://console.anthropic.com/settings/keys),
2027
and/or
21-
[Google GenAI](https://aistudio.google.com/apikey)
28+
[Google AI Studio (for GenAI/Gemini)](https://aistudio.google.com/apikey)
2229
as needed
2330

2431
## Quick Start
@@ -43,7 +50,7 @@ a utility function `convertMcpToLangchainTools()` from [`@h1deya/langchain-mcp-t
4350
// "model_provider": "anthropic",
4451
// "model": "claude-3-5-haiku-latest",
4552
// "model_provider": "google_genai",
46-
// "model": "gemini-2.0-flash",
53+
// "model": "gemini-2.5-flash",
4754
},
4855

4956
"mcp_servers": {
@@ -128,9 +135,9 @@ mcp-try-cli --help
128135

129136
## Supported LLM Providers
130137

131-
- **OpenAI**: `gpt-4o`, `gpt-4o-mini`, etc.
138+
- **OpenAI**: `o4-mini`, `gpt-4o-mini`, etc.
132139
- **Anthropic**: `claude-sonnet-4-0`, `claude-3-5-haiku-latest`, etc.
133-
- **Google (GenAI)**: `gemini-2.0-flash`, `gemini-1.5-pro`, etc.
140+
- **Google (GenAI)**: `gemini-2.5-pro`, `gemini-2.5-flash`, etc.
134141

135142
## Configuration
136143

@@ -153,7 +160,7 @@ Create a `llm_mcp_config.json5` file:
153160
{
154161
"llm": {
155162
"model_provider": "openai",
156-
"model": "gpt-4o-mini",
163+
"model": "gpt-4.1-nano",
157164
// model: "o4-mini",
158165
},
159166

@@ -165,8 +172,8 @@ Create a `llm_mcp_config.json5` file:
165172

166173
// "llm": {
167174
// "model_provider": "google_genai",
168-
// "model": "gemini-2.0-flash",
169-
// // "model": "gemini-2.5-pro-preview-06-05",
175+
// "model": "gemini-2.5-flash",
176+
// // "model": "gemini-2.5-pro",
170177
// }
171178

172179
"example_queries": [
@@ -246,14 +253,6 @@ There are quite a few useful MCP servers already available:
246253
- Use `--verbose` flag for detailed output
247254
- Refer to [MCP documentation](https://modelcontextprotocol.io/)
248255

249-
## Development
250-
251-
This tool is built with:
252-
- [Model Context Protocol (MCP)](https://modelcontextprotocol.io/)
253-
- [LangChain](https://langchain.com/) for LLM integration
254-
- [TypeScript](https://www.typescriptlang.org/) for type safety
255-
- [Yargs](https://yargs.js.org/) for CLI parsing
256-
257256
## License
258257

259258
MIT License - see [LICENSE](LICENSE) file for details.

README_DEV.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ For the convenience of debugging MCP servers, this client prints local (stdio) M
6060

6161
LLMs from Anthropic, OpenAI and Google (GenAI) are currently supported.
6262

63-
A python version of this MCP client is available
63+
A Python version of this MCP client is available
6464
[here](https://github.com/hideya/mcp-client-langchain-py)
6565

6666
## Prerequisites

llm_mcp_config.json5

Lines changed: 32 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -15,32 +15,33 @@
1515
// // "max_tokens": 10000,
1616
// },
1717

18-
"llm": {
19-
// https://platform.openai.com/docs/pricing
20-
// https://platform.openai.com/settings/organization/billing/overview
21-
"model_provider": "openai",
22-
"model": "gpt-4o-mini",
23-
// "model": "o4-mini",
24-
// "temperature": 0.0, // 'temperature' is not supported with "o4-mini"
25-
// "max_completion_tokens": 10000, // Use 'max_completion_tokens' instead of 'max_tokens'
26-
},
27-
2818
// "llm": {
29-
// // https://ai.google.dev/gemini-api/docs/pricing
30-
// // https://console.cloud.google.com/billing
31-
// "model_provider": "google_genai",
32-
// "model": "gemini-2.0-flash",
33-
// // "model": "gemini-1.5-pro",
34-
// // "temperature": 0.0,
35-
// // "max_tokens": 10000,
19+
// // https://platform.openai.com/docs/pricing
20+
// // https://platform.openai.com/settings/organization/billing/overview
21+
// "model_provider": "openai",
22+
// "model": "gpt-4.1-nano",
23+
// // "model": "o4-mini",
24+
// // "temperature": 0.0, // 'temperature' is not supported with "o4-mini"
25+
// // "max_completion_tokens": 10000, // Use 'max_completion_tokens' instead of 'max_tokens'
3626
// },
3727

28+
"llm": {
29+
// https://ai.google.dev/gemini-api/docs/pricing
30+
// https://console.cloud.google.com/billing
31+
"model_provider": "google_genai",
32+
"model": "gemini-2.5-flash",
33+
// "model": "gemini-2.5-pro",
34+
// "temperature": 0.0,
35+
// "max_tokens": 10000,
36+
},
37+
3838
"example_queries": [
39-
"Are there any weather alerts in California?",
4039
"Read the news headlines on bbc.com",
4140
"Read and briefly summarize the LICENSE file",
41+
"Are there any weather alerts in California?",
42+
// "What's the news from Tokyo today?",
4243
// "Open the webpage at bbc.com",
43-
// "Search the web and get today's news related to tokyo",
44+
// "Tell me about my Notion account",
4445
],
4546

4647
"mcp_servers": {
@@ -82,7 +83,7 @@
8283
// },
8384

8485
// // Test SSE connection with the auto fallback
85-
// // See the comments at the top of index.ts
86+
// // See the comments at the top of src/index.ts
8687
// weather: {
8788
// "url": "http://localhost:${SSE_SERVER_PORT}/sse"
8889
// },
@@ -112,5 +113,16 @@
112113
// "args": [ "-y", "@modelcontextprotocol/server-brave-search"],
113114
// "env": { "BRAVE_API_KEY": "${BRAVE_API_KEY}" }
114115
// },
116+
117+
// notion: {
118+
// "command": "npx",
119+
// "args": ["-y", "@notionhq/notion-mcp-server"],
120+
// "env": {
121+
// // Although the following implies that this MCP server is designed for
122+
// // OpenAI LLMs, it works fine with other models.
123+
// // Tested with Claude and Gemini (with schema adjustments).
124+
// "OPENAPI_MCP_HEADERS": '{"Authorization": "Bearer ${NOTION_INTEGRATION_SECRET}", "Notion-Version": "2022-06-28"}'
125+
// },
126+
// },
115127
}
116128
}

0 commit comments

Comments
 (0)