Skip to content

Commit 631579c

Browse files
authored
oss(py): integrations work (#1365)
1 parent 218218a commit 631579c

File tree

21 files changed

+158
-166
lines changed

21 files changed

+158
-166
lines changed

pipeline/preprocessors/link_map.py

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -147,17 +147,36 @@ class LinkMap(TypedDict):
147147
# Integrations
148148
# langchain-openai
149149
"langchain-openai": "integrations/langchain_openai",
150-
"BaseChatOpenAI": "integrations/langchain_openai/BaseChatOpenAI/",
151-
"ChatOpenAI": "integrations/langchain_openai/ChatOpenAI/",
152-
"AzureChatOpenAI": "integrations/langchain_openai/AzureChatOpenAI/",
153-
"OpenAI": "integrations/langchain_openai/OpenAI/",
154-
"AzureOpenAI": "integrations/langchain_openai/AzureOpenAI/",
155-
"OpenAIEmbeddings": "integrations/langchain_openai/OpenAIEmbeddings/",
156-
"AzureOpenAIEmbeddings": "integrations/langchain_openai/AzureOpenAIEmbeddings/",
150+
"BaseChatOpenAI": "integrations/langchain_openai/BaseChatOpenAI",
151+
"ChatOpenAI": "integrations/langchain_openai/ChatOpenAI",
152+
"AzureChatOpenAI": "integrations/langchain_openai/AzureChatOpenAI",
153+
"OpenAI": "integrations/langchain_openai/OpenAI",
154+
"AzureOpenAI": "integrations/langchain_openai/AzureOpenAI",
155+
"OpenAIEmbeddings": "integrations/langchain_openai/OpenAIEmbeddings",
156+
"AzureOpenAIEmbeddings": "integrations/langchain_openai/AzureOpenAIEmbeddings",
157157
# langchain-anthropic
158158
"langchain-anthropic": "integrations/langchain_anthropic",
159-
"ChatAnthropic": "integrations/langchain_anthropic/ChatAnthropic/",
160-
"AnthropicLLM": "integrations/langchain_anthropic/AnthropicLLM/",
159+
"ChatAnthropic": "integrations/langchain_anthropic/ChatAnthropic",
160+
"AnthropicLLM": "integrations/langchain_anthropic/AnthropicLLM",
161+
# langchain-google
162+
"langchain-google": "integrations/langchain_google",
163+
"langchain-google-genai": "integrations/langchain_google_genai",
164+
"ChatGoogleGenerativeAI": "integrations/langchain_google_genai/#langchain_google_genai.ChatGoogleGenerativeAI",
165+
"langchain-google-vertexai": "integrations/langchain_google_vertexai",
166+
"ChatVertexAI": "integrations/langchain_google_vertexai/#langchain_google_vertexai.ChatVertexAI",
167+
"langchain-google-community": "integrations/langchain_google_community",
168+
# langchain-ollama
169+
"langchain-ollama": "integrations/langchain_ollama",
170+
"ChatOllama": "integrations/langchain_ollama/#langchain_ollama.ChatOllama",
171+
# langchain-xai
172+
"langchain-xai": "integrations/langchain_xai",
173+
"ChatXAI": "integrations/langchain_xai/#langchain_xai.ChatXAI",
174+
# langchain-groq
175+
"langchain-groq": "integrations/langchain_groq",
176+
"ChatGroq": "integrations/langchain_groq/#langchain_groq.ChatGroq",
177+
# langchain-deepseek
178+
"langchain-deepseek": "integrations/langchain_deepseek",
179+
"ChatDeepSeek": "integrations/langchain_deepseek/#langchain_deepseek.ChatDeepSeek",
161180
# Models
162181
"init_chat_model": "langchain/models/#langchain.chat_models.init_chat_model",
163182
"init_chat_model(model)": "langchain/models/#langchain.chat_models.init_chat_model(model)",

src/images/cat.png

2.34 MB
Loading

src/oss/javascript/integrations/chat/google_generative_ai.mdx

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ Now we can instantiate our model object and generate chat completions:
7474
import { ChatGoogleGenerativeAI } from "@langchain/google-genai"
7575

7676
const llm = new ChatGoogleGenerativeAI({
77-
model: "gemini-1.5-pro",
77+
model: "gemini-2.5-pro",
7878
temperature: 0,
7979
maxRetries: 2,
8080
// other params...
@@ -168,7 +168,7 @@ import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
168168
import { HarmBlockThreshold, HarmCategory } from "@google/generative-ai";
169169

170170
const llmWithSafetySettings = new ChatGoogleGenerativeAI({
171-
model: "gemini-1.5-pro",
171+
model: "gemini-2.5-pro",
172172
temperature: 0,
173173
safetySettings: [
174174
{
@@ -255,7 +255,7 @@ const searchRetrievalTool: GoogleSearchRetrievalTool = {
255255
}
256256
};
257257
const searchRetrievalModel = new ChatGoogleGenerativeAI({
258-
model: "gemini-1.5-pro",
258+
model: "gemini-2.5-pro",
259259
temperature: 0,
260260
maxRetries: 0,
261261
}).bindTools([searchRetrievalTool]);
@@ -453,7 +453,7 @@ const codeExecutionTool: CodeExecutionTool = {
453453
codeExecution: {}, // Simply pass an empty object to enable it.
454454
};
455455
const codeExecutionModel = new ChatGoogleGenerativeAI({
456-
model: "gemini-1.5-pro",
456+
model: "gemini-2.5-pro",
457457
temperature: 0,
458458
maxRetries: 0,
459459
}).bindTools([codeExecutionTool]);
@@ -564,7 +564,7 @@ const fileResult = await fileManager.uploadFile(pathToVideoFile, {
564564

565565
// creates cached content AFTER uploading is finished
566566
const cachedContent = await cacheManager.create({
567-
model: "models/gemini-1.5-flash-001",
567+
model: "models/gemini-2.5-flash",
568568
displayName: displayName,
569569
systemInstruction: "You are an expert video analyzer, and your job is to answer " +
570570
"the user's query based on the video file you have access to.",
@@ -594,7 +594,6 @@ await model.invoke("Summarize the video");
594594

595595
**Note**
596596

597-
- Context caching supports both Gemini 1.5 Pro and Gemini 1.5 Flash. Context caching is only available for stable models with fixed versions (for example, gemini-1.5-pro-001). You must include the version postfix (for example, the -001 in gemini-1.5-pro-001).
598597
- The minimum input token count for context caching is 32,768, and the maximum is the same as the maximum for the given model.
599598

600599
## Gemini Prompting FAQs

src/oss/javascript/integrations/chat/google_vertex_ai.mdx

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@ const searchRetrievalTool = {
172172
};
173173

174174
const searchRetrievalModel = new ChatVertexAI({
175-
model: "gemini-1.5-pro",
175+
model: "gemini-2.5-pro",
176176
temperature: 0,
177177
maxRetries: 0,
178178
}).bindTools([searchRetrievalTool]);
@@ -218,7 +218,7 @@ const searchRetrievalToolWithDataset = {
218218
};
219219

220220
const searchRetrievalModelWithDataset = new ChatVertexAI({
221-
model: "gemini-1.5-pro",
221+
model: "gemini-2.5-pro",
222222
temperature: 0,
223223
maxRetries: 0,
224224
}).bindTools([searchRetrievalToolWithDataset]);
@@ -248,7 +248,7 @@ Once you've created a cache, you can pass its id in as a runtime param as follow
248248
import { ChatVertexAI } from "@langchain/google-vertexai";
249249

250250
const modelWithCachedContent = new ChatVertexAI({
251-
model: "gemini-1.5-pro-002",
251+
model: "gemini-2.5-pro",
252252
location: "us-east5",
253253
});
254254

@@ -262,7 +262,7 @@ You can also bind this field directly onto the model instance:
262262

263263
```typescript
264264
const modelWithBoundCachedContent = new ChatVertexAI({
265-
model: "gemini-1.5-pro-002",
265+
model: "gemini-2.5-pro-002",
266266
location: "us-east5",
267267
}).bind({
268268
cachedContent:

src/oss/javascript/integrations/chat/index.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ title: Chat models
143143
import { ChatVertexAI } from "@langchain/google-vertexai";
144144

145145
const model = new ChatVertexAI({
146-
model: "gemini-1.5-flash",
146+
model: "gemini-2.5-flash",
147147
temperature: 0
148148
});
149149
```

src/oss/javascript/integrations/llms/google_vertex_ai.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ You may be looking for [this page instead](/oss/integrations/chat/google_vertex_
1111
</Warning>
1212

1313

14-
[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.
14+
[Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-2.5-pro`, `gemini-2.5-flash`, etc.
1515

1616
This will help you get started with VertexAI completion models (LLMs) using LangChain. For detailed documentation on `VertexAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html).
1717

src/oss/javascript/integrations/providers/google.mdx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ and [AI Studio](https://aistudio.google.com/)
99

1010
### Gemini Models
1111

12-
Access Gemini models such as `gemini-1.5-pro` and `gemini-2.0-flex` through the [`ChatGoogleGenerativeAI`](/oss/integrations/chat/google_generative_ai),
12+
Access Gemini models such as `gemini-2.5-pro` and `gemini-2.0-flash` through the [`ChatGoogleGenerativeAI`](/oss/integrations/chat/google_generative_ai),
1313
or if using VertexAI, via the [`ChatVertexAI`](/oss/integrations/chat/google_vertex_ai) class.
1414

1515
<Tip>
@@ -97,7 +97,7 @@ import { ChatVertexAI } from "@langchain/google-vertexai";
9797
// import { ChatVertexAI } from "@langchain/google-vertexai-web";
9898

9999
const model = new ChatVertexAI({
100-
model: "gemini-1.0-pro",
100+
model: "gemini-2.5-pro",
101101
maxOutputTokens: 2048,
102102
});
103103

src/oss/python/integrations/chat/anthropic.mdx

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
---
22
title: ChatAnthropic
3+
description: Get started using Anthropic [chat models](/oss/langchain/models) in LangChain.
34
---
45

5-
This guide provides a quick overview for getting started with Claude [chat models](/oss/langchain/models).
6-
76
You can find information about Anthropic's latest models, their costs, context windows, and supported input types in the [Claude](https://docs.claude.com/en/docs/about-claude/models/overview) docs.
87

98
<Tip>
@@ -49,7 +48,7 @@ To access Anthropic (Claude) models you'll need to install the `langchain-anthro
4948

5049
### Credentials
5150

52-
Head to [console.anthropic.com/](https://console.anthropic.com) to sign up for Anthropic and generate an API key. Once you've done this set the `ANTHROPIC_API_KEY` environment variable:
51+
Head to the [Claude console](https://console.anthropic.com) to sign up and generate a Claude API key. Once you've done this set the `ANTHROPIC_API_KEY` environment variable:
5352

5453
```python
5554
import getpass
@@ -145,7 +144,7 @@ response.content
145144
'type': 'tool_use'}]
146145
```
147146

148-
Using `content_blocks` will render the content in a standard format that is consistent across other model providers:
147+
Using `content_blocks` will render the content in a standard format that is consistent across other model providers. Read more about [content blocks](/oss/langchain/messages#standard-content-blocks).
149148

150149
```python
151150
response.content_blocks
@@ -306,7 +305,7 @@ print(json.dumps(response.content_blocks, indent=2))
306305

307306
## Prompt caching
308307

309-
Anthropic supports [caching](https://docs.claude.com/en/docs/build-with-claude/prompt-caching) of [elements of your prompts](https://docs.claude.com/en/docs/build-with-claude/prompt-caching#what-can-be-cached), including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs.
308+
Anthropic supports [caching](https://docs.claude.com/en/docs/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs.
310309

311310
To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below:
312311

@@ -400,7 +399,7 @@ Second:
400399

401400
### Tools
402401

403-
```python
402+
```python expandable
404403
from langchain_anthropic import convert_to_anthropic_tool
405404
from langchain.tools import tool
406405

src/oss/python/integrations/chat/azure_chat_openai.mdx

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
---
22
title: AzureChatOpenAI
3+
description: Get started using OpenAI [chat models](/oss/langchain/models) via Azure in LangChain.
34
---
45

5-
This guide provides a quick overview for getting started with OpenAI [chat models](/oss/langchain/models) on Azure.
6-
76
You can find information about Azure OpenAI's latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).
87

98
<Info>
@@ -32,6 +31,12 @@ You can find information about Azure OpenAI's latest models and their costs, con
3231
features, or head to the @[`AzureChatOpenAI`] API reference.
3332
</Note>
3433

34+
<Tip>
35+
**API Reference**
36+
37+
For detailed documentation of all features and configuration options, head to the @[`AzureChatOpenAI`] API reference.
38+
</Tip>
39+
3540
## Overview
3641

3742
### Integration details

src/oss/python/integrations/chat/deepseek.mdx

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,18 @@
11
---
22
title: ChatDeepSeek
3+
description: Get started using DeepSeek [chat models](/oss/langchain/models) in LangChain.
34
---
45

5-
This will help you get started with DeepSeek's hosted [chat models](/oss/langchain/models). For detailed documentation of all ChatDeepSeek features and configurations head to the [API reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html).
6+
This will help you get started with DeepSeek's hosted [chat models](/oss/langchain/models).
67

78
<Tip>
8-
**DeepSeek's models are open source and can be run locally (e.g. in [Ollama](./ollama.ipynb)) or on other inference providers (e.g. [Fireworks](./fireworks.ipynb), [Together](./together.ipynb)) as well.**
9+
**API Reference**
910

11+
For detailed documentation of all features and configuration options, head to the @[`ChatDeepSeek`] API reference.
12+
</Tip>
13+
14+
<Tip>
15+
**DeepSeek's models are open source and can be run locally (e.g. in [Ollama](./ollama.ipynb)) or on other inference providers (e.g. [Fireworks](./fireworks.ipynb), [Together](./together.ipynb)) as well.**
1016
</Tip>
1117

1218
## Overview
@@ -15,7 +21,7 @@ This will help you get started with DeepSeek's hosted [chat models](/oss/langcha
1521

1622
| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/deepseek) | Downloads | Version |
1723
| :--- | :--- | :---: | :---: | :---: | :---: | :---: |
18-
| [ChatDeepSeek](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html) | [langchain-deepseek](https://python.langchain.com/api_reference/deepseek/) || beta || ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-deepseek?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-deepseek?style=flat-square&label=%20) |
24+
| @[`ChatDeepSeek`] | @[`langchain-deepseek`] || beta || ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-deepseek?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-deepseek?style=flat-square&label=%20) |
1925

2026
### Model features
2127

0 commit comments

Comments
 (0)