@@ -31,32 +31,30 @@ npm install openai
 Import the OpenAI and Prompt Foundry SDKs

 ```js
-import PromptFoundry from "@prompt-foundry/typescript-sdk";
-import { Configuration, OpenAIApi } from "openai";
+import PromptFoundry from '@prompt-foundry/typescript-sdk';
+import { Configuration, OpenAIApi } from 'openai';

 // Initialize Prompt Foundry SDK with your API key
 const promptFoundry = new PromptFoundry({
-  apiKey: process.env["PROMPT_FOUNDRY_API_KEY"],
+  apiKey: process.env['PROMPT_FOUNDRY_API_KEY'],
 });

 // Initialize OpenAI SDK with your API key
 const configuration = new Configuration({
-  apiKey: process.env["OPENAI_API_KEY"],
+  apiKey: process.env['OPENAI_API_KEY'],
 });
 const openai = new OpenAIApi(configuration);

 async function main() {
   // Retrieve model parameters for the prompt
-  const modelParameters = await promptFoundry.prompts.getParameters("1212121", {
-    variables: { hello: "world" },
+  const modelParameters = await promptFoundry.prompts.getParameters('1212121', {
+    variables: { hello: 'world' },
   });

   // check if provider is Open AI
-  if (modelParameters.provider === "openai") {
+  if (modelParameters.provider === 'openai') {
     // Use the retrieved parameters to create a chat completion request
-    const modelResponse = await openai.chat.completions.create(
-      modelParameters.parameters
-    );
+    const modelResponse = await openai.chat.completions.create(modelParameters.parameters);

     // Print the response from OpenAI
     console.log(modelResponse.data);
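For reference, the snippet above mixes the older `Configuration`/`OpenAIApi` setup with the newer `chat.completions.create` call and a v3-style `modelResponse.data`. A minimal sketch of the same flow against the v4-style OpenAI Node SDK (default `OpenAI` export, with the completion returned directly) could look like the following; it is an illustration only, reusing the placeholder prompt ID and variables from the example above.

```ts
import PromptFoundry from '@prompt-foundry/typescript-sdk';
import OpenAI from 'openai'; // v4-style default export

const promptFoundry = new PromptFoundry({ apiKey: process.env['PROMPT_FOUNDRY_API_KEY'] });
const openai = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'] });

async function main() {
  // Retrieve model parameters for the prompt (placeholder ID, as in the example above)
  const modelParameters = await promptFoundry.prompts.getParameters('1212121', {
    variables: { hello: 'world' },
  });

  if (modelParameters.provider === 'openai') {
    // The v4 SDK returns the completion directly rather than wrapping it in `.data`
    const completion = await openai.chat.completions.create(modelParameters.parameters);
    console.log(completion.choices[0].message);
  }
}

main();
```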
@@ -77,27 +75,27 @@ npm install @anthropic-ai/sdk
 Import the Anthropic and Prompt Foundry SDKs

 ```js
-import PromptFoundry from "@prompt-foundry/typescript-sdk";
-import Anthropic from "@anthropic-ai/sdk";
+import PromptFoundry from '@prompt-foundry/typescript-sdk';
+import Anthropic from '@anthropic-ai/sdk';

 // Initialize Prompt Foundry SDK with your API key
 const promptFoundry = new PromptFoundry({
-  apiKey: process.env["PROMPT_FOUNDRY_API_KEY"],
+  apiKey: process.env['PROMPT_FOUNDRY_API_KEY'],
 });

 // Initialize Anthropic SDK with your API key
 const anthropic = new Anthropic({
-  apiKey: process.env["ANTHROPIC_API_KEY"],
+  apiKey: process.env['ANTHROPIC_API_KEY'],
 });

 async function main() {
   // Retrieve model parameters for the prompt
-  const modelParameters = await promptFoundry.prompts.getParameters("1212121", {
-    variables: { hello: "world" },
+  const modelParameters = await promptFoundry.prompts.getParameters('1212121', {
+    variables: { hello: 'world' },
   });

   // Check if provider is Anthropic
-  if (modelParameters.provider === "anthropic") {
+  if (modelParameters.provider === 'anthropic') {
     // Use the retrieved parameters to create a chat completion request
     const message = await anthropic.messages.create(modelParameters.parameters);

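The Anthropic hunk cuts off right after the `anthropic.messages.create(...)` call. As a sketch of how the result is typically read with the Anthropic SDK (a message carries an array of content blocks, with text blocks exposing a `text` field), the branch above could continue along these lines:

```ts
// Continuing inside the `provider === 'anthropic'` branch of the example above
const message = await anthropic.messages.create(modelParameters.parameters);

// Print the text content blocks returned by Anthropic
for (const block of message.content) {
  if (block.type === 'text') {
    console.log(block.text);
  }
}
```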
@@ -117,7 +115,7 @@ This library includes TypeScript definitions for all request params and response
 ```ts
 import PromptFoundry from '@prompt-foundry/typescript-sdk';

-const promptFoundry = new PromptFoundry({
+const client = new PromptFoundry({
   apiKey: process.env['PROMPT_FOUNDRY_API_KEY'], // This is the default and can be omitted
 });

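As a small, self-contained illustration of those typed requests and responses (a sketch only, using nothing beyond the `provider`/`parameters` shape already shown in the examples above):

```ts
import PromptFoundry from '@prompt-foundry/typescript-sdk';

const client = new PromptFoundry({
  apiKey: process.env['PROMPT_FOUNDRY_API_KEY'],
});

async function example() {
  // Both the request params and the returned object are typed by the SDK
  const params = await client.prompts.getParameters('1212121', {
    variables: { hello: 'world' },
  });

  // Check `provider` before handing `parameters` to the matching provider SDK
  if (params.provider === 'openai') {
    console.log('OpenAI parameters:', params.parameters);
  }
}
```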
@@ -177,7 +175,7 @@ You can use the `maxRetries` option to configure or disable this:
 <!-- prettier-ignore -->
 ```js
 // Configure the default for all requests:
-const promptFoundry = new PromptFoundry({
+const client = new PromptFoundry({
   maxRetries: 0, // default is 2
 });

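The retry hunk only shows the client-wide default. Assuming this SDK follows the usual Stainless-style request-options argument (not visible in this diff), retries could also be overridden for a single call, roughly like this:

```ts
// Hypothetical per-request override, assuming a trailing request-options argument
await client.prompts.getParameters(
  '1212121',
  { variables: { hello: 'world' } },
  { maxRetries: 5 },
);
```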
@@ -194,7 +192,7 @@ Requests time out after 1 minute by default. You can configure this with a `time
 <!-- prettier-ignore -->
 ```ts
 // Configure the default for all requests:
-const promptFoundry = new PromptFoundry({
+const client = new PromptFoundry({
   timeout: 20 * 1000, // 20 seconds (default is 1 minute)
 });

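Likewise for timeouts: the hunk shows only the client-wide default, and a per-request override would mirror the retry sketch above (same assumption about a trailing request-options argument):

```ts
// Hypothetical per-request override; a timed-out request is expected to throw
await client.prompts.getParameters(
  '1212121',
  { variables: { hello: 'world' } },
  { timeout: 5 * 1000 }, // 5 seconds
);
```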
@@ -218,7 +216,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi

 <!-- prettier-ignore -->
 ```ts
-const promptFoundry = new PromptFoundry();
+const client = new PromptFoundry();

 const response = await promptFoundry.prompts.getParameters('1212121').asResponse();
 console.log(response.headers.get('X-My-Header'));
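The hunk header above mentions `.withResponse()`, while the visible code only uses `.asResponse()`. A sketch of the `.withResponse()` variant, assuming the usual Stainless-style shape in which it resolves to the parsed data together with the raw `Response`:

```ts
const client = new PromptFoundry();

// Sketch: destructure both the parsed data and the raw Response
const { data: parameters, response: raw } = await client.prompts
  .getParameters('1212121')
  .withResponse();

console.log(raw.headers.get('X-My-Header'));
console.log(parameters.provider);
```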
@@ -327,7 +325,7 @@ import http from 'http';
 import { HttpsProxyAgent } from 'https-proxy-agent';

 // Configure the default for all requests:
-const promptFoundry = new PromptFoundry({
+const client = new PromptFoundry({
   httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
 });

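The proxy hunk likewise shows only the client-wide default. Under the same assumption about a trailing request-options argument, `httpAgent` could presumably be overridden per call as well, e.g. with a plain `http.Agent` (matching the `import http from 'http'` visible in the hunk header):

```ts
import http from 'http';

// Hypothetical per-request override of the HTTP agent
await client.prompts.getParameters(
  '1212121',
  { variables: { hello: 'world' } },
  { httpAgent: new http.Agent({ keepAlive: false }) },
);
```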