+
 const {
   BedrockRuntimeClient,
-  InvokeModelCommand
+  ConversationRole,
+  ConverseCommand,
+} = require("@aws-sdk/client-bedrock-runtime");

-} = require("@aws-sdk/client-bedrock-runtime")
+const client = new BedrockRuntimeClient({ region: "us-west-2" });

-// a client can be shared by different commands.
-const client = new BedrockRuntimeClient({ region: "us-west-2" })
+const modelId = "us.amazon.nova-lite-v1:0";

 exports.handler = async event => {
-
   const { text } = event

-  const body = {
-    "prompt": `provide a summary of the following text in 5 bulletpoints;
-    extract 5 tags to categorize the article in a CMS;
-    provide output as a json object with properties :
-    "summary" as a list of bulletpoints and "tags" as a list of tags;
-    <text>${text}</text>
-    `,
-    "maxTokens": 1600,
-    "temperature": 0.3,
-    "topP": 1.0,
-    "stopSequences": [],
-    "countPenalty": { "scale": 0 },
-    "presencePenalty": { "scale": 0 },
-    "frequencyPenalty": { "scale": 0 }
-  }
-
-  const params = {
-    "modelId": "ai21.j2-ultra-v1",
-    "contentType": "application/json",
-    "accept": "application/json",
-    "body": JSON.stringify(body)
-  }
-
-  const command = new InvokeModelCommand(params)
-
-  let data, completions
+  const inputText = `provide a summary of the following text in 5 bulletpoints;
+    extract 5 tags to categorize the article in a CMS;
+    provide output as a json object with properties :
+    "summary" as a list of bulletpoints and "tags" as a list of tags;
+    <text>${text}</text>
+    `;
+
+  const message = {
+    content: [{ text: inputText }],
+    role: ConversationRole.USER,
+  };
+
+  const request = {
+    modelId,
+    messages: [message],
+    inferenceConfig: {
+      maxTokens: 500, // Maximum number of tokens in the generated response
+      temperature: 0.5, // Controls randomness of the generated text
+      // topP: 0.9, // Alternative: use topP instead of temperature
+    },
+  };
+
+  let responseText = "", statusCode = 200;

   try {
-    data = await client.send(command)
-
-    completions = JSON.parse(new TextDecoder().decode(data.body)).completions
-    console.log(JSON.parse(completions[0].data.text))
-
-
-  }
-  catch (error) {
-    console.error(error)
+    const response = await client.send(new ConverseCommand(request));
+    responseText = response.output.message.content[0].text;
+    console.log(responseText);
+  } catch (error) {
+    const errorMessage = `ERROR: Can't invoke '${modelId}'. Reason: ${error.message}`;
+    console.error(errorMessage);
+    responseText = errorMessage;
+    statusCode = 500;
   }

   const response = {
-    statusCode: 200,
-    body: JSON.stringify(completions[0].data.text),
+    statusCode,
+    body: JSON.stringify(responseText),
   }

-  return response
+  return response;
+
 }
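
A minimal local test sketch, not part of the change above: it assumes the updated handler is exported from ./index.js (the path and sample text are illustrative) and that AWS credentials with access to Amazon Bedrock in us-west-2 are configured in the environment.

const { handler } = require("./index.js"); // hypothetical path to the file changed above

(async () => {
  // Sample event shape expected by the handler; the text is a placeholder.
  const event = { text: "Amazon Bedrock offers foundation models from several providers through a single API." };
  const result = await handler(event);
  console.log(result.statusCode);
  // result.body is JSON.stringify of the raw model text; the prompt asks the model for a
  // JSON object with "summary" and "tags", but the model may wrap it in extra text.
  try {
    console.log(JSON.parse(JSON.parse(result.body)));
  } catch {
    console.log(result.body);
  }
})();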