Commit abd5811
Update and expand XML documentation
1 parent eb23c26 commit abd5811

7 files changed: +122 -8 lines changed

OpenAI_API/Chat/ChatEndpoint.cs

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ internal ChatEndpoint(OpenAIAPI api) : base(api) { }
 	/// Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
 	/// </summary>
 	/// <param name="defaultChatRequestArgs">Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see></param>
-	/// <returns></returns>
+	/// <returns>A <see cref="Conversation"/> which encapsulates a back-and-forth chat between a user and an assistant.</returns>
 	public Conversation CreateConversation(ChatRequest defaultChatRequestArgs = null)
 	{
 		return new Conversation(this, defaultChatRequestArgs: defaultChatRequestArgs ?? DefaultChatRequestArgs);
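
As a quick usage note for the conversation flow documented above, here is a minimal sketch. It assumes the library's usual OpenAIAPI entry point and the Conversation members exposed elsewhere in this repo (AppendSystemMessage, AppendUserInput, GetResponseFromChatbotAsync); the API key is a placeholder.

// Minimal sketch: create a conversation and get one reply.
var api = new OpenAIAPI("sk-...");  // placeholder key
Conversation chat = api.Chat.CreateConversation();
chat.AppendSystemMessage("You are a helpful assistant.");
chat.AppendUserInput("What does presence_penalty do?");
string reply = await chat.GetResponseFromChatbotAsync();
Console.WriteLine(reply);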

OpenAI_API/Chat/Conversation.cs

Lines changed: 1 addition & 1 deletion

@@ -158,7 +158,7 @@ public async Task StreamResponseFromChatbotAsync(Action<int, string> resultHandl
 
 	/// <summary>
 	/// Calls the API to get a response, which is appended to the current chat's <see cref="Messages"/> as an <see cref="ChatMessageRole.Assistant"/> <see cref="ChatMessage"/>, and streams the results as they come in. <br/>
-	/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamResponseFromChatbotAsync"/> instead.
+	/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamResponseFromChatbotAsync(Action{string})"/> instead.
 	/// </summary>
 	/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
 	public async IAsyncEnumerable<string> StreamResponseEnumerableFromChatbotAsync()
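
To illustrate the two streaming options the doc comment contrasts, a brief sketch (same assumptions as the conversation example above):

// C# 8+ with async enumerables: stream tokens as they arrive.
await foreach (string token in chat.StreamResponseEnumerableFromChatbotAsync())
{
    Console.Write(token);
}

// Pre-C# 8 or .NET Framework: the Action-based overload referenced in the fixed cref.
await chat.StreamResponseFromChatbotAsync(token => Console.Write(token));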

OpenAI_API/Chat/IChatEndpoint.cs

Lines changed: 89 additions & 2 deletions

@@ -6,21 +6,108 @@
 namespace OpenAI_API.Chat
 {
 	/// <summary>
-	/// An interface for <see cref="ChatEndpoint"/>, for ease of mock testing, etc
+	/// An interface for <see cref="ChatEndpoint"/>, the ChatGPT API endpoint. Use this endpoint to send multiple messages and carry on a conversation.
 	/// </summary>
 	public interface IChatEndpoint
 	{
+		/// <summary>
+		/// This allows you to set default parameters for every request, for example a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
+		/// </summary>
 		ChatRequest DefaultChatRequestArgs { get; set; }
 
+		/// <summary>
+		/// Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
+		/// </summary>
+		/// <param name="defaultChatRequestArgs">Allows setting the parameters to use when calling the ChatGPT API. Can be useful for setting temperature, presence_penalty, and more. See <see href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI documentation for a list of possible parameters to tweak.</see></param>
+		/// <returns>A <see cref="Conversation"/> which encapsulates a back-and-forth chat between a user and an assistant.</returns>
+		Conversation CreateConversation(ChatRequest defaultChatRequestArgs = null);
+
+
+		/// <summary>
+		/// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
+		/// </summary>
+		/// <param name="request">The request to send to the API.</param>
+		/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
 		Task<ChatResult> CreateChatCompletionAsync(ChatRequest request);
+
+		/// <summary>
+		/// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
+		/// </summary>
+		/// <param name="request">The request to send to the API.</param>
+		/// <param name="numOutputs">Overrides <see cref="ChatRequest.NumChoicesPerMessage"/> as a convenience.</param>
+		/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
 		Task<ChatResult> CreateChatCompletionAsync(ChatRequest request, int numOutputs = 5);
+
+		/// <summary>
+		/// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
+		/// </summary>
+		/// <param name="messages">The array of messages to send to the API</param>
+		/// <param name="model">The model to use. See the ChatGPT models available from <see cref="ModelsEndpoint.GetModelsAsync()"/></param>
+		/// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
+		/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
+		/// <param name="numOutputs">How many different choices to request for each prompt.</param>
+		/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
+		/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
+		/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
+		/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
+		/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
+		/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
 		Task<ChatResult> CreateChatCompletionAsync(IList<ChatMessage> messages, Model model = null, double? temperature = null, double? top_p = null, int? numOutputs = null, int? max_tokens = null, double? frequencyPenalty = null, double? presencePenalty = null, IReadOnlyDictionary<string, float> logitBias = null, params string[] stopSequences);
+
+		/// <summary>
+		/// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
+		/// </summary>
+		/// <param name="messages">The messages to use in the generation.</param>
+		/// <returns>The <see cref="ChatResult"/> with the API response.</returns>
 		Task<ChatResult> CreateChatCompletionAsync(params ChatMessage[] messages);
+
+		/// <summary>
+		/// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
+		/// </summary>
+		/// <param name="userMessages">The user message or messages to use in the generation. All strings are assumed to be of Role <see cref="ChatMessageRole.User"/></param>
+		/// <returns>The <see cref="ChatResult"/> with the API response.</returns>
 		Task<ChatResult> CreateChatCompletionAsync(params string[] userMessages);
-		Conversation CreateConversation(ChatRequest defaultChatRequestArgs = null);
+
+
+		/// <summary>
+		/// Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
+		/// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamChatEnumerableAsync(ChatRequest)"/> instead.
+		/// </summary>
+		/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultChatRequestArgs"/>.</param>
+		/// <param name="resultHandler">An action to be called as each new result arrives.</param>
 		Task StreamChatAsync(ChatRequest request, Action<ChatResult> resultHandler);
+
+		/// <summary>
+		/// Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
+		/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamChatAsync(ChatRequest, Action{ChatResult})"/> instead.
+		/// </summary>
+		/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultChatRequestArgs"/>.</param>
+		/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
 		IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(ChatRequest request);
+
+		/// <summary>
+		/// Ask the API to complete the message(s) using the specified request, and stream the results as they come in.
+		/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamChatAsync(ChatRequest, Action{ChatResult})"/> instead.
+		/// </summary>
+		/// <param name="messages">The array of messages to send to the API</param>
+		/// <param name="model">The model to use. See the ChatGPT models available from <see cref="ModelsEndpoint.GetModelsAsync()"/></param>
+		/// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
+		/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
+		/// <param name="numOutputs">How many different choices to request for each prompt.</param>
+		/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
+		/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
+		/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
+		/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
+		/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
+		/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
 		IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IList<ChatMessage> messages, Model model = null, double? temperature = null, double? top_p = null, int? numOutputs = null, int? max_tokens = null, double? frequencyPenalty = null, double? presencePenalty = null, IReadOnlyDictionary<string, float> logitBias = null, params string[] stopSequences);
+
+		/// <summary>
+		/// Ask the API to complete the message(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
+		/// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamChatEnumerableAsync(ChatRequest)"/> instead.
+		/// </summary>
+		/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultChatRequestArgs"/>.</param>
+		/// <param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
 		Task StreamCompletionAsync(ChatRequest request, Action<int, ChatResult> resultHandler);
 	}
 }
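
Because IChatEndpoint now documents both the non-streaming and streaming paths, a combined sketch may help. It assumes the Chat property on OpenAIAPI and the ChatRequest/ChatMessage shapes referenced in the doc comments; that streamed ChatResult choices expose partial text via a Delta message (as in the OpenAI streaming response) is also an assumption, and the key is a placeholder.

IChatEndpoint chat = new OpenAIAPI("sk-...").Chat;  // placeholder key

// Non-streaming: wait for the full result, then read ChatResult.Choices.
var request = new ChatRequest()
{
    Model = Model.ChatGPTTurbo,
    Temperature = 0.2,
    Messages = new ChatMessage[]
    {
        new ChatMessage(ChatMessageRole.User, "Summarize RFC 2119 in one sentence.")
    }
};
ChatResult result = await chat.CreateChatCompletionAsync(request);
Console.WriteLine(result.Choices[0].Message.Content);

// Streaming: print partial results as they arrive (Delta is an assumption, see above).
await foreach (ChatResult partial in chat.StreamChatEnumerableAsync(request))
{
    Console.Write(partial.Choices[0].Delta?.Content);
}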

OpenAI_API/Embedding/IEmbeddingEndpoint.cs

Lines changed: 1 addition & 0 deletions

@@ -1,3 +1,4 @@
+using OpenAI_API.Models;
 using System.Threading.Tasks;
 
 namespace OpenAI_API.Embedding

OpenAI_API/Images/IImageGenerationEndpoint.cs

Lines changed: 12 additions & 1 deletion

@@ -3,11 +3,22 @@
 namespace OpenAI_API.Images
 {
 	/// <summary>
-	/// An interface for <see cref="ImageGenerationEndpoint"/>, for ease of mock testing, etc
+	/// An interface for <see cref="ImageGenerationEndpoint"/>. Given a prompt, the model will generate a new image.
 	/// </summary>
 	public interface IImageGenerationEndpoint
 	{
+		/// <summary>
+		/// Ask the API to create an image given a prompt.
+		/// </summary>
+		/// <param name="request">The request to be sent</param>
+		/// <returns>Asynchronously returns the image result. Look in its <see cref="Data.Url"/></returns>
 		Task<ImageResult> CreateImageAsync(ImageGenerationRequest request);
+
+		/// <summary>
+		/// Ask the API to create an image given a prompt.
+		/// </summary>
+		/// <param name="input">A text description of the desired image(s)</param>
+		/// <returns>Asynchronously returns the image result. Look in its <see cref="Data.Url"/></returns>
 		Task<ImageResult> CreateImageAsync(string input);
 	}
 }
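
A short sketch of the newly documented image calls, assuming the endpoint is exposed as ImageGenerations on OpenAIAPI and that ImageResult.Data is the list the Data.Url doc comments point at; the key is a placeholder.

IImageGenerationEndpoint images = new OpenAIAPI("sk-...").ImageGenerations;
ImageResult result = await images.CreateImageAsync("A watercolor painting of a lighthouse");
Console.WriteLine(result.Data[0].Url);  // URL of the generated image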

OpenAI_API/Images/ImageGenerationEndpoint.cs

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@
 namespace OpenAI_API.Images
 {
 	/// <summary>
-	/// Given a prompt and/or an input image, the model will generate a new image.
+	/// Given a prompt, the model will generate a new image.
 	/// </summary>
 	public class ImageGenerationEndpoint : EndpointBase, IImageGenerationEndpoint
 	{
OpenAI_API/Moderation/IModerationEndpoint.cs

Lines changed: 17 additions & 2 deletions

@@ -1,15 +1,30 @@
-using System.Threading.Tasks;
+using OpenAI_API.Models;
+using System.Threading.Tasks;
 
 namespace OpenAI_API.Moderation
 {
 	/// <summary>
-	/// An interface for <see cref="ModerationEndpoint"/>, for ease of mock testing, etc
+	/// An interface for <see cref="ModerationEndpoint"/>, which classifies text against the OpenAI Content Policy
 	/// </summary>
 	public interface IModerationEndpoint
 	{
+		/// <summary>
+		/// This allows you to send requests to the recommended model without needing to specify one. OpenAI recommends using the <see cref="Model.TextModerationLatest"/> model
+		/// </summary>
 		ModerationRequest DefaultModerationRequestArgs { get; set; }
 
+		/// <summary>
+		/// Ask the API to classify the text using a custom request.
+		/// </summary>
+		/// <param name="request">The request to send to the API</param>
+		/// <returns>Asynchronously returns the classification result</returns>
 		Task<ModerationResult> CallModerationAsync(ModerationRequest request);
+
+		/// <summary>
+		/// Ask the API to classify the text using the default model.
+		/// </summary>
+		/// <param name="input">Text to classify</param>
+		/// <returns>Asynchronously returns the classification result</returns>
 		Task<ModerationResult> CallModerationAsync(string input);
 	}
 }
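
And a sketch of both documented moderation overloads. Assumptions: the endpoint is exposed as Moderation on OpenAIAPI, ModerationRequest accepts the input text in its constructor, and the result carries a Results list mirroring the OpenAI moderation response; the key is a placeholder.

IModerationEndpoint moderation = new OpenAIAPI("sk-...").Moderation;

// Default model (TextModerationLatest unless DefaultModerationRequestArgs says otherwise):
ModerationResult result = await moderation.CallModerationAsync("text to classify");
Console.WriteLine(result.Results[0].Flagged);

// Custom request (constructor shape is an assumption, see above):
result = await moderation.CallModerationAsync(new ModerationRequest("text to classify"));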
