 using System.Runtime.CompilerServices;
 using System.Threading;
 using System.Threading.Tasks;
-using Firebase.VertexAI.Internal;
+using Firebase.FirebaseAI.Internal;
 
-namespace Firebase.VertexAI {
+namespace Firebase.FirebaseAI {
 
 /// <summary>
 /// An object that represents a back-and-forth chat with a model, capturing the history and saving
@@ -64,7 +64,7 @@ internal static Chat InternalCreateChat(GenerativeModel model, IEnumerable<Model
   /// <param name="content">The input given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>The model's response if no error occurred.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public Task<GenerateContentResponse> SendMessageAsync(
       ModelContent content, CancellationToken cancellationToken = default) {
     return SendMessageAsync(new[] { content }, cancellationToken);
@@ -76,7 +76,7 @@ public Task<GenerateContentResponse> SendMessageAsync(
   /// <param name="text">The text given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>The model's response if no error occurred.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public Task<GenerateContentResponse> SendMessageAsync(
       string text, CancellationToken cancellationToken = default) {
     return SendMessageAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
@@ -88,7 +88,7 @@ public Task<GenerateContentResponse> SendMessageAsync(
   /// <param name="content">The input given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>The model's response if no error occurred.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public Task<GenerateContentResponse> SendMessageAsync(
       IEnumerable<ModelContent> content, CancellationToken cancellationToken = default) {
     return SendMessageAsyncInternal(content, cancellationToken);
@@ -101,7 +101,7 @@ public Task<GenerateContentResponse> SendMessageAsync(
   /// <param name="content">The input given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>A stream of generated content responses from the model.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
       ModelContent content, CancellationToken cancellationToken = default) {
     return SendMessageStreamAsync(new[] { content }, cancellationToken);
@@ -113,7 +113,7 @@ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
   /// <param name="text">The text given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>A stream of generated content responses from the model.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
       string text, CancellationToken cancellationToken = default) {
     return SendMessageStreamAsync(new ModelContent[] { ModelContent.Text(text) }, cancellationToken);
@@ -125,7 +125,7 @@ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
   /// <param name="content">The input given to the model as a prompt.</param>
   /// <param name="cancellationToken">An optional token to cancel the operation.</param>
   /// <returns>A stream of generated content responses from the model.</returns>
-  /// <exception cref="VertexAIException">Thrown when an error occurs during content generation.</exception>
+  /// <exception cref="FirebaseAIException">Thrown when an error occurs during content generation.</exception>
   public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
       IEnumerable<ModelContent> content, CancellationToken cancellationToken = default) {
     return SendMessageStreamAsyncInternal(content, cancellationToken);
@@ -134,7 +134,7 @@ public IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsync(
   private async Task<GenerateContentResponse> SendMessageAsyncInternal(
       IEnumerable<ModelContent> requestContent, CancellationToken cancellationToken = default) {
     // Make sure that the requests are set to the role "user".
-    List<ModelContent> fixedRequests = requestContent.Select(VertexAIExtensions.ConvertToUser).ToList();
+    List<ModelContent> fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
     // Set up the context to send in the request
     List<ModelContent> fullRequest = new(chatHistory);
     fullRequest.AddRange(fixedRequests);
@@ -159,7 +159,7 @@ private async IAsyncEnumerable<GenerateContentResponse> SendMessageStreamAsyncIn
       IEnumerable<ModelContent> requestContent,
       [EnumeratorCancellation] CancellationToken cancellationToken = default) {
     // Make sure that the requests are set to the role "user".
-    List<ModelContent> fixedRequests = requestContent.Select(VertexAIExtensions.ConvertToUser).ToList();
+    List<ModelContent> fixedRequests = requestContent.Select(FirebaseAIExtensions.ConvertToUser).ToList();
     // Set up the context to send in the request
     List<ModelContent> fullRequest = new(chatHistory);
     fullRequest.AddRange(fixedRequests);
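For reference, a minimal usage sketch of the renamed API under the Firebase.FirebaseAI namespace. The SendMessageAsync and SendMessageStreamAsync overloads used below are the ones shown in this diff; the StartChat() factory on GenerativeModel is an assumption for illustration (the Chat constructor is internal, and only InternalCreateChat appears above).

using System.Threading.Tasks;
using Firebase.FirebaseAI;

public static class ChatUsageSketch {
  public static async Task RunAsync(GenerativeModel model) {
    // Assumed factory method; obtaining a Chat this way is not part of this diff.
    Chat chat = model.StartChat();

    // Single request/response; this string overload is shown in the diff above.
    GenerateContentResponse response = await chat.SendMessageAsync("Hello!");

    // Streaming variant, also shown above; each item is a partial response.
    await foreach (GenerateContentResponse chunk in
                   chat.SendMessageStreamAsync("Tell me more.")) {
      // Handle each partial GenerateContentResponse as it arrives.
    }
  }
}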