import { GoogleGenerativeAI } from '@google/generative-ai';
import { BaseEmbedding, TEmbeddings } from './BaseEmbedding';
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
import { getLLMCredentials } from '@sre/LLMManager/LLM.service/LLMCredentials.helper';
import { TLLMCredentials, TLLMModel, BasicCredentials } from '@sre/types/LLM.types';

const DEFAULT_MODEL = 'gemini-embedding-001';
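
// Note (an assumption worth verifying against your SDK/API version): per Google's
// published model docs, gemini-embedding-001 returns 3072-dimensional vectors by
// default and supports smaller output sizes via the outputDimensionality field.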

export class GoogleEmbeds extends BaseEmbedding {
    protected client: GoogleGenerativeAI;

    public static models = ['gemini-embedding-001'];
    public canSpecifyDimensions = true;

    constructor(private settings?: Partial<TEmbeddings>) {
        // Spread settings first so an explicitly undefined model cannot clobber the default.
        super({ ...settings, model: settings?.model ?? DEFAULT_MODEL });
    }

    async embedTexts(texts: string[], candidate: AccessCandidate): Promise<number[][]> {
        // Normalize the inputs and split them into chunk-sized batches.
        const batches = this.chunkArr(this.processTexts(texts), this.chunkSize);

        // Embed all batches in parallel.
        const batchRequests = batches.map((batch) => this.embed(batch, candidate));
        const batchResponses = await Promise.all(batchRequests);

        // Flatten the per-batch results back into one list, preserving input order.
        const embeddings: number[][] = [];
        for (const batchResponse of batchResponses) {
            embeddings.push(...batchResponse);
        }
        return embeddings;
    }

    async embedText(text: string, candidate: AccessCandidate): Promise<number[]> {
        const processedText = this.processTexts([text])[0];
        const embeddings = await this.embed([processedText], candidate);
        return embeddings[0];
    }

    protected async embed(texts: string[], candidate: AccessCandidate): Promise<number[][]> {
        let apiKey: string | undefined;

        // Try to resolve an API key through the credential system first.
        try {
            const modelInfo: TLLMModel = {
                provider: 'GoogleAI',
                modelId: this.model,
                credentials: this.settings?.credentials as unknown as TLLMCredentials,
            };
            const credentials = await getLLMCredentials(candidate, modelInfo);
            apiKey = (credentials as BasicCredentials)?.apiKey;
        } catch (e) {
            // If the credential system fails, fall back to the environment variable below.
        }

        // Fall back to the environment variable if no credential was found.
        if (!apiKey) {
            apiKey = process.env.GOOGLE_AI_API_KEY;
        }

        if (!apiKey) {
            throw new Error('Please provide an API key for Google AI embeddings via credentials or the GOOGLE_AI_API_KEY environment variable');
        }

        // Lazily create the client; it is cached on the instance, so the first
        // resolved API key is the one used for all subsequent calls.
        if (!this.client) {
            this.client = new GoogleGenerativeAI(apiKey);
        }

        try {
            const model = this.client.getGenerativeModel({ model: this.model });

            const embeddings: number[][] = [];

            // Embed each text with its own request; see the note after this loop.
            for (const text of texts) {
                const result = await model.embedContent(text);
                if (result?.embedding?.values) {
                    embeddings.push(result.embedding.values);
                } else {
                    throw new Error('Invalid embedding response from Google AI');
                }
            }
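
            /*
             * Note: the loop above issues one request per text. The
             * @google/generative-ai SDK also exposes model.batchEmbedContents(),
             * which embeds several texts in a single round-trip. A minimal
             * sketch, assuming the installed SDK version supports it:
             *
             *   const batch = await model.batchEmbedContents({
             *       requests: texts.map((text) => ({
             *           content: { role: 'user', parts: [{ text }] },
             *       })),
             *   });
             *   return batch.embeddings.map((e) => e.values);
             *
             * Likewise, canSpecifyDimensions is declared true above, but no
             * requested size is forwarded here; newer SDK versions accept an
             * outputDimensionality field on the embed request for that.
             */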

            return embeddings;
        } catch (e) {
            throw new Error(`Google Embeddings API error: ${(e as Error)?.message || e}`);
        }
    }
}
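
/*
 * Usage sketch (illustrative only; AccessCandidate.team() is assumed from the
 * SRE access-control API and may differ in your setup):
 *
 *   const embedder = new GoogleEmbeds({ model: 'gemini-embedding-001' });
 *   const vectors = await embedder.embedTexts(
 *       ['first document', 'second document'],
 *       AccessCandidate.team('my-team-id'),
 *   );
 *   // vectors is a number[][] with one embedding per input text.
 */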