@@ -6,110 +6,119 @@ const fs = require('fs');
66logger . level = "info" ;
77
88// The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
9- // You can config those params as is described in the readme file (as environment variables, or by using the .env file), or you
10- // or you can just update the BASE_URL and API_KEY vars and use this sample code (not recommended for production environments).
9+ // You can configure those params as described in the README file (as environment variables, or by using the .env file),
10+ // or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).
1111// The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
1212// (ie: https://modzy.example.com/api).
1313const BASE_URL = process . env . MODZY_BASE_URL ;
1414// The MODZY_API_KEY is your own personal API key. It is composed of a public part, a dot character, and a private part
1515// (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
16- const API_KEY = process . env . MODZY_API_KEY ;
16+ const API_KEY = process . env . MODZY_API_KEY ;
1717
18- // Client initialization
18+ // Client initialization:
1919// Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
2020// for the following API calls.
2121const modzyClient = new modzy . ModzyClient ( BASE_URL , API_KEY ) ;
2222
2323// Create a Job with an embedded input, wait, and retrieve results:
2424
25- async function createJobWithEmbeddedInput ( ) {
26- try {
27- // Get the model object:
28- // If you already know the model identifier (i.e.: you got from the URL of the model details page or the input sample),
25+
26+ async function createJobWithEmbeddedInput ( ) {
27+ try {
28+ // Get the model object:
29+ // If you already know the model identifier (i.e.: you got it from the URL of the model details page or the input sample),
30+
2931 // you can skip this step. If you don't, you can find the model identifier by using its name as follows:
3032 let model = await modzyClient . getModelByName ( "Multi-Language OCR" ) ;
3133 // Or if you already know the model id and want to know more about the model, you can use this instead:
32- //let model = await modzyClient.getModel("c60c8dbd79");
33-
34- // The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
34+ //let model = await modzyClient.getModel("c60c8dbd79");
35+ // You can find more information about how to query the models in the model_samples.js file.
36+ // The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
3537 logger . info ( Object . keys ( model ) . toString ( ) . replace ( '\n' , ' ' ) ) ;
3638 // Or just log the model identifier and the latest version
3739 logger . info ( `The model identifier is ${ model . modelId } and the latest version is ${ model . latestVersion } ` ) ;
40+
3841 // Get the model version object:
39- // If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
42+
43+ // If you already know the model version and the input key(s) of the model version, you can skip this step. Also, you can
44+
4045 // use the following code block to know about the input keys and skip the call on future job submissions.
4146 let modelVersion = await modzyClient . getModelVersion ( model . modelId , model . latestVersion ) ;
42- // The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the inputs , and
47+ // The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the input , and
4348 // output keys of the model.
44- logger . info ( `Ths model version is ${ modelVersion . version } ` ) ;
49+ logger . info ( `This model version is ${ modelVersion . version } ` ) ;
4550 logger . info ( ` timeouts: status ${ modelVersion . timeout . status } ms, run ${ modelVersion . timeout . run } ms ` ) ;
4651 logger . info ( " inputs: " ) ;
47- for ( key in modelVersion . inputs ) {
52+ for ( key in modelVersion . inputs ) {
4853 let input = modelVersion . inputs [ key ] ;
4954 logger . info ( ` key ${ input . name } , type ${ input . acceptedMediaTypes } , description: ${ input . description } ` ) ;
5055 }
5156 logger . info ( " outputs: " )
52- for ( key in modelVersion . outputs ) {
57+ for ( key in modelVersion . outputs ) {
5358 let output = modelVersion . outputs [ key ] ;
5459 logger . info ( ` key ${ output . name } , type ${ output . mediaType } , description: ${ output . description } ` ) ;
5560 }
5661
57- // Send the job:
58- // An embedded input is a byte array encoded as a string in Base64, that's very handy for small to middle size files, for
59- // bigger files can cause memory issues because you need to load the file in the memory (load + encode).
60- const imageBytes = fs . readFileSync ( 'samples/image.png' ) ;
61- let configBytes = fs . readFileSync ( 'samples/config.json' ) ;
62- // With the info about the model (identifier), the model version (version string, input/output keys), you are ready to
63- // submit the job. Just prepare the source object:
64- let sources = { "source-key" : { "input" : imageBytes , "config.json" : configBytes } } ;
65- // An inference job groups input data that you send to a model. You can send any amount of inputs to
66- // process and you can identify and refer to a specific input by the key that you assign, for example we can add:
67- sources [ "second-key" ] = { "input" : imageBytes , "config.json" :configBytes }
68- // You don’t need to load all the inputs from the files, just convert to bytes as follows:
69- configBytes = Buffer . from ( JSON . stringify ( { "languages" :[ "spa" ] } ) ) ;
70- sources [ "another-key" ] = { "input" : imageBytes , "config.json" :configBytes }
71- // If you send an incorrect input key, the model fails to process the input.
72- sources [ "wrong-key" ] = { "a.wrong.key" : imageBytes , "config.json" :configBytes }
73- // If you send a correct input key, but some wrong values, the model fails to process the input.
74- sources [ "wrong-value" ] = { "input" : configBytes , "config.json" :imageBytes }
75- // When you have all your inputs ready, you can use our helper method to submit the job as follows:
76- let job = await modzyClient . submitJobEmbedded ( model . modelId , modelVersion . version , "application/octet-stream" , sources ) ;
62+
63+ // Send the job:
64+ // An embedded input is a byte array encoded as a string in Base64. This input type comes in very handy for small to medium size files. However,
65+ // it requires loading and encoding files in memory, which can be an issue for larger files.
66+ const imageBytes = fs . readFileSync ( 'samples/image.png' ) ;
67+ let configBytes = fs . readFileSync ( 'samples/config.json' ) ;
68+ // With the info about the model (identifier) and the model version (version string, input/output keys), you are ready to
69+ // submit the job. Just prepare the source object:
70+ let sources = { "source-key" : { "input" : imageBytes , "config.json" : configBytes } } ;
71+ // An inference job groups input data sent to a model. You can send any amount of inputs to
72+ // process and you can identify and refer to a specific input by the key assigned. For example we can add:
73+ sources [ "second-key" ] = { "input" : imageBytes , "config.json" : configBytes }
74+ // You don't need to load all the inputs from files, just convert to bytes as follows:
75+ configBytes = Buffer . from ( JSON . stringify ( { "languages" : [ "spa" ] } ) ) ;
76+ sources [ "another-key" ] = { "input" : imageBytes , "config.json" : configBytes }
77+ // If you send a wrong input key, the model fails to process the input.
78+ sources [ "wrong-key" ] = { "a.wrong.key" : imageBytes , "config.json" : configBytes }
79+ // If you send a correct input key, but some wrong values, the model fails to process the input.
80+ sources [ "wrong-value" ] = { "input" : configBytes , "config.json" : imageBytes }
81+ // When you have all your inputs ready, you can use our helper method to submit the job as follows:
82+ let job = await modzyClient . submitJobEmbedded ( model . modelId , modelVersion . version , "application/octet-stream" , sources ) ;
83+
7784 // Modzy creates the job and queues it for processing. The job object contains all the info that you need to keep track
7885 // of the process, the most important being the job identifier and the job status.
79- logger . info ( "job: " + job . jobIdentifier + " " + job . status ) ;
86+ logger . info ( "job: " + job . jobIdentifier + " " + job . status ) ;
8087 // The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
81- // We provide a helper method to listen until the job finishes processing. It will listen until the job finishes
88+ // We provide a helper method to listen until the job finishes processing. It listens until the job finishes
8289 // and moves to COMPLETED, CANCELED, or TIMEOUT.
8390 job = await modzyClient . blockUntilComplete ( job ) ;
91+
8492 // Get the results:
8593 // Check the status of the job. Jobs may be canceled or may reach a timeout.
86- if ( job . status === "COMPLETED" ) {
94+ if ( job . status === "COMPLETED" ) {
8795 // A completed job means that all the inputs were processed by the model. Check the results for each
8896 // input key provided in the source object to see the model output.
8997 let result = await modzyClient . getResult ( job . jobIdentifier ) ;
9098 // The result object has some useful info:
9199 logger . info ( `Result: finished: ${ result . finished } , total: ${ result . total } , completed: ${ result . completed } , failed: ${ result . failed } ` ) ;
92- // Notice that we are iterating through the same input sources keys
93- for ( key in sources ) {
94- // The result object has the individual results of each job input. In this case, the output key is called
100+ // Notice that we are iterating through the same input source keys
101+ for ( key in sources ) {
102+ // The results object has the individual results of each job input. In this case the output key is called
95103 // results.json, so we can get the results as follows:
96- if ( result . results [ key ] ) {
104+ if ( result . results [ key ] ) {
97105 let model_res = result . results [ key ] [ "results.json" ] ;
98- // The output for this model comes in a JSON format, so we can directly log the model results:
106+ // The output for this model comes in a JSON format, so we can directly log the results:
99107 logger . info ( ` ${ key } : ${ JSON . stringify ( model_res ) } ` ) ;
100108 }
101- else {
109+ else {
110+ // If the model raises an error, we can get the specific error message:
102111 logger . warn ( ` ${ key } : failure ${ result . failures [ key ] [ 'error' ] } ` ) ;
103112 }
104113 }
105114 }
106- else {
115+ else {
107116 logger . warn ( `The job ends with status ${ job . status } ` ) ;
108- }
109- }
110- catch ( error ) {
111- logger . warn ( error ) ;
112- }
117+ }
118+ }
119+ catch ( error ) {
120+ logger . warn ( error ) ;
121+ }
113122}
114123
115124
0 commit comments