@@ -6,110 +6,112 @@ const fs = require('fs');
 logger.level = "info";
 
 // The system admin can provide the right base API URL; the API key can be downloaded from your profile page on Modzy.
-// You can config those params as is described in the readme file (as environment variables, or by using the .env file), or you
-// or you can just update the BASE_URL and API_KEY vars and use this sample code (not recommended for production environments).
+// You can configure those params as described in the README file (as environment variables, or by using the .env file),
+// or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).
 // The MODZY_BASE_URL should point to the API services route which may be different from the Modzy page URL.
 // (ie: https://modzy.example.com/api).
 const BASE_URL = process.env.MODZY_BASE_URL;
-// The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character and a private part
+// The MODZY_API_KEY is your own personal API key. It is composed of a public part, a dot character, and a private part
 // (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
-const API_KEY = process.env.MODZY_API_KEY;
+const API_KEY = process.env.MODZY_API_KEY;
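+// Illustration only (not part of the original sample): if you use the .env option, one common approach is
+// the dotenv package. This sketch assumes dotenv is installed, and the call would need to run near the top
+// of the file, before the BASE_URL and API_KEY constants above are read; the README may describe a different setup.
+//require('dotenv').config();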
 
-// Client initialization
+// Client initialization:
 // Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
 // for the following API calls.
 const modzyClient = new modzy.ModzyClient(BASE_URL, API_KEY);
 
 // Create a Job with an embedded input, wait, and retrieve results:
 
-async function createJobWithEmbeddedInput() {
-    try {
-        // Get the model object:
-        // If you already know the model identifier (i.e.: you got from the URL of the model details page or the input sample),
-        // you can skip this step. If you don't you can find the model identifier by using its name as follows:
+async function createJobWithEmbeddedInput() {
+    try {
+        // Get the model object:
+        // If you already know the model identifier (i.e.: you got it from the URL of the model details page or the input sample),
+        // you can skip this step. If you don't, you can find the model identifier by using its name as follows:
         let model = await modzyClient.getModelByName("Multi-Language OCR");
         // Or if you already know the model id and want to know more about the model, you can use this instead:
-        //let model = await modzyClient.getModel("c60c8dbd79");
-
-        // The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
+        //let model = await modzyClient.getModel("c60c8dbd79");
+        // You can find more information about how to query models in the model_samples.js file.
+
+        // The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
         logger.info(Object.keys(model).toString().replace('\n', ' '));
         // Or just log the model identifier and the latest version
         logger.info(`The model identifier is ${model.modelId} and the latest version is ${model.latestVersion}`);
         // Get the model version object:
-        // If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
-        // use the following code block to know about the inputs keys and skip the call on future job submissions.
+        // If you already know the model version and the input key(s) of the model version, you can skip this step. Also, you can
+        // use the following code block to know about the input keys and skip the call on future job submissions.
         let modelVersion = await modzyClient.getModelVersion(model.modelId, model.latestVersion);
-        // The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the inputs, and
+        // The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the input, and
         // output keys of the model.
-        logger.info(`Ths model version is ${modelVersion.version}`);
+        logger.info(`This model version is ${modelVersion.version}`);
         logger.info(` timeouts: status ${modelVersion.timeout.status} ms, run ${modelVersion.timeout.run} ms`);
         logger.info(" inputs: ");
-        for (key in modelVersion.inputs) {
+        for (key in modelVersion.inputs) {
             let input = modelVersion.inputs[key];
             logger.info(` key ${input.name}, type ${input.acceptedMediaTypes}, description: ${input.description}`);
         }
         logger.info(" outputs: ")
-        for (key in modelVersion.outputs) {
+        for (key in modelVersion.outputs) {
             let output = modelVersion.outputs[key];
             logger.info(` key ${output.name}, type ${output.mediaType}, description: ${output.description}`);
         }
 
-        // Send the job:
-        // An embedded input is a byte array encoded as a string in Base64, that's very handy for small to middle size files, for
-        // bigger files can be a memory issue because you need to load the file in memory (load + encode).
-        const imageBytes = fs.readFileSync('samples/image.png');
-        let configBytes = fs.readFileSync('samples/config.json');
-        // With the info about the model (identifier), the model version (version string, input/output keys), you are ready to
-        // submit the job. Just prepare the source object:
-        let sources = {"source-key": {"input": imageBytes, "config.json": configBytes}};
-        // An inference job groups input data that you send to a model. You can send any amount of inputs to
-        // process and you can identify and refer to a specific input by the key that you assign, for example we can add:
-        sources["second-key"] = {"input": imageBytes, "config.json": configBytes}
-        // You don't need to load all the inputs from files, just convert to bytes as follows:
-        configBytes = Buffer.from(JSON.stringify({"languages": ["spa"]}));
-        sources["another-key"] = {"input": imageBytes, "config.json": configBytes}
-        // If you send a wrong input key, the model fails to process the input.
-        sources["wrong-key"] = {"a.wrong.key": imageBytes, "config.json": configBytes}
-        // If you send a correct input key, but some wrong values, the model fails to process the input.
-        sources["wrong-value"] = {"input": configBytes, "config.json": imageBytes}
-        // When you have all your inputs ready, you can use our helper method to submit the job as follows:
-        let job = await modzyClient.submitJobEmbedded(model.modelId, modelVersion.version, "application/octet-stream", sources);
+        // Send the job:
+        // An embedded input is a byte array encoded as a string in Base64. This input type comes in very handy for small to medium size files. However,
+        // it requires loading and encoding files in memory, which can be an issue for larger files.
+        const imageBytes = fs.readFileSync('samples/image.png');
+        let configBytes = fs.readFileSync('samples/config.json');
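+        // Purely illustrative (the submitJobEmbedded helper below handles the encoding for you): "embedded"
+        // means each byte buffer ends up Base64-encoded in the job payload, roughly like this:
+        //const embeddedImage = imageBytes.toString('base64');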
+        // With the info about the model (identifier) and the model version (version string, input/output keys), you are ready to
+        // submit the job. Just prepare the source object:
+        let sources = {"source-key": {"input": imageBytes, "config.json": configBytes}};
+        // An inference job groups input data sent to a model. You can send any number of inputs to
+        // process, and you can identify and refer to a specific input by the key assigned. For example, we can add:
+        sources["second-key"] = {"input": imageBytes, "config.json": configBytes}
+        // You don't need to load all the inputs from files, just convert to bytes as follows:
+        configBytes = Buffer.from(JSON.stringify({"languages": ["spa"]}));
+        sources["another-key"] = {"input": imageBytes, "config.json": configBytes}
+        // If you send a wrong input key, the model fails to process the input.
+        sources["wrong-key"] = {"a.wrong.key": imageBytes, "config.json": configBytes}
+        // If you send a correct input key, but some wrong values, the model fails to process the input.
+        sources["wrong-value"] = {"input": configBytes, "config.json": imageBytes}
+        // When you have all your inputs ready, you can use our helper method to submit the job as follows:
+        let job = await modzyClient.submitJobEmbedded(model.modelId, modelVersion.version, "application/octet-stream", sources);
         // Modzy creates the job and queues it for processing. The job object contains all the info that you need to keep track
         // of the process, the most important being the job identifier and the job status.
-        logger.info("job: " + job.jobIdentifier + " " + job.status);
+        logger.info("job: " + job.jobIdentifier + " " + job.status);
         // The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
-        // We provide a helper method to listen until the job finishes processing. it will listen until the job finishes
+        // We provide a helper method to listen until the job finishes processing. It listens until the job finishes
         // and moves to COMPLETED, CANCELED, or TIMEOUT.
         job = await modzyClient.blockUntilComplete(job);
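+        // If you would rather poll the job yourself instead of blocking, a rough sketch could look like the
+        // following (the getJob method name is an assumption here; check the SDK docs for the exact call):
+        //while (!["COMPLETED", "CANCELED", "TIMEOUT"].includes(job.status)) {
+        //    await new Promise(resolve => setTimeout(resolve, 5000)); // wait 5 seconds between status checks
+        //    job = await modzyClient.getJob(job.jobIdentifier);
+        //}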
         // Get the results:
         // Check the status of the job. Jobs may be canceled or may reach a timeout.
-        if (job.status === "COMPLETED") {
+        if (job.status === "COMPLETED") {
             // A completed job means that all the inputs were processed by the model. Check the results for each
-            // input keys provided in the source object to see the model output.
+            // input key provided in the source object to see the model output.
             let result = await modzyClient.getResult(job.jobIdentifier);
             // The result object has some useful info:
             logger.info(`Result: finished: ${result.finished}, total: ${result.total}, completed: ${result.completed}, failed: ${result.failed}`);
-            // Notice that we are iterating through the same input sources keys
-            for (key in sources) {
-                // The result object has the individual results of each job input. In this case the output key is called
+            // Notice that we are iterating through the same input source keys
+            for (key in sources) {
+                // The results object has the individual results of each job input. In this case the output key is called
                 // results.json, so we can get the results as follows:
-                if (result.results[key]) {
+                if (result.results[key]) {
                     let model_res = result.results[key]["results.json"];
                     // The output for this model comes in a JSON format, so we can directly log the model results:
                     logger.info(` ${key}: ${JSON.stringify(model_res)}`);
                 }
-                else {
+                else {
+                    // If the model raises an error, we can get the specific error message:
                     logger.warn(` ${key}: failure ${result.failures[key]['error']}`);
                 }
             }
         }
-        else {
+        else {
             logger.warn(`The job ended with status ${job.status}`);
-        }
-    }
-    catch (error) {
-        logger.warn(error);
-    }
+        }
+    }
+    catch (error) {
+        logger.warn(error);
+    }
 }
 
 
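+// Note: running the sample is just a matter of invoking the function, e.g. as below; whether the full file
+// already does this further down is an assumption, since this hunk ends here.
+//createJobWithEmbeddedInput();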