|
| 1 | +const core = globalThis.Deno.core; |
1 | 2 | import { InferenceSession, Tensor } from 'ext:ai/onnxruntime/onnx.js'; |
2 | 3 |
|
// Defaults used by `UserInferenceSession.fromHuggingFace` to resolve a model
// hosted on the Hugging Face Hub. The `template` placeholders ({REPO_ID},
// {REVISION}, {MODEL_FILE}) are substituted before the URL is built.
const DEFAULT_HUGGING_FACE_OPTIONS = {
  hostname: 'https://huggingface.co',
  path: {
    // Fixed typo: was "?donwload=true". The Hub only honors the correctly
    // spelled `download=true` query parameter (forces attachment disposition).
    template: '{REPO_ID}/resolve/{REVISION}/onnx/{MODEL_FILE}?download=true',
    revision: 'main',
    modelFile: 'model_quantized.onnx',
  },
};
11 | 12 |
|
/**
 * A user-friendly API over the onnx backend's `InferenceSession`.
 */
class UserInferenceSession {
  // Underlying backend session handle.
  inner;

  // Convenience views copied from the backend session.
  id;
  inputs;
  outputs;

  /**
   * @param {InferenceSession} session - an already-created backend session.
   */
  constructor(session) {
    this.inner = session;

    this.id = session.sessionId;
    this.inputs = session.inputNames;
    this.outputs = session.outputNames;
  }

  /**
   * Creates a session from a model URL.
   *
   * @param {string|URL} modelUrl - location of the ONNX model.
   * @returns {Promise<UserInferenceSession>}
   */
  static async fromUrl(modelUrl) {
    // Don't reassign the parameter; normalize into a new local instead.
    const url = modelUrl instanceof URL ? modelUrl.toString() : modelUrl;

    // The URL itself is handed to the backend as UTF-8 bytes; the backend
    // resolves and loads the model it points to.
    const urlBytes = new TextEncoder().encode(url);
    const session = await InferenceSession.fromBuffer(urlBytes);

    return new UserInferenceSession(session);
  }

  /**
   * Creates a session from a model hosted on the Hugging Face Hub.
   *
   * @param {string} repoId - e.g. "org/model-name".
   * @param {{hostname?: string, path?: {template?: string, revision?: string, modelFile?: string}}} [opts]
   * @returns {Promise<UserInferenceSession>}
   * @throws {Error} if the resolved path cannot be parsed as a URL.
   */
  static async fromHuggingFace(repoId, opts = {}) {
    const hostname = opts?.hostname ?? DEFAULT_HUGGING_FACE_OPTIONS.hostname;
    const pathOpts = {
      ...DEFAULT_HUGGING_FACE_OPTIONS.path,
      ...opts?.path,
    };

    const modelPath = pathOpts.template
      .replaceAll('{REPO_ID}', repoId)
      .replaceAll('{REVISION}', pathOpts.revision)
      .replaceAll('{MODEL_FILE}', pathOpts.modelFile);

    if (!URL.canParse(modelPath, hostname)) {
      // `new Error` (not bare `Error(...)`) per convention; same error type.
      throw new Error(
        `[Invalid URL] Couldn't parse the model path: "${modelPath}"`,
      );
    }

    return await UserInferenceSession.fromUrl(new URL(modelPath, hostname));
  }

  /**
   * Runs inference and wraps each raw output as a tensor.
   *
   * @param {Record<string, unknown>} inputs - keyed by the model's input names.
   * @returns {Promise<Record<string, UserTensor>>}
   */
  async run(inputs) {
    const outputs = await core.ops.op_ai_ort_run_session(this.id, inputs);

    // Re-wrap every own output entry as a UserTensor (Object.keys yields
    // exactly the own enumerable keys the previous for...in + hasOwn did).
    for (const key of Object.keys(outputs)) {
      const { type, data, dims } = outputs[key];

      outputs[key] = new UserTensor(type, data.buffer, dims);
    }

    return outputs;
  }
}
76 | 79 |
|
/**
 * Thin tensor subclass exposing extra encoding helpers on top of the
 * backend `Tensor`.
 *
 * NOTE: the previous explicit `constructor(type, data, dim)` only forwarded
 * its arguments to `super` and was removed as a useless constructor; the
 * implicit derived-class constructor forwards all arguments identically.
 */
class UserTensor extends Tensor {
  /**
   * Attempts to encode this tensor's data as audio at the given sample rate.
   *
   * @param {number} sampleRate - target sample rate in Hz.
   * @returns {Promise<unknown>} whatever the backend encode op produces —
   *   format not visible from here; see `op_ai_ort_encode_tensor_audio`.
   */
  async tryEncodeAudio(sampleRate) {
    return await core.ops.op_ai_ort_encode_tensor_audio(this.data, sampleRate);
  }
}
86 | 89 |
|
// Public surface of this module: the low-level ("raw") session/tensor API,
// exported under stable names so consumers are decoupled from the internal
// class names.
export default {
  RawSession: UserInferenceSession,
  RawTensor: UserTensor,
};
0 commit comments