3 changes: 3 additions & 0 deletions Dockerfile
@@ -10,6 +10,9 @@ RUN bun run build
FROM oven/bun:1.2.19-alpine AS runner
WORKDIR /app

# Install tzdata for timezone support
RUN apk add --no-cache tzdata

COPY ./package.json ./bun.lock ./
RUN bun install --frozen-lockfile --production --ignore-scripts --no-cache

33 changes: 29 additions & 4 deletions README.md
@@ -204,10 +204,35 @@ These endpoints are designed to be compatible with the Anthropic Messages API.

New endpoints for monitoring your Copilot usage and quotas.

| Endpoint | Method | Description |
| ------------ | ------ | ------------------------------------------------------------ |
| `GET /usage` | `GET` | Get detailed Copilot usage statistics and quota information. |
| `GET /token` | `GET` | Get the current Copilot token being used by the API. |
| Endpoint | Method | Description |
| ------------- | ------ | ------------------------------------------------------------ |
| `GET /usage` | `GET` | Get detailed Copilot usage statistics and quota information. |
| `GET /token` | `GET` | Get the current Copilot token being used by the API. |
| `GET /metrics` | `GET` | Prometheus metrics endpoint exposing token usage per model.  |

### Prometheus Metrics

The `/metrics` endpoint exposes Prometheus-compatible metrics for monitoring token usage:

**Available Metrics:**
- `copilot_api_tokens_in_total` - Total input tokens processed per model
- `copilot_api_tokens_out_total` - Total output tokens generated per model
- `copilot_api_requests_total` - Total number of requests per model and endpoint

Example metrics output:
```
# HELP copilot_api_tokens_in_total Total number of input tokens processed per model
# TYPE copilot_api_tokens_in_total counter
copilot_api_tokens_in_total{model="gpt-4o"} 1523

# HELP copilot_api_tokens_out_total Total number of output tokens generated per model
# TYPE copilot_api_tokens_out_total counter
copilot_api_tokens_out_total{model="gpt-4o"} 342

# HELP copilot_api_requests_total Total number of requests per model
# TYPE copilot_api_requests_total counter
copilot_api_requests_total{model="gpt-4o",endpoint="chat-completions"} 15
```
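To collect these metrics, point a Prometheus scrape job at the endpoint. A minimal `prometheus.yml` sketch, assuming the server is reachable at `localhost:4141` (substitute the host and port of your deployment):

```yaml
scrape_configs:
  - job_name: "copilot-api"
    # Pull /metrics every 30 seconds; adjust to taste.
    scrape_interval: 30s
    metrics_path: /metrics
    static_configs:
      # Hypothetical address; replace with wherever the server actually listens.
      - targets: ["localhost:4141"]
```

The counters are cumulative for the lifetime of the process, so rate-style queries such as `rate(copilot_api_tokens_out_total[5m])` give per-model token throughput.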

## Example Usage

9 changes: 9 additions & 0 deletions bun.lock
@@ -10,6 +10,7 @@
"fetch-event-stream": "^0.1.5",
"gpt-tokenizer": "^3.0.1",
"hono": "^4.9.9",
"prom-client": "^15.1.3",
"proxy-from-env": "^1.1.0",
"srvx": "^0.8.9",
"tiny-invariant": "^1.3.3",
@@ -114,6 +115,8 @@

"@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="],

"@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="],

"@oxc-project/types": ["@oxc-project/types@0.93.0", "", {}, "sha512-yNtwmWZIBtJsMr5TEfoZFDxIWV6OdScOpza/f5YxbqUMJk+j6QX3Cf3jgZShGEFYWQJ5j9mJ6jM0tZHu2J9Yrg=="],

"@oxc-resolver/binding-android-arm-eabi": ["@oxc-resolver/binding-android-arm-eabi@11.9.0", "", { "os": "android", "cpu": "arm" }, "sha512-4AxaG6TkSBQ2FiC5oGZEJQ35DjsSfAbW6/AJauebq4EzIPVOIgDJCF4de+PvX/Xi9BkNw6VtJuMXJdWW97iEAA=="],
@@ -274,6 +277,8 @@

"baseline-browser-mapping": ["baseline-browser-mapping@2.8.11", "", { "bin": { "baseline-browser-mapping": "dist/cli.js" } }, "sha512-i+sRXGhz4+QW8aACZ3+r1GAKMt0wlFpeA8M5rOQd0HEYw9zhDrlx9Wc8uQ0IdXakjJRthzglEwfB/yqIjO6iDg=="],

"bintrees": ["bintrees@1.0.2", "", {}, "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw=="],

"birecord": ["birecord@0.1.1", "", {}, "sha512-VUpsf/qykW0heRlC8LooCq28Kxn3mAqKohhDG/49rrsQ1dT1CXyj/pgXS+5BSRzFTR/3DyIBOqQOrGyZOh71Aw=="],

"birpc": ["birpc@2.6.1", "", {}, "sha512-LPnFhlDpdSH6FJhJyn4M0kFO7vtQ5iPw24FnG0y21q09xC7e8+1LeR31S1MAIrDAHp4m7aas4bEkTDTvMAtebQ=="],
@@ -776,6 +781,8 @@

"pretty-ms": ["pretty-ms@9.3.0", "", { "dependencies": { "parse-ms": "^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="],

"prom-client": ["prom-client@15.1.3", "", { "dependencies": { "@opentelemetry/api": "^1.4.0", "tdigest": "^0.1.1" } }, "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g=="],

"proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="],

"punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="],
@@ -896,6 +903,8 @@

"system-architecture": ["system-architecture@0.1.0", "", {}, "sha512-ulAk51I9UVUyJgxlv9M6lFot2WP3e7t8Kz9+IS6D4rVba1tR9kON+Ey69f+1R4Q8cd45Lod6a4IcJIxnzGc/zA=="],

"tdigest": ["tdigest@0.1.2", "", { "dependencies": { "bintrees": "1.0.2" } }, "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA=="],

"tiny-invariant": ["tiny-invariant@1.3.3", "", {}, "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg=="],

"tinyexec": ["tinyexec@1.0.1", "", {}, "sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw=="],