Skip to content

Commit 686c3e2

Browse files
committed
Remove all closeTo tests
1 parent: 90f7c09 · commit: 686c3e2

File tree

2 files changed

+9
-27
lines changed

2 files changed

+9
-27
lines changed

packages/ai/integration/chat.test.ts

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ import {
2424
SafetySetting,
2525
getGenerativeModel
2626
} from '../src';
27-
import { testConfigs, TOKEN_COUNT_DELTA } from './constants';
27+
import { testConfigs } from './constants';
2828

2929
describe('Chat Session', () => {
3030
testConfigs.forEach(testConfig => {
@@ -98,31 +98,19 @@ describe('Chat Session', () => {
9898

9999
if (model.model.includes('gemini-2.5-flash')) {
100100
// Token counts can vary slightly in chat context
101-
expect(response1.usageMetadata!.promptTokenCount).to.be.closeTo(
102-
17, // "What is the capital of France?" + system instruction
103-
TOKEN_COUNT_DELTA + 2 // More variance for chat context
104-
);
101+
expect(response1.usageMetadata!.promptTokenCount).to.not.equal(0);
105102
expect(response1.usageMetadata!.candidatesTokenCount).to.not.equal(0);
106103
expect(response1.usageMetadata!.totalTokenCount).to.not.equal(0);
107-
expect(response2.usageMetadata!.promptTokenCount).to.be.closeTo(
108-
32, // History + "And what about Italy?" + system instruction
109-
TOKEN_COUNT_DELTA + 5 // More variance for chat context with history
110-
);
104+
expect(response2.usageMetadata!.promptTokenCount).to.not.equal(0);
111105
expect(response2.usageMetadata!.candidatesTokenCount).to.not.equal(0);
112106
expect(response2.usageMetadata!.totalTokenCount).to.not.equal(0);
113107
} else if (model.model.includes('gemini-2.0-flash')) {
114108
expect(response1.usageMetadata).to.not.be.null;
115109
// Token counts can vary slightly in chat context
116-
expect(response1.usageMetadata!.promptTokenCount).to.be.closeTo(
117-
15, // "What is the capital of France?" + system instruction
118-
TOKEN_COUNT_DELTA + 2 // More variance for chat context
119-
);
110+
expect(response1.usageMetadata!.promptTokenCount).to.not.equal(0);
120111
expect(response1.usageMetadata!.candidatesTokenCount).to.not.equal(0);
121112
expect(response1.usageMetadata!.totalTokenCount).to.not.equal(0);
122-
expect(response2.usageMetadata!.promptTokenCount).to.be.closeTo(
123-
28, // History + "And what about Italy?" + system instruction
124-
TOKEN_COUNT_DELTA + 5 // More variance for chat context with history
125-
);
113+
expect(response2.usageMetadata!.promptTokenCount).to.not.equal(0);
126114
expect(response2.usageMetadata!.candidatesTokenCount).to.not.equal(0);
127115
expect(response2.usageMetadata!.totalTokenCount).to.not.equal(0);
128116
}

packages/ai/integration/generate-content.test.ts

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ import {
2929
URLRetrievalStatus,
3030
getGenerativeModel
3131
} from '../src';
32-
import { testConfigs, TOKEN_COUNT_DELTA } from './constants';
32+
import { testConfigs } from './constants';
3333

3434
describe('Generate Content', function () {
3535
this.timeout(20_000);
@@ -88,10 +88,7 @@ describe('Generate Content', function () {
8888
expect(response.usageMetadata).to.not.be.null;
8989

9090
if (model.model.includes('gemini-2.5-flash')) {
91-
expect(response.usageMetadata!.promptTokenCount).to.be.closeTo(
92-
22,
93-
TOKEN_COUNT_DELTA
94-
);
91+
expect(response.usageMetadata!.promptTokenCount).to.not.equal(0);
9592
expect(response.usageMetadata!.candidatesTokenCount).to.not.equal(0);
9693
expect(response.usageMetadata!.thoughtsTokenCount).to.not.equal(0);
9794
expect(response.usageMetadata!.totalTokenCount).to.not.equal(0);
@@ -104,14 +101,11 @@ describe('Generate Content', function () {
104101
).to.equal(Modality.TEXT);
105102
expect(
106103
response.usageMetadata!.promptTokensDetails![0].tokenCount
107-
).to.closeTo(22, TOKEN_COUNT_DELTA);
104+
).to.not.equal(0);
108105

109106
// candidatesTokenDetails comes back about half the time, so let's just not test it.
110107
} else if (model.model.includes('gemini-2.0-flash')) {
111-
expect(response.usageMetadata!.promptTokenCount).to.be.closeTo(
112-
21,
113-
TOKEN_COUNT_DELTA
114-
);
108+
expect(response.usageMetadata!.promptTokenCount).to.not.equal(0);
115109
expect(response.usageMetadata!.candidatesTokenCount).to.not.equal(0);
116110
expect(response.usageMetadata!.totalTokenCount).to.not.equal(0);
117111
expect(response.usageMetadata!.promptTokensDetails).to.not.be.null;

0 commit comments

Comments (0)