From 635e6c7caadef3da2fecb5564f7fd74ccc90cac5 Mon Sep 17 00:00:00 2001 From: Alka Trivedi Date: Fri, 31 Oct 2025 12:00:35 +0530 Subject: [PATCH] feat: multiplexed session as default session mode --- .github/sync-repo-settings.yaml | 2 +- ...on.cfg => system-test-regular-session.cfg} | 6 +- observability-test/helper.ts | 15 +- observability-test/spanner.ts | 320 +- src/index.ts | 2 +- src/multiplexed-session.ts | 6 +- src/session-factory.ts | 59 +- system-test/spanner.ts | 6 +- test/database.ts | 2757 ++++++++--------- test/metrics/metrics.ts | 12 +- test/session-factory.ts | 101 +- test/session-pool.ts | 8 + test/spanner.ts | 1610 +++++----- test/transaction.ts | 310 +- 14 files changed, 2502 insertions(+), 2712 deletions(-) rename .kokoro/presubmit/node18/{system-test-multiplexed-session.cfg => system-test-regular-session.cfg} (92%) diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml index bd2862b92..b5ae23b24 100644 --- a/.github/sync-repo-settings.yaml +++ b/.github/sync-repo-settings.yaml @@ -7,7 +7,7 @@ branchProtectionRules: requiredStatusCheckContexts: - "ci/kokoro: Samples test" - "ci/kokoro: System test" - - "ci/kokoro: System test with Multiplexed Session" + - "ci/kokoro: System test with Regular Sessions" - lint - test (18) - test (20) diff --git a/.kokoro/presubmit/node18/system-test-multiplexed-session.cfg b/.kokoro/presubmit/node18/system-test-regular-session.cfg similarity index 92% rename from .kokoro/presubmit/node18/system-test-multiplexed-session.cfg rename to .kokoro/presubmit/node18/system-test-regular-session.cfg index a2d946b3c..8ff7864ce 100644 --- a/.kokoro/presubmit/node18/system-test-multiplexed-session.cfg +++ b/.kokoro/presubmit/node18/system-test-regular-session.cfg @@ -13,15 +13,15 @@ env_vars: { env_vars: { key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS" - value: "true" + value: "false" } env_vars: { key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS" - value: "true" + value: "false" } env_vars: { key: "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW" - value: "true" + value: "false" } \ No newline at end of file diff --git a/observability-test/helper.ts b/observability-test/helper.ts index 46006ee1b..c707f9eab 100644 --- a/observability-test/helper.ts +++ b/observability-test/helper.ts @@ -19,6 +19,11 @@ import * as assert from 'assert'; const {ReadableSpan} = require('@opentelemetry/sdk-trace-base'); import {SEMATTRS_DB_NAME} from '@opentelemetry/semantic-conventions'; +export const createSessionEvents = [ + 'Requesting a multiplexed session', + 'Created a multiplexed session', +]; + export const batchCreateSessionsEvents = [ 'Requesting 25 sessions', 'Creating 25 sessions', @@ -26,16 +31,14 @@ export const batchCreateSessionsEvents = [ ]; export const waitingSessionsEvents = [ - 'Acquiring session', - 'Waiting for a session to become available', - 'Acquired session', + 'Waiting for a multiplexed session to become available', + 'Acquired multiplexed session', 'Using Session', ]; export const cacheSessionEvents = [ - 'Acquiring session', - 'Cache hit: has usable session', - 'Acquired session', + 'Cache hit: has usable multiplexed session', + 'Acquired multiplexed session', ]; /** diff --git a/observability-test/spanner.ts b/observability-test/spanner.ts index b11c6cf22..d298c7d7d 100644 --- a/observability-test/spanner.ts +++ b/observability-test/spanner.ts @@ -41,7 +41,7 @@ const { generateWithAllSpansHaveDBName, setGlobalContextManager, verifySpansAndEvents, - batchCreateSessionsEvents, + createSessionEvents, 
waitingSessionsEvents, cacheSessionEvents, } = require('./helper'); @@ -100,7 +100,6 @@ async function setup( sandbox?: sinon.SinonSandbox, ): Promise { const server = new grpc.Server(); - const spannerMock = mock.createMockSpanner(server); mockInstanceAdmin.createMockInstanceAdmin(server); mockDatabaseAdmin.createMockDatabaseAdmin(server); @@ -190,7 +189,7 @@ describe('EndToEnd', async () => { beforeEach(async () => { // To deflake expectations of session creation, let's // issue out a warm-up request request that'll ensure - // that the SessionPool is created deterministically. + // that the MultiplexedSession is created deterministically. await database.run('SELECT 1'); // Clear out any present traces to make a clean slate for testing. traceExporter.forceFlush(); @@ -526,7 +525,7 @@ describe('ObservabilityOptions injection and propagation', async () => { // To deflake expectations of session creation, let's // issue out a warm-up request request that'll ensure - // that the SessionPool is created deterministically. + // that the MultiplexedSession is created deterministically. await database.run('SELECT 1'); // Clear out any present traces to make a clean slate for testing. traceExporter.forceFlush(); @@ -542,54 +541,52 @@ describe('ObservabilityOptions injection and propagation', async () => { db.formattedName_, ); - it('run', done => { - database.getTransaction((err, tx) => { - assert.ifError(err); - - tx!.run('SELECT 1', async () => { - tx!.end(); - - await tracerProvider.forceFlush(); - traceExporter.forceFlush(); - - const spans = traceExporter.getFinishedSpans(); - withAllSpansHaveDBName(spans); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span => { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); + it('run', async () => { + let txn; + try { + [txn] = await database.getTransaction(); + await txn.run('SELECT 1'); + await tracerProvider.forceFlush(); + traceExporter.forceFlush(); + + const spans = traceExporter.getFinishedSpans(); + withAllSpansHaveDBName(spans); + + const actualSpanNames: string[] = []; + const actualEventNames: string[] = []; + spans.forEach(span => { + actualSpanNames.push(span.name); + span.events.forEach(event => { + actualEventNames.push(event.name); }); - - const expectedSpanNames = [ - 'CloudSpanner.Database.getTransaction', - 'CloudSpanner.Snapshot.runStream', - 'CloudSpanner.Snapshot.run', - ]; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - const expectedEventNames = [ - ...cacheSessionEvents, - 'Using Session', - 'Starting stream', - 'Transaction Creation Done', - ]; - assert.strictEqual( - actualEventNames.every(value => expectedEventNames.includes(value)), - true, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - - done(); }); - }); + const expectedSpanNames = [ + 'CloudSpanner.Database.getTransaction', + 'CloudSpanner.Snapshot.runStream', + 'CloudSpanner.Snapshot.run', + ]; + assert.deepStrictEqual( + actualSpanNames, + expectedSpanNames, + `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, + ); + + const expectedEventNames = [ + ...cacheSessionEvents, + 'Using Session', + 'Starting stream', + 'Transaction Creation Done', + ]; + assert.strictEqual( + actualEventNames.every(value => expectedEventNames.includes(value)), + true, + `Unexpected events:\n\tGot: 
${actualEventNames}\n\tWant: ${expectedEventNames}`, + ); + } catch (err) { + assert.ifError(err); + } finally { + txn.end(); + } }); it('Transaction.begin+Dml.runUpdate', done => { @@ -787,14 +784,6 @@ describe('ObservabilityOptions injection and propagation', async () => { const server = setupResult.server; const spannerMock = setupResult.spannerMock; - after(async () => { - injectedTraceExporter.reset(); - await injectedTracerProvider.shutdown(); - spannerMock.resetRequests(); - spanner.close(); - server.tryShutdown(() => {}); - }); - const instance = spanner.instance('instance'); const database = instance.database('database'); @@ -802,14 +791,12 @@ describe('ObservabilityOptions injection and propagation', async () => { database.formattedName_, ); - database.run('SELECT 1', err => { - assert.ifError(err); - + try { + await database.run('SELECT 1'); injectedTraceExporter.forceFlush(); globalTraceExporter.forceFlush(); const spansFromInjected = injectedTraceExporter.getFinishedSpans(); const spansFromGlobal = globalTraceExporter.getFinishedSpans(); - assert.strictEqual( spansFromGlobal.length, 0, @@ -833,10 +820,9 @@ describe('ObservabilityOptions injection and propagation', async () => { actualEventNames.push(event.name); }); }); - const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Snapshot.runStream', 'CloudSpanner.Database.runStream', 'CloudSpanner.Database.run', @@ -846,9 +832,8 @@ describe('ObservabilityOptions injection and propagation', async () => { expectedSpanNames, `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, ); - const expectedEventNames = [ - ...batchCreateSessionsEvents, + ...createSessionEvents, 'Starting stream', ...waitingSessionsEvents, ]; @@ -857,7 +842,15 @@ describe('ObservabilityOptions injection and propagation', async () => { expectedEventNames, `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, ); - }); + } catch (err) { + assert.ifError(err); + } finally { + injectedTraceExporter.reset(); + await injectedTracerProvider.shutdown(); + spannerMock.resetRequests(); + spanner.close(); + server.tryShutdown(() => {}); + } }); }); @@ -913,8 +906,8 @@ describe('E2E traces with async/await', async () => { }); const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Snapshot.runStream', 'CloudSpanner.Database.runStream', 'CloudSpanner.Database.run', @@ -957,43 +950,43 @@ describe('E2E traces with async/await', async () => { 'Expected that runSpan has a defined spanId', ); - const databaseBatchCreateSessionsSpan = spans[0]; + const databaseCreateSessionSpan = spans[0]; assert.strictEqual( - databaseBatchCreateSessionsSpan.name, - 'CloudSpanner.Database.batchCreateSessions', + databaseCreateSessionSpan.name, + 'CloudSpanner.Database.createSession', ); - const sessionPoolCreateSessionsSpan = spans[1]; + const multiplexedSessionCreateSessionSpan = spans[1]; assert.strictEqual( - sessionPoolCreateSessionsSpan.name, - 'CloudSpanner.SessionPool.createSessions', + multiplexedSessionCreateSessionSpan.name, + 'CloudSpanner.MultiplexedSession.createSession', ); assert.ok( - sessionPoolCreateSessionsSpan.spanContext().traceId, - 'Expecting a defined sessionPoolCreateSessions 
traceId',
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        'Expecting a defined multiplexedSessionCreateSession traceId',
      );
      assert.deepStrictEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
-        databaseBatchCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        databaseCreateSessionSpan.spanContext().traceId,
        'Expected the same traceId',
      );
      assert.deepStrictEqual(
-        databaseBatchCreateSessionsSpan.parentSpanContext.spanId,
-        sessionPoolCreateSessionsSpan.spanContext().spanId,
-        'Expected that sessionPool.createSessions is the parent to db.batchCreassionSessions',
+        databaseCreateSessionSpan.parentSpanContext.spanId,
+        multiplexedSessionCreateSessionSpan.spanContext().spanId,
+        'Expected that multiplexedSession.createSession is the parent to db.createSession',
      );
-      // Assert that despite all being exported, SessionPool.createSessions
+      // Assert that despite all being exported, MultiplexedSession.createSession
      // is not in the same trace as runStream, createSessions is invoked at
      // Spanner Client instantiation, thus before database.run is invoked.
      assert.notEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
        runSpan.spanContext().traceId,
        'Did not expect the same traceId',
      );

      // Finally check for the collective expected event names.
      const expectedEventNames = [
-        ...batchCreateSessionsEvents,
+        ...createSessionEvents,
        'Starting stream',
        ...waitingSessionsEvents,
      ];
@@ -1134,8 +1127,8 @@ SELECT 1p
      });

      const expectedSpanNames = [
-        'CloudSpanner.Database.batchCreateSessions',
-        'CloudSpanner.SessionPool.createSessions',
+        'CloudSpanner.Database.createSession',
+        'CloudSpanner.MultiplexedSession.createSession',
        'CloudSpanner.Snapshot.runStream',
        'CloudSpanner.Database.runStream',
        'CloudSpanner.Database.run',
      ];
@@ -1178,36 +1171,36 @@ SELECT 1p
        'Expected that runSpan has a defined spanId',
      );

-      const databaseBatchCreateSessionsSpan = spans[0];
+      const databaseCreateSessionSpan = spans[0];
      assert.strictEqual(
-        databaseBatchCreateSessionsSpan.name,
-        'CloudSpanner.Database.batchCreateSessions',
+        databaseCreateSessionSpan.name,
+        'CloudSpanner.Database.createSession',
      );
-      const sessionPoolCreateSessionsSpan = spans[1];
+      const multiplexedSessionCreateSessionSpan = spans[1];
      assert.strictEqual(
-        sessionPoolCreateSessionsSpan.name,
-        'CloudSpanner.SessionPool.createSessions',
+        multiplexedSessionCreateSessionSpan.name,
+        'CloudSpanner.MultiplexedSession.createSession',
      );
      assert.ok(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
-        'Expecting a defined sessionPoolCreateSessions traceId',
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        'Expecting a defined multiplexedSessionCreateSession traceId',
      );
      assert.deepStrictEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
-        databaseBatchCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        databaseCreateSessionSpan.spanContext().traceId,
        'Expected the same traceId',
      );
      assert.deepStrictEqual(
-        databaseBatchCreateSessionsSpan.parentSpanContext.spanId,
-        sessionPoolCreateSessionsSpan.spanContext().spanId,
-        'Expected that sessionPool.createSessions is the parent to db.batchCreassionSessions',
+        databaseCreateSessionSpan.parentSpanContext.spanId,
+        multiplexedSessionCreateSessionSpan.spanContext().spanId,
+        'Expected that multiplexedSession.createSession is the parent to db.createSession',
      );
-      // Assert that despite all being exported, SessionPool.createSessions
+      // Assert that despite all being exported, MultiplexedSession.createSession
      // is not in the same trace as runStream, createSessions is invoked at
      // Spanner Client instantiation, thus before database.run is invoked.
      assert.notEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
        runSpan.spanContext().traceId,
        'Did not expect the same traceId',
      );
@@ -1228,7 +1221,7 @@ SELECT 1p
      // Finally check for the collective expected event names.
      const expectedEventNames = [
-        ...batchCreateSessionsEvents,
+        ...createSessionEvents,
        'Starting stream',
        ...waitingSessionsEvents,
      ];
@@ -1284,8 +1277,8 @@ SELECT 1p
      });

      const expectedSpanNames = [
-        'CloudSpanner.Database.batchCreateSessions',
-        'CloudSpanner.SessionPool.createSessions',
+        'CloudSpanner.Database.createSession',
+        'CloudSpanner.MultiplexedSession.createSession',
        'CloudSpanner.Snapshot.runStream',
        'CloudSpanner.Snapshot.run',
        'CloudSpanner.Snapshot.begin',
@@ -1313,29 +1306,29 @@ SELECT 1p
        'Unexpexcted error message',
      );

-      const databaseBatchCreateSessionsSpan = spans[0];
+      const databaseCreateSessionSpan = spans[0];
      assert.strictEqual(
-        databaseBatchCreateSessionsSpan.name,
-        'CloudSpanner.Database.batchCreateSessions',
+        databaseCreateSessionSpan.name,
+        'CloudSpanner.Database.createSession',
      );
-      const sessionPoolCreateSessionsSpan = spans[1];
+      const multiplexedSessionCreateSessionSpan = spans[1];
      assert.strictEqual(
-        sessionPoolCreateSessionsSpan.name,
-        'CloudSpanner.SessionPool.createSessions',
+        multiplexedSessionCreateSessionSpan.name,
+        'CloudSpanner.MultiplexedSession.createSession',
      );
      assert.ok(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
-        'Expecting a defined sessionPoolCreateSessions traceId',
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        'Expecting a defined multiplexedSessionCreateSession traceId',
      );
      assert.deepStrictEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
-        databaseBatchCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
+        databaseCreateSessionSpan.spanContext().traceId,
        'Expected the same traceId',
      );
      assert.deepStrictEqual(
-        databaseBatchCreateSessionsSpan.parentSpanContext.spanId,
-        sessionPoolCreateSessionsSpan.spanContext().spanId,
-        'Expected that sessionPool.createSessions is the parent to db.batchCreassionSessions',
+        databaseCreateSessionSpan.parentSpanContext.spanId,
+        multiplexedSessionCreateSessionSpan.spanContext().spanId,
+        'Expected that multiplexedSession.createSession is the parent to db.createSession',
      );

      // We need to ensure a strict relationship between the spans.
@@ -1368,18 +1361,18 @@ SELECT 1p
        'Expected that Database.runTransaction is the parent to Snapshot.run',
      );

-      // Assert that despite all being exported, SessionPool.createSessions
+      // Assert that despite all being exported, MultiplexedSession.createSession
      // is not in the same trace as runStream, createSessions is invoked at
      // Spanner Client instantiation, thus before database.run is invoked.
      assert.notEqual(
-        sessionPoolCreateSessionsSpan.spanContext().traceId,
+        multiplexedSessionCreateSessionSpan.spanContext().traceId,
        spanDatabaseRunTransactionAsync.spanContext().traceId,
        'Did not expect the same traceId',
      );

      // Finally check for the collective expected event names.
      const expectedEventNames = [
-        ...batchCreateSessionsEvents,
+        ...createSessionEvents,
        'Starting stream',
        'Stream broken. 
Safe to retry', 'Begin Transaction', @@ -1510,7 +1503,6 @@ describe('Traces for ExecuteStream broken stream retries', () => { tracerProvider: tracerProvider, enableExtendedTracing: true, }; - spanner = new Spanner({ servicePath: 'localhost', port, @@ -1536,6 +1528,9 @@ describe('Traces for ExecuteStream broken stream retries', () => { }); describe('PartialResultStream', () => { + beforeEach(() => { + traceExporter.reset(); + }); const streamIndexes = [1, 2]; streamIndexes.forEach(index => { it('should retry UNAVAILABLE during streaming', async () => { @@ -1757,8 +1752,8 @@ describe('Traces for ExecuteStream broken stream retries', () => { }); const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Snapshot.runStream', 'CloudSpanner.Database.runStream', ]; @@ -1770,7 +1765,7 @@ describe('Traces for ExecuteStream broken stream retries', () => { // Finally check for the collective expected event names. const expectedEventNames = [ - ...batchCreateSessionsEvents, + ...createSessionEvents, 'Starting stream', 'Transaction Creation Done', ...waitingSessionsEvents, @@ -1823,8 +1818,8 @@ describe('Traces for ExecuteStream broken stream retries', () => { }); const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Snapshot.runStream', 'CloudSpanner.Database.runStream', 'CloudSpanner.Database.run', @@ -1837,7 +1832,7 @@ describe('Traces for ExecuteStream broken stream retries', () => { // Finally check for the collective expected event names. const expectedEventNames = [ - ...batchCreateSessionsEvents, + ...createSessionEvents, 'Starting stream', 'Re-attempting start stream', 'Resuming stream', @@ -1885,8 +1880,8 @@ describe('Traces for ExecuteStream broken stream retries', () => { }); const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Snapshot.runStream', 'CloudSpanner.Snapshot.run', 'CloudSpanner.Dml.runUpdate', @@ -1903,7 +1898,7 @@ describe('Traces for ExecuteStream broken stream retries', () => { // Finally check for the collective expected event names. 
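      // (Note: with multiplexed sessions as the default, the session-creation
      // events asserted in these retry tests are the two createSessionEvents
      // from helper.ts, 'Requesting a multiplexed session' and 'Created a
      // multiplexed session', rather than the old batchCreateSessionsEvents.)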
const expectedEventNames = [ - ...batchCreateSessionsEvents, + ...createSessionEvents, 'Starting stream', 'Re-attempting start stream', 'Begin Transaction', @@ -1954,13 +1949,13 @@ describe('Traces for ExecuteStream broken stream retries', () => { 'runTransactionAsync.attempt must be 1', ); const expectedSpanNames = [ - 'CloudSpanner.Database.batchCreateSessions', - 'CloudSpanner.SessionPool.createSessions', + 'CloudSpanner.Database.createSession', + 'CloudSpanner.MultiplexedSession.createSession', 'CloudSpanner.Database.runTransactionAsync', ]; const expectedEventNames = [ - ...batchCreateSessionsEvents, + ...createSessionEvents, ...waitingSessionsEvents, ]; await verifySpansAndEvents( @@ -1997,35 +1992,36 @@ describe('End to end tracing headers', () => { sandbox.restore(); }); - it('run', done => { + it('run', async () => { const instance = spanner.instance('instance'); const database = instance.database('database'); - database.getTransaction((err, tx) => { - assert.ifError(err); - - tx!.run('SELECT 1', async () => { - tx!.end(); - let metadataCountWithE2EHeader = 0; - let metadataCountWithTraceParent = 0; - spannerMock.getMetadata().forEach(metadata => { - if (metadata.get(END_TO_END_TRACING_HEADER)[0] !== undefined) { - metadataCountWithE2EHeader++; - assert.strictEqual( - metadata.get(END_TO_END_TRACING_HEADER)[0], - 'true', - ); - } - if (metadata.get('traceparent')[0] !== undefined) { - metadataCountWithTraceParent++; - } - }); - - // Batch Create Session request and Select 1 request. - assert.strictEqual(spannerMock.getRequests().length, 2); - assert.strictEqual(metadataCountWithE2EHeader, 2); - assert.strictEqual(metadataCountWithTraceParent, 2); - done(); + let txn; + try { + [txn] = await database.getTransaction(); + await txn.run('SELECT 1'); + let metadataCountWithE2EHeader = 0; + let metadataCountWithTraceParent = 0; + spannerMock.getMetadata().forEach(metadata => { + if (metadata.get(END_TO_END_TRACING_HEADER)[0] !== undefined) { + metadataCountWithE2EHeader++; + assert.strictEqual( + metadata.get(END_TO_END_TRACING_HEADER)[0], + 'true', + ); + } + if (metadata.get('traceparent')[0] !== undefined) { + metadataCountWithTraceParent++; + } }); - }); + + // Create Session for multiplexed session(default) and Select 1 request. + assert.strictEqual(spannerMock.getRequests().length, 2); + assert.strictEqual(metadataCountWithE2EHeader, 2); + assert.strictEqual(metadataCountWithTraceParent, 2); + } catch (err) { + assert.ifError(err); + } finally { + txn.end(); + } }); }); diff --git a/src/index.ts b/src/index.ts index 339af94ce..4d5c90b53 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1840,7 +1840,7 @@ class Spanner extends GrpcService { metricsTracer = MetricsTracerFactory?.getInstance(this.projectId_)?.createMetricsTracer( config.method, - config.reqOpts.session ?? config.reqOpts.database, + config.reqOpts.database ?? config.reqOpts.session, config.headers['x-goog-spanner-request-id'], ) ?? 
        null;
  }
diff --git a/src/multiplexed-session.ts b/src/multiplexed-session.ts
index c2a83f8a6..0d1bc2c02 100644
--- a/src/multiplexed-session.ts
+++ b/src/multiplexed-session.ts
@@ -126,9 +126,7 @@ export class MultiplexedSession
        multiplexed: true,
      });
      this._multiplexedSession = createSessionResponse;
-      span.addEvent(
-        `Created multiplexed session ${this._multiplexedSession.id}`,
-      );
+      span.addEvent('Created a multiplexed session');
      this.emit(MUX_SESSION_AVAILABLE);
    } catch (e) {
      setSpanError(span, e as Error);
@@ -197,7 +195,9 @@ export class MultiplexedSession
   *
   */
  async _acquire(): Promise {
+    const span = getActiveOrNoopSpan();
    const session = await this._getSession();
+    span.addEvent('Acquired multiplexed session');
    return session;
  }
diff --git a/src/session-factory.ts b/src/session-factory.ts
index 7b97e5703..a4c446e72 100644
--- a/src/session-factory.ts
+++ b/src/session-factory.ts
@@ -135,34 +135,37 @@ export class SessionFactory
      typeof poolOptions === 'function'
        ? new (poolOptions as SessionPoolConstructor)(database, null)
        : new SessionPool(database, poolOptions);
-    this.pool_.on('error', this.emit.bind(database, 'error'));
-    this.pool_.open();
    this.multiplexedSession_ = new MultiplexedSession(database);
-    // set the isMultiplexed property to true if multiplexed session is enabled, otherwise set the property to false
-    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'true'
-      ? (this.isMultiplexed = true)
-      : (this.isMultiplexed = false);
-    // set the isMultiplexedPartitionedOps property to true if multiplexed session is enabled for paritioned ops, otherwise set the property to false
-    this.isMultiplexedPartitionOps =
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'true' &&
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS ===
-        'true';
+    this.multiplexedSession_.on('error', this.emit.bind(database, 'error'));
+    this.multiplexedSession_.createSession();
+    // set the isMultiplexed property to false if multiplexed sessions are disabled, otherwise set the property to true
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false'
+      ? (this.isMultiplexed = false)
+      : (this.isMultiplexed = true);
+    // set the isMultiplexedPartitionOps property to false if multiplexed sessions are disabled for partitioned ops, otherwise set the property to true
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false' &&
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS ===
+      'false'
+      ? (this.isMultiplexedPartitionOps = false)
+      : (this.isMultiplexedPartitionOps = true);

-    this.isMultiplexedRW =
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'true' &&
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW === 'true';
-    // Multiplexed sessions should only be created if its enabled.
-    if (this.isMultiplexed) {
-      this.multiplexedSession_.on('error', this.emit.bind(database, 'error'));
-      this.multiplexedSession_.createSession();
+    // set the isMultiplexedRW property to false if multiplexed sessions are disabled for read/write, otherwise set the property to true
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false' &&
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW === 'false'
+      ? (this.isMultiplexedRW = false)
+      : (this.isMultiplexedRW = true);
+    // Regular sessions should only be created if multiplexed sessions are disabled.
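+    // For example: in the kokoro regular-session presubmit config above, all
+    // three env vars are set to 'false', so isMultiplexed,
+    // isMultiplexedPartitionOps and isMultiplexedRW all become false and the
+    // regular SessionPool below is opened; with none of the env vars set, all
+    // three flags default to true and no regular pool is created.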
+    if (!this.isMultiplexed) {
+      this.pool_.on('error', this.emit.bind(database, 'error'));
+      this.pool_.open();
+    }
  }

  /**
   * Retrieves a session, either a regular session or a multiplexed session, based on the environment variable configuration.
   *
-   * If the environment variable `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS` is set to `true`, the method will attempt to
-   * retrieve a multiplexed session. Otherwise, it will retrieve a session from the regular pool.
+   * If the environment variable `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS` is set to `false`, the method will attempt to
+   * retrieve a regular session. Otherwise, it will return a multiplexed session.
   *
   * @param {GetSessionCallback} callback The callback function.
   */
@@ -179,12 +182,13 @@ export class SessionFactory
  /**
   * Retrieves a session for partitioned operations, selecting the appropriate session type
-   * based on whether multiplexed sessions are enabled.
+   * based on whether multiplexed sessions are enabled or disabled.
   *
-   * If multiplexed sessions are enabled for partitioned ops this methods delegates the request to `getSession()`, which returns
-   * either a multiplexed session or a regular session based on the configuration.
+   * If multiplexed sessions are disabled for partitioned ops this method delegates the request to `getSession()`, which returns
+   * either a multiplexed session or a regular session based on the env configuration.
   *
-   * If the multiplexed sessions are disabled, a session is retrieved from the regular session pool.
+   * If multiplexed sessions are disabled, a session is retrieved from the regular session pool.
+   * Otherwise, a multiplexed session is used.
   *
   * @param {GetSessionCallback} callback The callback function.
   */
@@ -198,10 +202,11 @@ export class SessionFactory
  /**
   * Retrieves a session for read write operations, selecting the appropriate session type
   * based on whether multiplexed sessions are enabled.
   *
-   * If multiplexed sessions are enabled for read write this methods delegates the request to `getSession()`, which returns
-   * either a multiplexed session or a regular session based on the configuration.
+   * If multiplexed sessions are disabled for read write this method delegates the request to `getSession()`, which returns
+   * either a multiplexed session or a regular session based on the env configuration.
   *
-   * If the multiplexed sessions are disabled, a session is retrieved from the regular session pool.
+   * If multiplexed sessions are disabled, a session is retrieved from the regular session pool.
+   * Otherwise, a multiplexed session is used.
   *
   * @param {GetSessionCallback} callback The callback function.
   */
diff --git a/system-test/spanner.ts b/system-test/spanner.ts
index 5a54093d8..0cc2b8404 100644
--- a/system-test/spanner.ts
+++ b/system-test/spanner.ts
@@ -1246,9 +1246,9 @@ describe('Spanner', () => {
    const numericInsertOutOfBounds = (done, dialect, value) => {
      insert({NumericValue: value}, dialect, err => {
-        KOKORO_JOB_NAME?.includes('system-test-multiplexed-session')
-          ? assert.strictEqual(err.code, grpc.status.INVALID_ARGUMENT)
-          : assert.strictEqual(err.code, grpc.status.FAILED_PRECONDITION);
+        KOKORO_JOB_NAME?.includes('system-test-regular-session')
+          ? 
assert.strictEqual(err.code, grpc.status.FAILED_PRECONDITION) + : assert.strictEqual(err.code, grpc.status.INVALID_ARGUMENT); done(); }); }; diff --git a/test/database.ts b/test/database.ts index 04e485903..775daca78 100644 --- a/test/database.ts +++ b/test/database.ts @@ -154,12 +154,12 @@ export class FakeSessionFactory extends EventEmitter { } release() {} isMultiplexedEnabled(): boolean { - return process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'true'; + return process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS! === 'false'; } isMultiplexedEnabledForRW(): boolean { return ( - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'true' && - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW === 'true' + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS! === 'false' && + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW! === 'false' ); } } @@ -352,21 +352,30 @@ describe('Database', () => { assert(database.pool_ instanceof FakeSessionPool); }); - it('should re-emit SessionPool errors', done => { - const error = new Error('err'); - - const sessionFactory = new SessionFactory(database, NAME); + describe('when multiplexed session is disabled', () => { + before(() => { + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; + }); - database.on('error', err => { - assert.strictEqual(err, error); - done(); + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; }); - sessionFactory.pool_.emit('error', error); + it('should re-emit SessionPool errors', done => { + const error = new Error('err'); + + const sessionFactory = new SessionFactory(database, NAME); + + database.on('error', err => { + assert.strictEqual(err, error); + done(); + }); + + sessionFactory.pool_.emit('error', error); + }); }); it('should re-emit Multiplexed Session errors', done => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; const error = new Error('err'); const sessionFactory = new SessionFactory(database, NAME); @@ -671,201 +680,167 @@ describe('Database', () => { gaxOptions: {autoPaginate: false}, } as BatchWriteOptions; - // muxEnabled[i][0] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS - // muxEnabled[i][1] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW - const muxEnabled = [ - [true, true], - [true, false], - [false, true], - [false, false], - ]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled[0] ? 'enabled' : 'disable'}` + - ' and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is ' + - `${isMuxEnabled[1] ? 
'enabled' : 'disable'}`, - () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - isMuxEnabled[0].toString(); - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = - isMuxEnabled[1].toString(); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - fakeSession = new FakeSession(); - fakeDataStream = through.obj(); - - getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub - ).callsFake(callback => callback(null, fakeSession)); - - requestStreamStub = sandbox - .stub(database, 'requestStream') - .returns(fakeDataStream); - }); - - it('should get a session via `getSessionForReadWrite`', done => { - getSessionStub.callsFake(() => {}); - database.batchWriteAtLeastOnce(mutationGroups, options); - assert.strictEqual(getSessionStub.callCount, 1); - done(); - }); - - it('should destroy the stream if `getSessionForReadWrite` errors', done => { - const fakeError = new Error('err'); - - getSessionStub.callsFake(callback => callback(fakeError)); - database - .batchWriteAtLeastOnce(mutationGroups, options) - .on('error', err => { - assert.strictEqual(err, fakeError); - done(); - }); - }); - - it('should call `requestStream` with correct arguments', () => { - const expectedGaxOpts = extend(true, {}, options?.gaxOptions); - const expectedReqOpts = Object.assign( - {} as google.spanner.v1.BatchWriteRequest, - { - session: fakeSession!.formattedName_!, - mutationGroups: mutationGroups.map(mg => mg.proto()), - requestOptions: options?.requestOptions, - excludeTxnFromChangeStream: - options?.excludeTxnFromChangeStreams, - }, - ); - - database.batchWriteAtLeastOnce(mutationGroups, options); - - assert.strictEqual(requestStreamStub.callCount, 1); - const args = requestStreamStub.firstCall.args[0]; - assert.strictEqual(args.client, 'SpannerClient'); - assert.strictEqual(args.method, 'batchWrite'); - assert.deepStrictEqual(args.reqOpts, expectedReqOpts); - assert.deepStrictEqual(args.gaxOpts, expectedGaxOpts); - assert.deepStrictEqual(args.headers, database.commonHeaders_); - }); - - it('should return error when passing an empty list of mutationGroups', done => { - const fakeError = new Error('err'); - database.batchWriteAtLeastOnce([], options).on('error', error => { - assert.strictEqual(error, fakeError); - done(); - }); - fakeDataStream.emit('error', fakeError); - }); - - it('should return data when passing a valid list of mutationGroups', done => { - database - .batchWriteAtLeastOnce(mutationGroups, options) - .on('data', data => { - assert.strictEqual(data, 'test'); - done(); - }); - fakeDataStream.emit('data', 'test'); - }); - - it('should emit correct event based on valid/invalid list of mutationGroups', done => { - const fakeError = new Error('err'); - const FakeMutationGroup1 = new MutationGroup(); - FakeMutationGroup1.insert('Singers', { - SingerId: 1, - FirstName: 'Scarlet', - LastName: 'Terry', - }); - FakeMutationGroup1.insert('Singers', { - SingerId: 1000000000000000000000000000000000, - FirstName: 'Scarlet', - LastName: 'Terry', - }); - - const FakeMutationGroup2 = new MutationGroup(); - FakeMutationGroup2.insert('Singers', { - SingerId: 2, - FirstName: 'Marc', - }); - FakeMutationGroup2.insert('Singers', { - SingerId: 3, - FirstName: 'Catalina', - LastName: 'Smith', - }); - FakeMutationGroup2.insert('Albums', { - AlbumId: 
1, - SingerId: 2, - AlbumTitle: 'Total Junk', - }); - FakeMutationGroup2.insert('Albums', { - AlbumId: 2, - SingerId: 3, - AlbumTitle: 'Go, Go, Go', - }); - database - .batchWriteAtLeastOnce( - [FakeMutationGroup1, FakeMutationGroup2], - options, - ) - .on('data', data => { - assert.strictEqual(data, 'testData'); - }) - .on('error', err => { - assert.strictEqual(err, fakeError); - }); - fakeDataStream.emit('data', 'testData'); - fakeDataStream.emit('error', fakeError); - done(); - }); - - it('should retry on "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - let retryCount = 0; - - database - .batchWriteAtLeastOnce(mutationGroups, options) - .on('data', () => {}) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(retryCount, 1); - done(); - }); - - fakeDataStream.emit('error', sessionNotFoundError); - retryCount++; - }); - - if (isMuxEnabled[0] === false && isMuxEnabled[1] === false) { - it('should release session on stream end', () => { - const releaseStub = sandbox.stub( - fakeSessionFactory, - 'release', - ) as sinon.SinonStub; - - database.batchWriteAtLeastOnce(mutationGroups, options); - fakeDataStream.emit('end'); - - assert.strictEqual(releaseStub.callCount, 1); - assert.strictEqual(releaseStub.firstCall.args[0], fakeSession); - }); - } + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + fakeSession = new FakeSession(); + fakeDataStream = through.obj(); + + getSessionStub = ( + sandbox.stub( + fakeSessionFactory, + 'getSessionForReadWrite', + ) as sinon.SinonStub + ).callsFake(callback => callback(null, fakeSession)); + + requestStreamStub = sandbox + .stub(database, 'requestStream') + .returns(fakeDataStream); + }); + + it('should get a session via `getSessionForReadWrite`', done => { + getSessionStub.callsFake(() => {}); + database.batchWriteAtLeastOnce(mutationGroups, options); + assert.strictEqual(getSessionStub.callCount, 1); + done(); + }); + + it('should destroy the stream if `getSessionForReadWrite` errors', done => { + const fakeError = new Error('err'); + + getSessionStub.callsFake(callback => callback(fakeError)); + database + .batchWriteAtLeastOnce(mutationGroups, options) + .on('error', err => { + assert.strictEqual(err, fakeError); + done(); + }); + }); + + it('should call `requestStream` with correct arguments', () => { + const expectedGaxOpts = extend(true, {}, options?.gaxOptions); + const expectedReqOpts = Object.assign( + {} as google.spanner.v1.BatchWriteRequest, + { + session: fakeSession!.formattedName_!, + mutationGroups: mutationGroups.map(mg => mg.proto()), + requestOptions: options?.requestOptions, + excludeTxnFromChangeStream: options?.excludeTxnFromChangeStreams, }, ); + + database.batchWriteAtLeastOnce(mutationGroups, options); + + assert.strictEqual(requestStreamStub.callCount, 1); + const args = requestStreamStub.firstCall.args[0]; + assert.strictEqual(args.client, 'SpannerClient'); + assert.strictEqual(args.method, 'batchWrite'); + assert.deepStrictEqual(args.reqOpts, expectedReqOpts); + assert.deepStrictEqual(args.gaxOpts, expectedGaxOpts); + assert.deepStrictEqual(args.headers, database.commonHeaders_); + }); + + it('should return error when passing an empty list of mutationGroups', done => { + const fakeError = new Error('err'); + database.batchWriteAtLeastOnce([], options).on('error', error => { + assert.strictEqual(error, fakeError); + done(); + }); + 
fakeDataStream.emit('error', fakeError); + }); + + it('should return data when passing a valid list of mutationGroups', done => { + database + .batchWriteAtLeastOnce(mutationGroups, options) + .on('data', data => { + assert.strictEqual(data, 'test'); + done(); + }); + fakeDataStream.emit('data', 'test'); + }); + + it('should emit correct event based on valid/invalid list of mutationGroups', done => { + const fakeError = new Error('err'); + const FakeMutationGroup1 = new MutationGroup(); + FakeMutationGroup1.insert('Singers', { + SingerId: 1, + FirstName: 'Scarlet', + LastName: 'Terry', + }); + FakeMutationGroup1.insert('Singers', { + SingerId: 1000000000000000000000000000000000, + FirstName: 'Scarlet', + LastName: 'Terry', + }); + + const FakeMutationGroup2 = new MutationGroup(); + FakeMutationGroup2.insert('Singers', { + SingerId: 2, + FirstName: 'Marc', + }); + FakeMutationGroup2.insert('Singers', { + SingerId: 3, + FirstName: 'Catalina', + LastName: 'Smith', + }); + FakeMutationGroup2.insert('Albums', { + AlbumId: 1, + SingerId: 2, + AlbumTitle: 'Total Junk', + }); + FakeMutationGroup2.insert('Albums', { + AlbumId: 2, + SingerId: 3, + AlbumTitle: 'Go, Go, Go', + }); + database + .batchWriteAtLeastOnce( + [FakeMutationGroup1, FakeMutationGroup2], + options, + ) + .on('data', data => { + assert.strictEqual(data, 'testData'); + }) + .on('error', err => { + assert.strictEqual(err, fakeError); + }); + fakeDataStream.emit('data', 'testData'); + fakeDataStream.emit('error', fakeError); + done(); + }); + + it('should retry on "Session not found" error', done => { + const sessionNotFoundError = { + code: grpc.status.NOT_FOUND, + message: 'Session not found', + } as grpc.ServiceError; + let retryCount = 0; + + database + .batchWriteAtLeastOnce(mutationGroups, options) + .on('data', () => {}) + .on('error', err => { + assert.fail(err); + }) + .on('end', () => { + assert.strictEqual(retryCount, 1); + done(); + }); + + fakeDataStream.emit('error', sessionNotFoundError); + retryCount++; + }); + + it('should release session on stream end', () => { + const releaseStub = sandbox.stub( + fakeSessionFactory, + 'release', + ) as sinon.SinonStub; + + database.batchWriteAtLeastOnce(mutationGroups, options); + fakeDataStream.emit('end'); + + assert.strictEqual(releaseStub.callCount, 1); + assert.strictEqual(releaseStub.firstCall.args[0], fakeSession); }); }); @@ -884,94 +859,76 @@ describe('Database', () => { let sessionFactory: FakeSessionFactory; - const muxEnabled = [true, false]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled ? 'enabled' : 'disable'}`, - () => { - before(() => { - isMuxEnabled - ? 
(process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true') - : (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - 'false'); - }); - - beforeEach(() => { - sandbox.restore(); - sessionFactory = database.sessionFactory_; - ( - sandbox.stub(sessionFactory, 'getSession') as sinon.SinonStub - ).callsFake(callback => { - callback(null, SESSION, TRANSACTION); - }); - }); - - it('should return any errors getting a session', done => { - const fakeErr = new Error('err'); - - (sessionFactory.getSession as sinon.SinonStub).callsFake(callback => - callback(fakeErr, null, null), - ); - - database.writeAtLeastOnce(mutations, err => { - assert.deepStrictEqual(err, fakeErr); - done(); - }); - }); - - it('should return successful CommitResponse when passing an empty mutation', done => { - const fakeMutations = new MutationSet(); - try { - database.writeAtLeastOnce(fakeMutations, (err, response) => { - assert.ifError(err); - assert.deepStrictEqual( - response.commitTimestamp, - RESPONSE.commitTimestamp, - ); - }); - done(); - } catch (error) { - assert(error instanceof Error); - } - }); - - it('should return an error when passing null mutation', done => { - try { - database.writeAtLeastOnce(null, () => {}); - } catch (err) { - const errorMessage = (err as grpc.ServiceError).message; - assert.ok( - errorMessage.includes( - "Cannot read properties of null (reading 'proto')", - ) || - errorMessage.includes("Cannot read property 'proto' of null"), - ); - - done(); - } - }); - - it('should return CommitResponse on successful write using Callback', done => { - database.writeAtLeastOnce(mutations, (err, res) => { - assert.deepStrictEqual(err, null); - assert.deepStrictEqual(res, RESPONSE); - done(); - }); - }); - - it('should return CommitResponse on successful write using await', async () => { - sinon.stub(database, 'writeAtLeastOnce').resolves([RESPONSE]); - const [response] = await database.writeAtLeastOnce(mutations, {}); - assert.deepStrictEqual( - response.commitTimestamp, - RESPONSE.commitTimestamp, - ); - }); + beforeEach(() => { + sandbox.restore(); + sessionFactory = database.sessionFactory_; + (sandbox.stub(sessionFactory, 'getSession') as sinon.SinonStub).callsFake( + callback => { + callback(null, SESSION, TRANSACTION); }, ); }); + + it('should return any errors getting a session', done => { + const fakeErr = new Error('err'); + + (sessionFactory.getSession as sinon.SinonStub).callsFake(callback => + callback(fakeErr, null, null), + ); + + database.writeAtLeastOnce(mutations, err => { + assert.deepStrictEqual(err, fakeErr); + done(); + }); + }); + + it('should return successful CommitResponse when passing an empty mutation', done => { + const fakeMutations = new MutationSet(); + try { + database.writeAtLeastOnce(fakeMutations, (err, response) => { + assert.ifError(err); + assert.deepStrictEqual( + response.commitTimestamp, + RESPONSE.commitTimestamp, + ); + }); + done(); + } catch (error) { + assert(error instanceof Error); + } + }); + + it('should return an error when passing null mutation', done => { + try { + database.writeAtLeastOnce(null, () => {}); + } catch (err) { + const errorMessage = (err as grpc.ServiceError).message; + assert.ok( + errorMessage.includes( + "Cannot read properties of null (reading 'proto')", + ) || errorMessage.includes("Cannot read property 'proto' of null"), + ); + + done(); + } + }); + + it('should return CommitResponse on successful write using Callback', done => { + database.writeAtLeastOnce(mutations, (err, res) => { + assert.deepStrictEqual(err, null); 
+ assert.deepStrictEqual(res, RESPONSE); + done(); + }); + }); + + it('should return CommitResponse on successful write using await', async () => { + sinon.stub(database, 'writeAtLeastOnce').resolves([RESPONSE]); + const [response] = await database.writeAtLeastOnce(mutations, {}); + assert.deepStrictEqual( + response.commitTimestamp, + RESPONSE.commitTimestamp, + ); + }); }); describe('close', () => { @@ -1032,98 +989,77 @@ describe('Database', () => { const SESSION = {}; const RESPONSE = {a: 'b'}; - const muxEnabled = [true, false]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled ? 'enabled' : 'disable'}`, - () => { - before(() => { - isMuxEnabled - ? (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true') - : (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - 'false'); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - beforeEach(() => { - database.sessionFactory_ = { - getSession(callback) { - callback(null, SESSION); - }, - }; - }); - - it('should return any get session errors', done => { - const error = new Error('err'); - - database.sessionFactory_ = { - getSession(callback) { - callback(error); - }, - }; - - database.createBatchTransaction((err, transaction, resp) => { - assert.strictEqual(err, error); - assert.strictEqual(transaction, null); - assert.strictEqual(resp, undefined); - done(); - }); - }); - - it('should create a transaction', done => { - const opts = {a: 'b'}; - - const fakeTransaction = { - begin(callback) { - callback(null, RESPONSE); - }, - - once() {}, - }; - - database.batchTransaction = (identifier, options) => { - assert.deepStrictEqual(identifier, {session: SESSION}); - assert.strictEqual(options, opts); - return fakeTransaction; - }; - - database.createBatchTransaction(opts, (err, transaction, resp) => { - assert.strictEqual(err, null); - assert.strictEqual(transaction, fakeTransaction); - assert.strictEqual(resp, RESPONSE); - done(); - }); - }); - - it('should return any transaction errors', done => { - const error = new Error('err'); - - const fakeTransaction = { - begin(callback) { - callback(error, RESPONSE); - }, - - once() {}, - }; - - database.batchTransaction = () => { - return fakeTransaction; - }; - - database.createBatchTransaction((err, transaction, resp) => { - assert.strictEqual(err, error); - assert.strictEqual(transaction, null); - assert.strictEqual(resp, RESPONSE); - done(); - }); - }); + beforeEach(() => { + database.sessionFactory_ = { + getSession(callback) { + callback(null, SESSION); }, - ); + }; + }); + + it('should return any get session errors', done => { + const error = new Error('err'); + + database.sessionFactory_ = { + getSession(callback) { + callback(error); + }, + }; + + database.createBatchTransaction((err, transaction, resp) => { + assert.strictEqual(err, error); + assert.strictEqual(transaction, null); + assert.strictEqual(resp, undefined); + done(); + }); + }); + + it('should create a transaction', done => { + const opts = {a: 'b'}; + + const fakeTransaction = { + begin(callback) { + callback(null, RESPONSE); + }, + + once() {}, + }; + + database.batchTransaction = (identifier, options) => { + assert.deepStrictEqual(identifier, {session: SESSION}); + assert.strictEqual(options, opts); + return fakeTransaction; + }; + + database.createBatchTransaction(opts, (err, transaction, resp) => { + assert.strictEqual(err, null); + assert.strictEqual(transaction, fakeTransaction); + 
assert.strictEqual(resp, RESPONSE); + done(); + }); + }); + + it('should return any transaction errors', done => { + const error = new Error('err'); + + const fakeTransaction = { + begin(callback) { + callback(error, RESPONSE); + }, + + once() {}, + }; + + database.batchTransaction = () => { + return fakeTransaction; + }; + + database.createBatchTransaction((err, transaction, resp) => { + assert.strictEqual(err, error); + assert.strictEqual(transaction, null); + assert.strictEqual(resp, RESPONSE); + done(); + }); }); }); @@ -1734,210 +1670,184 @@ describe('Database', () => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const SESSIONFACTORY: any = {}; + beforeEach(() => { + REQUEST_STREAM = through(); - const muxEnabled = [true, false]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled ? 'enabled' : 'disable'}`, - () => { - before(() => { - isMuxEnabled - ? (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true') - : (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - 'false'); - }); - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - beforeEach(() => { - REQUEST_STREAM = through(); - - CONFIG = { - reqOpts: {}, - }; - - database.sessionFactory_ = SESSIONFACTORY; - - database.requestStream = () => { - return REQUEST_STREAM; - }; - - SESSIONFACTORY.getSession = callback => { - callback(null, SESSION); - }; - - SESSIONFACTORY.release = util.noop; - }); - - it('should get a session when stream opens', done => { - SESSIONFACTORY.getSession = () => { - done(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - }); - - describe('could not get session', () => { - const ERROR = new Error('Error.'); - - beforeEach(() => { - SESSIONFACTORY.getSession = callback => { - callback(ERROR); - }; - }); - - it('should destroy the stream', done => { - database - .makePooledStreamingRequest_(CONFIG) - .on('error', err => { - assert.strictEqual(err, ERROR); - done(); - }) - .emit('reading'); - }); - }); - - describe('session retrieved successfully', () => { - beforeEach(() => { - SESSIONFACTORY.getSession = callback => { - callback(null, SESSION); - }; - }); - - it('should assign session to request options', done => { - database.requestStream = config => { - assert.strictEqual( - config.reqOpts.session, - SESSION.formattedName_, - ); - setImmediate(done); - return through.obj(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - }); - - it('should make request and pipe to the stream', done => { - const responseData = Buffer.from('response-data'); - - database.makePooledStreamingRequest_(CONFIG).on('data', data => { - assert.deepStrictEqual(data, responseData); - done(); - }); - - REQUEST_STREAM.end(responseData); - }); - - it('should release session when request stream ends', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - - REQUEST_STREAM.end(); - }); - - it('should release session when request stream errors', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - - setImmediate(() => { - REQUEST_STREAM.emit('error'); - }); - }); - - it('should error user stream when request stream errors', done => { - const error = new Error('Error.'); - - database - 
.makePooledStreamingRequest_(CONFIG) - .on('error', err => { - assert.strictEqual(err, error); - done(); - }) - .emit('reading'); - - setImmediate(() => { - REQUEST_STREAM.destroy(error); - }); - }); - }); - - describe('abort', () => { - let SESSION; - - beforeEach(() => { - REQUEST_STREAM.cancel = util.noop; - - SESSION = { - cancel: util.noop, - }; - - SESSIONFACTORY.getSession = callback => { - callback(null, SESSION); - }; - }); - - it('should release the session', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - const requestStream = - database.makePooledStreamingRequest_(CONFIG); - - requestStream.emit('reading'); - - setImmediate(() => { - requestStream.abort(); - }); - }); - - it('should not release the session more than once', done => { - let numTimesReleased = 0; - - SESSIONFACTORY.release = session => { - numTimesReleased++; - assert.strictEqual(session, SESSION); - }; - - const requestStream = - database.makePooledStreamingRequest_(CONFIG); - - requestStream.emit('reading'); - - setImmediate(() => { - requestStream.abort(); - assert.strictEqual(numTimesReleased, 1); - - requestStream.abort(); - assert.strictEqual(numTimesReleased, 1); - - done(); - }); - }); - - it('should cancel the request stream', done => { - REQUEST_STREAM.cancel = done; - const requestStream = - database.makePooledStreamingRequest_(CONFIG); - requestStream.emit('reading'); - setImmediate(() => { - requestStream.abort(); - }); - }); - }); - }, - ); + CONFIG = { + reqOpts: {}, + }; + + database.sessionFactory_ = SESSIONFACTORY; + + database.requestStream = () => { + return REQUEST_STREAM; + }; + + SESSIONFACTORY.getSession = callback => { + callback(null, SESSION); + }; + + SESSIONFACTORY.release = util.noop; + }); + + it('should get a session when stream opens', done => { + SESSIONFACTORY.getSession = () => { + done(); + }; + + database.makePooledStreamingRequest_(CONFIG).emit('reading'); + }); + + describe('could not get session', () => { + const ERROR = new Error('Error.'); + + beforeEach(() => { + SESSIONFACTORY.getSession = callback => { + callback(ERROR); + }; + }); + + it('should destroy the stream', done => { + database + .makePooledStreamingRequest_(CONFIG) + .on('error', err => { + assert.strictEqual(err, ERROR); + done(); + }) + .emit('reading'); + }); + }); + + describe('session retrieved successfully', () => { + beforeEach(() => { + SESSIONFACTORY.getSession = callback => { + callback(null, SESSION); + }; + }); + + it('should assign session to request options', done => { + database.requestStream = config => { + assert.strictEqual(config.reqOpts.session, SESSION.formattedName_); + setImmediate(done); + return through.obj(); + }; + + database.makePooledStreamingRequest_(CONFIG).emit('reading'); + }); + + it('should make request and pipe to the stream', done => { + const responseData = Buffer.from('response-data'); + + database.makePooledStreamingRequest_(CONFIG).on('data', data => { + assert.deepStrictEqual(data, responseData); + done(); + }); + + REQUEST_STREAM.end(responseData); + }); + + it('should release session when request stream ends', done => { + SESSIONFACTORY.release = session => { + assert.strictEqual(session, SESSION); + done(); + }; + + database.makePooledStreamingRequest_(CONFIG).emit('reading'); + + REQUEST_STREAM.end(); + }); + + it('should release session when request stream errors', done => { + SESSIONFACTORY.release = session => { + assert.strictEqual(session, SESSION); + done(); + }; + + 
database.makePooledStreamingRequest_(CONFIG).emit('reading'); + + setImmediate(() => { + REQUEST_STREAM.emit('error'); + }); + }); + + it('should error user stream when request stream errors', done => { + const error = new Error('Error.'); + + database + .makePooledStreamingRequest_(CONFIG) + .on('error', err => { + assert.strictEqual(err, error); + done(); + }) + .emit('reading'); + + setImmediate(() => { + REQUEST_STREAM.destroy(error); + }); + }); + }); + + describe('abort', () => { + let SESSION; + + beforeEach(() => { + REQUEST_STREAM.cancel = util.noop; + + SESSION = { + cancel: util.noop, + }; + + SESSIONFACTORY.getSession = callback => { + callback(null, SESSION); + }; + }); + + it('should release the session', done => { + SESSIONFACTORY.release = session => { + assert.strictEqual(session, SESSION); + done(); + }; + + const requestStream = database.makePooledStreamingRequest_(CONFIG); + + requestStream.emit('reading'); + + setImmediate(() => { + requestStream.abort(); + }); + }); + + it('should not release the session more than once', done => { + let numTimesReleased = 0; + + SESSIONFACTORY.release = session => { + numTimesReleased++; + assert.strictEqual(session, SESSION); + }; + + const requestStream = database.makePooledStreamingRequest_(CONFIG); + + requestStream.emit('reading'); + + setImmediate(() => { + requestStream.abort(); + assert.strictEqual(numTimesReleased, 1); + + requestStream.abort(); + assert.strictEqual(numTimesReleased, 1); + + done(); + }); + }); + + it('should cancel the request stream', done => { + REQUEST_STREAM.cancel = done; + const requestStream = database.makePooledStreamingRequest_(CONFIG); + requestStream.emit('reading'); + setImmediate(() => { + requestStream.abort(); + }); + }); }); }); @@ -2025,190 +1935,169 @@ describe('Database', () => { let snapshotStub: sinon.SinonStub; let runStreamStub: sinon.SinonStub; - const muxEnabled = [true, false]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED is ' + - `${isMuxEnabled ? 'enabled' : 'disable'}`, - () => { - before(() => { - isMuxEnabled - ? (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true') - : (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - 'false'); - }); - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - fakeSession = new FakeSession(); - fakeSession2 = new FakeSession(); - fakeSnapshot = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadOnly, - ); - fakeSnapshot2 = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadOnly, - ); - fakeStream = through.obj(); - fakeStream2 = through.obj(); - - getSessionStub = ( - sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub - ) - .onFirstCall() - .callsFake(callback => callback(null, fakeSession)) - .onSecondCall() - .callsFake(callback => callback(null, fakeSession2)); - - snapshotStub = sandbox - .stub(fakeSession, 'snapshot') - .returns(fakeSnapshot); - - sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2); - - runStreamStub = sandbox - .stub(fakeSnapshot, 'runStream') - .returns(fakeStream); - - sandbox.stub(fakeSnapshot2, 'runStream').returns(fakeStream2); - - sandbox - .stub(fakeSessionFactory, 'isMultiplexedEnabled') - .returns(isMuxEnabled ? 
true : false); - }); - - it('should get a read session via `getSession`', () => { - getSessionStub.callsFake(() => {}); - database.runStream(QUERY); - - assert.strictEqual(getSessionStub.callCount, 1); - }); - - it('should destroy the stream if `getSession` errors', done => { - const fakeError = new Error('err'); - - getSessionStub - .onFirstCall() - .callsFake(callback => callback(fakeError)); - - database.runStream(QUERY).on('error', err => { - assert.strictEqual(err, fakeError); - done(); - }); - }); - - it('should pass through timestamp bounds', () => { - const fakeOptions = {strong: false}; - database.runStream(QUERY, fakeOptions); - - const options = snapshotStub.lastCall.args[0]; - assert.strictEqual(options, fakeOptions); - }); - - it('should call through to `snapshot.runStream`', () => { - const pipeStub = sandbox.stub(fakeStream, 'pipe'); - const proxyStream = database.runStream(QUERY); - - const query = runStreamStub.lastCall.args[0]; - assert.strictEqual(query, QUERY); - - const stream = pipeStub.lastCall.args[0]; - assert.strictEqual(stream, proxyStream); - }); - - it('should end the snapshot on stream end', done => { - const endStub = sandbox.stub(fakeSnapshot, 'end'); - - database - .runStream(QUERY) - .on('data', done) - .on('end', () => { - assert.strictEqual(endStub.callCount, 1); - done(); - }); - - fakeStream.push(null); - }); - - it('should clean up the stream/transaction on error', done => { - const fakeError = new Error('err'); - const endStub = sandbox.stub(fakeSnapshot, 'end'); - - database.runStream(QUERY).on('error', err => { - assert.strictEqual(err, fakeError); - assert.strictEqual(endStub.callCount, 1); - done(); - }); - - fakeStream.destroy(fakeError); - }); - - if (isMuxEnabled) { - it('should not retry on "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - const endStub = sandbox.stub(fakeSnapshot, 'end'); - const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); - const rows = 0; - - database.runStream(QUERY).on('error', err => { - assert.strictEqual(err, sessionNotFoundError); - assert.strictEqual(endStub.callCount, 1); - // make sure it is not retrying the stream - assert.strictEqual(endStub2.callCount, 0); - // row count should be 0 - assert.strictEqual(rows, 0); - done(); - }); - - fakeStream.emit('error', sessionNotFoundError); - fakeStream2.push('row1'); - fakeStream2.push(null); - }); - } else { - it('should release the session on transaction end', () => { - const releaseStub = sandbox.stub( - fakeSessionFactory, - 'release', - ) as sinon.SinonStub; - - database.runStream(QUERY); - fakeSnapshot.emit('end'); - - const session = releaseStub.lastCall.args[0]; - assert.strictEqual(session, fakeSession); - }); - - it('should retry "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - const endStub = sandbox.stub(fakeSnapshot, 'end'); - const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); - let rows = 0; - - database - .runStream(QUERY) - .on('data', () => rows++) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(endStub.callCount, 1); - assert.strictEqual(endStub2.callCount, 1); - assert.strictEqual(rows, 1); - done(); - }); - - fakeStream.emit('error', sessionNotFoundError); - fakeStream2.push('row1'); - fakeStream2.push(null); - }); - } - }, + beforeEach(() => { + fakeSessionFactory = 
database.sessionFactory_; + fakeSession = new FakeSession(); + fakeSession2 = new FakeSession(); + fakeSnapshot = new FakeTransaction( + {} as google.spanner.v1.TransactionOptions.ReadOnly, ); + fakeSnapshot2 = new FakeTransaction( + {} as google.spanner.v1.TransactionOptions.ReadOnly, + ); + fakeStream = through.obj(); + fakeStream2 = through.obj(); + + getSessionStub = ( + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub + ) + .onFirstCall() + .callsFake(callback => callback(null, fakeSession)) + .onSecondCall() + .callsFake(callback => callback(null, fakeSession2)); + + snapshotStub = sandbox + .stub(fakeSession, 'snapshot') + .returns(fakeSnapshot); + + sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2); + + runStreamStub = sandbox + .stub(fakeSnapshot, 'runStream') + .returns(fakeStream); + + sandbox.stub(fakeSnapshot2, 'runStream').returns(fakeStream2); + + sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(true); + }); + + it('should get a read session via `getSession`', () => { + getSessionStub.callsFake(() => {}); + database.runStream(QUERY); + + assert.strictEqual(getSessionStub.callCount, 1); + }); + + it('should destroy the stream if `getSession` errors', done => { + const fakeError = new Error('err'); + + getSessionStub.onFirstCall().callsFake(callback => callback(fakeError)); + + database.runStream(QUERY).on('error', err => { + assert.strictEqual(err, fakeError); + done(); + }); + }); + + it('should pass through timestamp bounds', () => { + const fakeOptions = {strong: false}; + database.runStream(QUERY, fakeOptions); + + const options = snapshotStub.lastCall.args[0]; + assert.strictEqual(options, fakeOptions); + }); + + it('should call through to `snapshot.runStream`', () => { + const pipeStub = sandbox.stub(fakeStream, 'pipe'); + const proxyStream = database.runStream(QUERY); + + const query = runStreamStub.lastCall.args[0]; + assert.strictEqual(query, QUERY); + + const stream = pipeStub.lastCall.args[0]; + assert.strictEqual(stream, proxyStream); + }); + + it('should end the snapshot on stream end', done => { + const endStub = sandbox.stub(fakeSnapshot, 'end'); + + database + .runStream(QUERY) + .on('data', done) + .on('end', () => { + assert.strictEqual(endStub.callCount, 1); + done(); + }); + + fakeStream.push(null); + }); + + it('should clean up the stream/transaction on error', done => { + const fakeError = new Error('err'); + const endStub = sandbox.stub(fakeSnapshot, 'end'); + + database.runStream(QUERY).on('error', err => { + assert.strictEqual(err, fakeError); + assert.strictEqual(endStub.callCount, 1); + done(); + }); + + fakeStream.destroy(fakeError); + }); + + it('should not retry on "Session not found" error', done => { + const sessionNotFoundError = { + code: grpc.status.NOT_FOUND, + message: 'Session not found', + } as grpc.ServiceError; + const endStub = sandbox.stub(fakeSnapshot, 'end'); + const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); + const rows = 0; + + database.runStream(QUERY).on('error', err => { + assert.strictEqual(err, sessionNotFoundError); + assert.strictEqual(endStub.callCount, 1); + // make sure it is not retrying the stream + assert.strictEqual(endStub2.callCount, 0); + // row count should be 0 + assert.strictEqual(rows, 0); + done(); + }); + + fakeStream.emit('error', sessionNotFoundError); + fakeStream2.push('row1'); + fakeStream2.push(null); + }); + + it('should release the session on transaction end', () => { + const releaseStub = sandbox.stub( + fakeSessionFactory, + 'release', + ) as 
sinon.SinonStub;
+
+      database.runStream(QUERY);
+      fakeSnapshot.emit('end');
+
+      const session = releaseStub.lastCall.args[0];
+      assert.strictEqual(session, fakeSession);
+    });
+
+    // since multiplexed sessions are enabled by default, the session pool is not created
+    it.skip('should retry "Session not found" error', done => {
+      const sessionNotFoundError = {
+        code: grpc.status.NOT_FOUND,
+        message: 'Session not found',
+      } as grpc.ServiceError;
+      const endStub = sandbox.stub(fakeSnapshot, 'end');
+      const endStub2 = sandbox.stub(fakeSnapshot2, 'end');
+      let rows = 0;
+
+      database
+        .runStream(QUERY)
+        .on('data', () => rows++)
+        .on('error', err => {
+          assert.fail(err);
+        })
+        .on('end', () => {
+          assert.strictEqual(endStub.callCount, 1);
+          assert.strictEqual(endStub2.callCount, 1);
+          assert.strictEqual(rows, 1);
+          done();
+        });
+
+      fakeStream.emit('error', sessionNotFoundError);
+      fakeStream2.push('row1');
+      fakeStream2.push(null);
+    });
   });
 
@@ -2468,201 +2357,178 @@ describe('Database', () => {
     let getSessionStub: sinon.SinonStub;
     let snapshotStub: sinon.SinonStub;
 
-    const muxEnabled = [true, false];
-
-    muxEnabled.forEach(isMuxEnabled => {
-      describe(
-        'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' +
-          `${isMuxEnabled ? 'enabled' : 'disable'}`,
-        () => {
-          before(() => {
-            isMuxEnabled
-              ? (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true')
-              : (process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS =
-                  'false');
-          });
-
-          beforeEach(() => {
-            fakeSessionFactory = database.sessionFactory_;
-            fakeSession = new FakeSession();
-            fakeSnapshot = new FakeTransaction(
-              {} as google.spanner.v1.TransactionOptions.ReadOnly,
-            );
-
-            beginSnapshotStub = (
-              sandbox.stub(fakeSnapshot, 'begin') as sinon.SinonStub
-            ).callsFake(callback => callback(null));
-
-            getSessionStub = (
-              sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub
-            ).callsFake(callback => callback(null, fakeSession));
-
-            snapshotStub = (
-              sandbox.stub(fakeSession, 'snapshot') as sinon.SinonStub
-            ).returns(fakeSnapshot);
-
-            (
-              sandbox.stub(
-                fakeSessionFactory,
-                'isMultiplexedEnabled',
-              ) as sinon.SinonStub
-            ).returns(isMuxEnabled ? true : false);
-          });
-
-          it(
-            'should return any ' +
-              `${isMuxEnabled ? 
'multiplexed session' : 'pool'}` + - ' errors', - done => { - const fakeError = new Error('err'); - - getSessionStub.callsFake(callback => callback(fakeError)); - - database.getSnapshot(err => { - assert.strictEqual(err, fakeError); - done(); - }); - }, - ); + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + fakeSession = new FakeSession(); + fakeSnapshot = new FakeTransaction( + {} as google.spanner.v1.TransactionOptions.ReadOnly, + ); - it('should pass the timestamp bounds to the snapshot', () => { - const fakeTimestampBounds = {}; - - database.getSnapshot(fakeTimestampBounds, assert.ifError); - - const bounds = snapshotStub.lastCall.args[0]; - assert.strictEqual(bounds, fakeTimestampBounds); - }); - - it('should throw error if maxStaleness is passed in the timestamp bounds to the snapshot', () => { - const fakeTimestampBounds = {maxStaleness: 10}; - - database.getSnapshot(fakeTimestampBounds, err => { - assert.strictEqual(err.code, 3); - assert.strictEqual( - err.message, - 'maxStaleness / minReadTimestamp is not supported for multi-use read-only transactions.', - ); - }); - }); - - it('should throw error if minReadTimestamp is passed in the timestamp bounds to the snapshot', () => { - const fakeTimestampBounds = {minReadTimestamp: 10}; - - database.getSnapshot(fakeTimestampBounds, err => { - assert.strictEqual(err.code, 3); - assert.strictEqual( - err.message, - 'maxStaleness / minReadTimestamp is not supported for multi-use read-only transactions.', - ); - }); - }); - - it('should pass when maxStaleness is undefined', () => { - const fakeTimestampBounds = {minReadTimestamp: undefined}; - - database.getSnapshot(fakeTimestampBounds, assert.ifError); - - const bounds = snapshotStub.lastCall.args[0]; - assert.strictEqual(bounds, fakeTimestampBounds); - }); - - it('should return the `snapshot`', done => { - database.getSnapshot((err, snapshot) => { - assert.ifError(err); - assert.strictEqual(snapshot, fakeSnapshot); - done(); - }); - }); - - if (isMuxEnabled) { - it('should throw an error if `begin` errors with `Session not found`', done => { - const fakeError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - database.getSnapshot((err, snapshot) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(snapshot, undefined); - done(); - }); - }); - } else { - it('should release the session if `begin` errors', done => { - const fakeError = new Error('err'); - - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getSnapshot(err => { - assert.strictEqual(err, fakeError); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - - it('should retry if `begin` errors with `Session not found`', done => { - const fakeError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - - const fakeSession2 = new FakeSession(); - const fakeSnapshot2 = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadOnly, - ); - ( - sandbox.stub(fakeSnapshot2, 'begin') as sinon.SinonStub - ).callsFake(callback => callback(null)); - sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2); - - getSessionStub - .onFirstCall() - .callsFake(callback => callback(null, fakeSession)) - .onSecondCall() - .callsFake(callback => callback(null, fakeSession2)); - - 
beginSnapshotStub.callsFake(callback => callback(fakeError)); - - // The first session that was not found should be released back into the - // pool, so that the pool can remove it from its inventory. - const releaseStub = sandbox.stub(fakeSessionFactory, 'release'); - - database.getSnapshot((err, snapshot) => { - assert.ifError(err); - assert.strictEqual(snapshot, fakeSnapshot2); - // The first session that error should already have been released back - // to the pool. - assert.strictEqual(releaseStub.callCount, 1); - // Ending the valid snapshot will release its session back into the - // pool. - snapshot.emit('end'); - assert.strictEqual(releaseStub.callCount, 2); - done(); - }); - }); - - it('should release the snapshot on `end`', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getSnapshot(err => { - assert.ifError(err); - fakeSnapshot.emit('end'); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - } - }, + beginSnapshotStub = ( + sandbox.stub(fakeSnapshot, 'begin') as sinon.SinonStub + ).callsFake(callback => callback(null)); + + getSessionStub = ( + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub + ).callsFake(callback => callback(null, fakeSession)); + + snapshotStub = ( + sandbox.stub(fakeSession, 'snapshot') as sinon.SinonStub + ).returns(fakeSnapshot); + + ( + sandbox.stub( + fakeSessionFactory, + 'isMultiplexedEnabled', + ) as sinon.SinonStub + ).returns(true); + }); + + it('should return any multiplexed session errors', done => { + const fakeError = new Error('err'); + + getSessionStub.callsFake(callback => callback(fakeError)); + + database.getSnapshot(err => { + assert.strictEqual(err, fakeError); + done(); + }); + }); + + it('should pass the timestamp bounds to the snapshot', () => { + const fakeTimestampBounds = {}; + + database.getSnapshot(fakeTimestampBounds, assert.ifError); + + const bounds = snapshotStub.lastCall.args[0]; + assert.strictEqual(bounds, fakeTimestampBounds); + }); + + it('should throw error if maxStaleness is passed in the timestamp bounds to the snapshot', () => { + const fakeTimestampBounds = {maxStaleness: 10}; + + database.getSnapshot(fakeTimestampBounds, err => { + assert.strictEqual(err.code, 3); + assert.strictEqual( + err.message, + 'maxStaleness / minReadTimestamp is not supported for multi-use read-only transactions.', + ); + }); + }); + + it('should throw error if minReadTimestamp is passed in the timestamp bounds to the snapshot', () => { + const fakeTimestampBounds = {minReadTimestamp: 10}; + + database.getSnapshot(fakeTimestampBounds, err => { + assert.strictEqual(err.code, 3); + assert.strictEqual( + err.message, + 'maxStaleness / minReadTimestamp is not supported for multi-use read-only transactions.', + ); + }); + }); + + it('should pass when maxStaleness is undefined', () => { + const fakeTimestampBounds = {minReadTimestamp: undefined}; + + database.getSnapshot(fakeTimestampBounds, assert.ifError); + + const bounds = snapshotStub.lastCall.args[0]; + assert.strictEqual(bounds, fakeTimestampBounds); + }); + + it('should return the `snapshot`', done => { + database.getSnapshot((err, snapshot) => { + assert.ifError(err); + assert.strictEqual(snapshot, fakeSnapshot); + done(); + }); + }); + + it('should throw an error if `begin` errors with `Session not found`', done => { + const fakeError = { + code: grpc.status.NOT_FOUND, + message: 'Session not found', + } as MockError; + + beginSnapshotStub.callsFake(callback 
=> callback(fakeError));
+
+      database.getSnapshot((err, snapshot) => {
+        assert.strictEqual(err, fakeError);
+        assert.strictEqual(snapshot, undefined);
+        done();
+      });
+    });
+
+    it('should release the session if `begin` errors', done => {
+      const fakeError = new Error('err');
+
+      beginSnapshotStub.callsFake(callback => callback(fakeError));
+
+      const releaseStub = (
+        sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub
+      ).withArgs(fakeSession);
+
+      database.getSnapshot(err => {
+        assert.strictEqual(err, fakeError);
+        assert.strictEqual(releaseStub.callCount, 1);
+        done();
+      });
+    });
+
+    // since multiplexed sessions are enabled by default, the session pool is not created
+    it.skip('should retry if `begin` errors with `Session not found`', done => {
+      const fakeError = {
+        code: grpc.status.NOT_FOUND,
+        message: 'Session not found',
+      } as MockError;
+
+      const fakeSession2 = new FakeSession();
+      const fakeSnapshot2 = new FakeTransaction(
+        {} as google.spanner.v1.TransactionOptions.ReadOnly,
+      );
+      (sandbox.stub(fakeSnapshot2, 'begin') as sinon.SinonStub).callsFake(
+        callback => callback(null),
+      );
+      sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2);
+
+      getSessionStub
+        .onFirstCall()
+        .callsFake(callback => callback(null, fakeSession))
+        .onSecondCall()
+        .callsFake(callback => callback(null, fakeSession2));
+
+      beginSnapshotStub.callsFake(callback => callback(fakeError));
+
+      // The first session that was not found should be released back into the
+      // pool, so that the pool can remove it from its inventory.
+      const releaseStub = sandbox.stub(fakeSessionFactory, 'release');
+
+      database.getSnapshot((err, snapshot) => {
+        assert.ifError(err);
+        assert.strictEqual(snapshot, fakeSnapshot2);
+        // The first session that errored should already have been released back
+        // to the pool.
+        assert.strictEqual(releaseStub.callCount, 1);
+        // Ending the valid snapshot will release its session back into the
+        // pool.
+        snapshot.emit('end');
+        assert.strictEqual(releaseStub.callCount, 2);
+        done();
+      });
+    });
+
+    it('should release the snapshot on `end`', done => {
+      const releaseStub = (
+        sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub
+      ).withArgs(fakeSession);
+
+      database.getSnapshot(err => {
+        assert.ifError(err);
+        fakeSnapshot.emit('end');
+        assert.strictEqual(releaseStub.callCount, 1);
+        done();
+      });
+    });
   });
 
@@ -2673,109 +2539,78 @@ describe('Database', () => {
 
     let getSessionStub: sinon.SinonStub;
 
-    // muxEnabled[i][0] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS
-    // muxEnabled[i][1] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW
-    const muxEnabled = [
-      [true, true],
-      [true, false],
-      [false, true],
-      [false, false],
-    ];
-
-    muxEnabled.forEach(isMuxEnabled => {
-      describe(
-        'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' +
-          `${isMuxEnabled[0] ? 'enabled' : 'disable'}` +
-          ' and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is ' +
-          `${isMuxEnabled[1] ? 
'enabled' : 'disable'}`, - () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - isMuxEnabled[0].toString(); - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = - isMuxEnabled[1].toString(); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - fakeSession = new FakeSession(); - fakeTransaction = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadWrite, - ); - - getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub - ).callsFake(callback => { - callback(null, fakeSession, fakeTransaction); - }); - }); - - it('should get a read/write transaction', () => { - getSessionStub.callsFake(() => {}); - - database.getTransaction(assert.ifError); - - assert.strictEqual(getSessionStub.callCount, 1); - }); - - it(`should return any ${isMuxEnabled[0] && isMuxEnabled[1] ? 'multiplexed session' : 'pool'} errors`, done => { - const fakeError = new Error('err'); - - getSessionStub.callsFake(callback => callback(fakeError)); - - database.getTransaction(err => { - assert.strictEqual(err, fakeError); - done(); - }); - }); - - it('should return the read/write transaction', done => { - database.getTransaction((err, transaction) => { - assert.ifError(err); - assert.strictEqual(transaction, fakeTransaction); - done(); - }); - }); - - it('should propagate an error', done => { - const error = new Error('resource'); - (sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub) - .withArgs(fakeSession) - .throws(error); - - database.on('error', err => { - assert.deepStrictEqual(err, error); - done(); - }); - - database.getTransaction((err, transaction) => { - assert.ifError(err); - transaction.emit('end'); - }); - }); - - it('should release the session on transaction end', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getTransaction((err, transaction) => { - assert.ifError(err); - transaction.emit('end'); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - }, + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + fakeSession = new FakeSession(); + fakeTransaction = new FakeTransaction( + {} as google.spanner.v1.TransactionOptions.ReadWrite, ); + + getSessionStub = ( + sandbox.stub( + fakeSessionFactory, + 'getSessionForReadWrite', + ) as sinon.SinonStub + ).callsFake(callback => { + callback(null, fakeSession, fakeTransaction); + }); + }); + + it('should get a read/write transaction', () => { + getSessionStub.callsFake(() => {}); + + database.getTransaction(assert.ifError); + + assert.strictEqual(getSessionStub.callCount, 1); + }); + + it('should return any multiplexed session errors', done => { + const fakeError = new Error('err'); + + getSessionStub.callsFake(callback => callback(fakeError)); + + database.getTransaction(err => { + assert.strictEqual(err, fakeError); + done(); + }); + }); + + it('should return the read/write transaction', done => { + database.getTransaction((err, transaction) => { + assert.ifError(err); + assert.strictEqual(transaction, fakeTransaction); + done(); + }); + }); + + it('should propagate an error', done => { + const error = new Error('resource'); + (sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub) + .withArgs(fakeSession) + .throws(error); + + 
database.on('error', err => { + assert.deepStrictEqual(err, error); + done(); + }); + + database.getTransaction((err, transaction) => { + assert.ifError(err); + transaction.emit('end'); + }); + }); + + it('should release the session on transaction end', done => { + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(fakeSession); + + database.getTransaction((err, transaction) => { + assert.ifError(err); + transaction.emit('end'); + assert.strictEqual(releaseStub.callCount, 1); + done(); + }); }); }); @@ -3089,220 +2924,184 @@ describe('Database', () => { }, }; - // muxEnabled[i][0] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS - // muxEnabled[i][1] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS - const muxEnabled = [ - [true, true], - [true, false], - [false, true], - [false, false], - ]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled[0] ? 'enabled' : 'disable'}` + - ' and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS is ' + - `${isMuxEnabled[1] ? 'enabled' : 'disable'}`, - () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - isMuxEnabled[0].toString(); - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - isMuxEnabled[1].toString(); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - }); - - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - fakeSession = new FakeSession(); - fakePartitionedDml = fakeSession.partitionedDml(); - - getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForPartitionedOps', - ) as sinon.SinonStub - ).callsFake(callback => { - callback(null, fakeSession); - }); - - sandbox - .stub(fakeSession, 'partitionedDml') - .returns(fakePartitionedDml); - - beginStub = ( - sandbox.stub(fakePartitionedDml, 'begin') as sinon.SinonStub - ).callsFake(callback => callback(null)); - - runUpdateStub = ( - sandbox.stub(fakePartitionedDml, 'runUpdate') as sinon.SinonStub - ).callsFake((_, callback) => callback(null)); - }); - - it('should make a call to getSessionForPartitionedOps', () => { - getSessionStub.callsFake(() => {}); - - database.runPartitionedUpdate(QUERY, assert.ifError); - - assert.strictEqual(getSessionStub.callCount, 1); - }); - - it('should get a session from the session factory', () => { - const fakeCallback = sandbox.spy(); - getSessionStub.callsFake(callback => callback(fakeSession)); - database.runPartitionedUpdate(QUERY, fakeCallback); - const [resp] = fakeCallback.lastCall.args; - assert.strictEqual(resp, fakeSession); - }); - - it('should return errors from getSessionForPartitionedOps', () => { - const fakeError = new Error('err'); - const fakeCallback = sandbox.spy(); - - getSessionStub.callsFake(callback => callback(fakeError)); - database.runPartitionedUpdate(QUERY, fakeCallback); - - const [err, rowCount] = fakeCallback.lastCall.args; - - assert.strictEqual(err, fakeError); - assert.strictEqual(rowCount, 0); - }); - - it('should get a partitioned dml transaction from the session factory', () => { - const fakeCallback = sandbox.spy(); - getSessionStub.callsFake(callback => callback(fakePartitionedDml)); - database.runPartitionedUpdate(QUERY, fakeCallback); - const [resp] = fakeCallback.lastCall.args; - assert.strictEqual(resp, fakePartitionedDml); - }); 
- - it('should call transaction begin', () => { - beginStub.callsFake(() => {}); - database.runPartitionedUpdate(QUERY, assert.ifError); - - assert.strictEqual(beginStub.callCount, 1); - }); - - it('should return any begin errors', done => { - const fakeError = new Error('err'); - - beginStub.callsFake(callback => callback(fakeError)); - - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.runPartitionedUpdate(QUERY, (err, rowCount) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(rowCount, 0); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - - it('call `runUpdate` on the transaction', () => { - const fakeCallback = sandbox.spy(); - - database.runPartitionedUpdate(QUERY, fakeCallback); - - const [query] = runUpdateStub.lastCall.args; - - assert.strictEqual(query.sql, QUERY.sql); - assert.deepStrictEqual(query.params, QUERY.params); - assert.ok(fakeCallback.calledOnce); - }); - - if (!isMuxEnabled) { - it('should release the session on transaction end', () => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.runPartitionedUpdate(QUERY, assert.ifError); - fakePartitionedDml.emit('end'); - - assert.strictEqual(releaseStub.callCount, 1); - }); - } - - it('should accept requestOptions', () => { - const fakeCallback = sandbox.spy(); - - database.runPartitionedUpdate( - { - sql: QUERY.sql, - params: QUERY.params, - requestOptions: { - priority: RequestOptions.Priority.PRIORITY_LOW, - }, - }, - fakeCallback, - ); - - const [query] = runUpdateStub.lastCall.args; - - assert.deepStrictEqual(query, { - sql: QUERY.sql, - params: QUERY.params, - requestOptions: {priority: RequestOptions.Priority.PRIORITY_LOW}, - }); - assert.ok(fakeCallback.calledOnce); - }); - - it('should accept excludeTxnFromChangeStreams', () => { - const fakeCallback = sandbox.spy(); - - database.runPartitionedUpdate( - { - excludeTxnFromChangeStream: true, - }, - fakeCallback, - ); - - const [query] = runUpdateStub.lastCall.args; - - assert.deepStrictEqual(query, { - excludeTxnFromChangeStream: true, - }); - assert.ok(fakeCallback.calledOnce); - }); - - it('should ignore directedReadOptions set for client', () => { - const fakeCallback = sandbox.spy(); - - database.parent.parent = { - routeToLeaderEnabled: true, - directedReadOptions: fakeDirectedReadOptions, - }; - - database.runPartitionedUpdate( - { - sql: QUERY.sql, - params: QUERY.params, - requestOptions: { - priority: RequestOptions.Priority.PRIORITY_LOW, - }, - }, - fakeCallback, - ); - - const [query] = runUpdateStub.lastCall.args; - - assert.deepStrictEqual(query, { - sql: QUERY.sql, - params: QUERY.params, - requestOptions: {priority: RequestOptions.Priority.PRIORITY_LOW}, - }); - assert.ok(fakeCallback.calledOnce); - }); + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + fakeSession = new FakeSession(); + fakePartitionedDml = fakeSession.partitionedDml(); + + getSessionStub = ( + sandbox.stub( + fakeSessionFactory, + 'getSessionForPartitionedOps', + ) as sinon.SinonStub + ).callsFake(callback => { + callback(null, fakeSession); + }); + + sandbox.stub(fakeSession, 'partitionedDml').returns(fakePartitionedDml); + + beginStub = ( + sandbox.stub(fakePartitionedDml, 'begin') as sinon.SinonStub + ).callsFake(callback => callback(null)); + + runUpdateStub = ( + sandbox.stub(fakePartitionedDml, 'runUpdate') as sinon.SinonStub + ).callsFake((_, callback) => 
callback(null)); + }); + + it('should make a call to getSessionForPartitionedOps', () => { + getSessionStub.callsFake(() => {}); + + database.runPartitionedUpdate(QUERY, assert.ifError); + + assert.strictEqual(getSessionStub.callCount, 1); + }); + + it('should get a session from the session factory', () => { + const fakeCallback = sandbox.spy(); + getSessionStub.callsFake(callback => callback(fakeSession)); + database.runPartitionedUpdate(QUERY, fakeCallback); + const [resp] = fakeCallback.lastCall.args; + assert.strictEqual(resp, fakeSession); + }); + + it('should return errors from getSessionForPartitionedOps', () => { + const fakeError = new Error('err'); + const fakeCallback = sandbox.spy(); + + getSessionStub.callsFake(callback => callback(fakeError)); + database.runPartitionedUpdate(QUERY, fakeCallback); + + const [err, rowCount] = fakeCallback.lastCall.args; + + assert.strictEqual(err, fakeError); + assert.strictEqual(rowCount, 0); + }); + + it('should get a partitioned dml transaction from the session factory', () => { + const fakeCallback = sandbox.spy(); + getSessionStub.callsFake(callback => callback(fakePartitionedDml)); + database.runPartitionedUpdate(QUERY, fakeCallback); + const [resp] = fakeCallback.lastCall.args; + assert.strictEqual(resp, fakePartitionedDml); + }); + + it('should call transaction begin', () => { + beginStub.callsFake(() => {}); + database.runPartitionedUpdate(QUERY, assert.ifError); + + assert.strictEqual(beginStub.callCount, 1); + }); + + it('should return any begin errors', done => { + const fakeError = new Error('err'); + + beginStub.callsFake(callback => callback(fakeError)); + + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(fakeSession); + + database.runPartitionedUpdate(QUERY, (err, rowCount) => { + assert.strictEqual(err, fakeError); + assert.strictEqual(rowCount, 0); + assert.strictEqual(releaseStub.callCount, 1); + done(); + }); + }); + + it('call `runUpdate` on the transaction', () => { + const fakeCallback = sandbox.spy(); + + database.runPartitionedUpdate(QUERY, fakeCallback); + + const [query] = runUpdateStub.lastCall.args; + + assert.strictEqual(query.sql, QUERY.sql); + assert.deepStrictEqual(query.params, QUERY.params); + assert.ok(fakeCallback.calledOnce); + }); + + it('should release the session on transaction end', () => { + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(fakeSession); + + database.runPartitionedUpdate(QUERY, assert.ifError); + fakePartitionedDml.emit('end'); + + assert.strictEqual(releaseStub.callCount, 1); + }); + + it('should accept requestOptions', () => { + const fakeCallback = sandbox.spy(); + + database.runPartitionedUpdate( + { + sql: QUERY.sql, + params: QUERY.params, + requestOptions: { + priority: RequestOptions.Priority.PRIORITY_LOW, + }, }, + fakeCallback, ); + + const [query] = runUpdateStub.lastCall.args; + + assert.deepStrictEqual(query, { + sql: QUERY.sql, + params: QUERY.params, + requestOptions: {priority: RequestOptions.Priority.PRIORITY_LOW}, + }); + assert.ok(fakeCallback.calledOnce); + }); + + it('should accept excludeTxnFromChangeStreams', () => { + const fakeCallback = sandbox.spy(); + + database.runPartitionedUpdate( + { + excludeTxnFromChangeStream: true, + }, + fakeCallback, + ); + + const [query] = runUpdateStub.lastCall.args; + + assert.deepStrictEqual(query, { + excludeTxnFromChangeStream: true, + }); + assert.ok(fakeCallback.calledOnce); + }); + + it('should ignore directedReadOptions 
set for client', () => { + const fakeCallback = sandbox.spy(); + + database.parent.parent = { + routeToLeaderEnabled: true, + directedReadOptions: fakeDirectedReadOptions, + }; + + database.runPartitionedUpdate( + { + sql: QUERY.sql, + params: QUERY.params, + requestOptions: { + priority: RequestOptions.Priority.PRIORITY_LOW, + }, + }, + fakeCallback, + ); + + const [query] = runUpdateStub.lastCall.args; + + assert.deepStrictEqual(query, { + sql: QUERY.sql, + params: QUERY.params, + requestOptions: {priority: RequestOptions.Priority.PRIORITY_LOW}, + }); + assert.ok(fakeCallback.calledOnce); }); }); @@ -3314,138 +3113,105 @@ describe('Database', () => { let fakeSessionFactory: FakeSessionFactory; - // muxEnabled[i][0] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS - // muxEnabled[i][1] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW - const muxEnabled = [ - [true, true], - [true, false], - [false, true], - [false, false], - ]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled[0] ? 'enabled' : 'disable'}` + - ' and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is ' + - `${isMuxEnabled[1] ? 'enabled' : 'disable'}`, - () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - isMuxEnabled[0].toString(); - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = - isMuxEnabled[1].toString(); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - - ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub - ).callsFake(callback => { - callback(null, SESSION, TRANSACTION); - }); - }); - - it('should return any errors getting a session', done => { - const fakeErr = new Error('err'); - - ( - fakeSessionFactory.getSessionForReadWrite as sinon.SinonStub - ).callsFake(callback => callback(fakeErr)); - - database.runTransaction(err => { - assert.strictEqual(err, fakeErr); - done(); - }); - }); - - it('should create a `TransactionRunner`', () => { - const fakeRunFn = sandbox.spy(); - - database.runTransaction(fakeRunFn); - - const [session, transaction, runFn, options] = - fakeTransactionRunner.calledWith_; - - assert.strictEqual(session, SESSION); - assert.strictEqual(transaction, TRANSACTION); - assert.deepStrictEqual(options, {}); - }); - - it('should optionally accept runner `options`', () => { - const fakeOptions = {timeout: 1}; - - database.runTransaction(fakeOptions, assert.ifError); - - const options = fakeTransactionRunner.calledWith_[3]; - - assert.strictEqual(options, fakeOptions); - }); - - it('should optionally accept runner `option` isolationLevel', async () => { - const fakeOptions = { - isolationLevel: IsolationLevel.REPEATABLE_READ, - }; - - await database.runTransaction(fakeOptions, assert.ifError); - - const options = fakeTransactionRunner.calledWith_[3]; - assert.strictEqual(options, fakeOptions); - }); - - it('should optionally accept runner `option` readLockMode', async () => { - const fakeOptions = { - readLockMode: ReadLockMode.PESSIMISTIC, - }; - - await database.runTransaction(fakeOptions, assert.ifError); - - const options = fakeTransactionRunner.calledWith_[3]; - assert.strictEqual(options, fakeOptions); - }); - - it('should release the session when finished', done => { - const releaseStub = ( - 
sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); - - sandbox.stub(FakeTransactionRunner.prototype, 'run').resolves(); - - database.runTransaction(assert.ifError); - - setImmediate(() => { - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - - it('should catch any run errors and return them', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); - const fakeError = new Error('err'); + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + + ( + sandbox.stub( + fakeSessionFactory, + 'getSessionForReadWrite', + ) as sinon.SinonStub + ).callsFake(callback => { + callback(null, SESSION, TRANSACTION); + }); + }); - sandbox - .stub(FakeTransactionRunner.prototype, 'run') - .rejects(fakeError); + it('should return any errors getting a session', done => { + const fakeErr = new Error('err'); - database.runTransaction(err => { - assert.strictEqual(err, fakeError); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - }, + (fakeSessionFactory.getSessionForReadWrite as sinon.SinonStub).callsFake( + callback => callback(fakeErr), ); + + database.runTransaction(err => { + assert.strictEqual(err, fakeErr); + done(); + }); + }); + + it('should create a `TransactionRunner`', () => { + const fakeRunFn = sandbox.spy(); + + database.runTransaction(fakeRunFn); + + const [session, transaction, runFn, options] = + fakeTransactionRunner.calledWith_; + + assert.strictEqual(session, SESSION); + assert.strictEqual(transaction, TRANSACTION); + assert.deepStrictEqual(options, {}); + }); + + it('should optionally accept runner `options`', () => { + const fakeOptions = {timeout: 1}; + + database.runTransaction(fakeOptions, assert.ifError); + + const options = fakeTransactionRunner.calledWith_[3]; + + assert.strictEqual(options, fakeOptions); + }); + + it('should optionally accept runner `option` isolationLevel', async () => { + const fakeOptions = { + isolationLevel: IsolationLevel.REPEATABLE_READ, + }; + + await database.runTransaction(fakeOptions, assert.ifError); + + const options = fakeTransactionRunner.calledWith_[3]; + assert.strictEqual(options, fakeOptions); + }); + + it('should optionally accept runner `option` readLockMode', async () => { + const fakeOptions = { + readLockMode: ReadLockMode.PESSIMISTIC, + }; + + await database.runTransaction(fakeOptions, assert.ifError); + + const options = fakeTransactionRunner.calledWith_[3]; + assert.strictEqual(options, fakeOptions); + }); + + it('should release the session when finished', done => { + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(SESSION); + + sandbox.stub(FakeTransactionRunner.prototype, 'run').resolves(); + + database.runTransaction(assert.ifError); + + setImmediate(() => { + assert.strictEqual(releaseStub.callCount, 1); + done(); + }); + }); + + it('should catch any run errors and return them', done => { + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(SESSION); + const fakeError = new Error('err'); + + sandbox.stub(FakeTransactionRunner.prototype, 'run').rejects(fakeError); + + database.runTransaction(err => { + assert.strictEqual(err, fakeError); + assert.strictEqual(releaseStub.callCount, 1); + done(); + }); }); }); @@ -3457,115 +3223,82 @@ describe('Database', () => { let fakeSessionFactory: FakeSessionFactory; - // muxEnabled[i][0] is to enable/disable env 
GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS - // muxEnabled[i][1] is to enable/disable env GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW - const muxEnabled = [ - [true, true], - [true, false], - [false, true], - [false, false], - ]; - - muxEnabled.forEach(isMuxEnabled => { - describe( - 'when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is ' + - `${isMuxEnabled[0] ? 'enabled' : 'disable'}` + - ' and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is ' + - `${isMuxEnabled[1] ? 'enabled' : 'disable'}`, - () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = - isMuxEnabled[0].toString(); - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = - isMuxEnabled[1].toString(); - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - beforeEach(() => { - fakeSessionFactory = database.sessionFactory_; - ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub - ).callsFake(callback => { - callback(null, SESSION, TRANSACTION); - }); - }); - - it('should create an `AsyncTransactionRunner`', async () => { - const fakeRunFn = sandbox.spy(); - - await database.runTransactionAsync(fakeRunFn); - - const [session, transaction, runFn, options] = - fakeAsyncTransactionRunner.calledWith_; - assert.strictEqual(session, SESSION); - assert.strictEqual(transaction, TRANSACTION); - assert.strictEqual(runFn, fakeRunFn); - assert.deepStrictEqual(options, {}); - }); - - it('should optionally accept runner `options`', async () => { - const fakeOptions = {timeout: 1}; - - await database.runTransactionAsync(fakeOptions, assert.ifError); - - const options = fakeAsyncTransactionRunner.calledWith_[3]; - assert.strictEqual(options, fakeOptions); - }); - - it('should optionally accept runner `option` isolationLevel', async () => { - const fakeOptions = { - isolationLevel: IsolationLevel.REPEATABLE_READ, - }; - - await database.runTransactionAsync(fakeOptions, assert.ifError); - - const options = fakeAsyncTransactionRunner.calledWith_[3]; - assert.strictEqual(options, fakeOptions); - }); - - it('should optionally accept runner `option` readLockMode', async () => { - const fakeOptions = { - readLockMode: ReadLockMode.PESSIMISTIC, - }; - - await database.runTransactionAsync(fakeOptions, assert.ifError); - - const options = fakeAsyncTransactionRunner.calledWith_[3]; - assert.strictEqual(options, fakeOptions); - }); - - it('should return the runners resolved value', async () => { - const fakeValue = {}; - - sandbox - .stub(FakeAsyncTransactionRunner.prototype, 'run') - .resolves(fakeValue); - - const value = await database.runTransactionAsync(assert.ifError); - assert.strictEqual(value, fakeValue); - }); - - it('should release the session when finished', async () => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); - - sandbox - .stub(FakeAsyncTransactionRunner.prototype, 'run') - .resolves(); - - await database.runTransactionAsync(assert.ifError); - assert.strictEqual(releaseStub.callCount, 1); - }); - }, - ); + beforeEach(() => { + fakeSessionFactory = database.sessionFactory_; + ( + sandbox.stub( + fakeSessionFactory, + 'getSessionForReadWrite', + ) as sinon.SinonStub + ).callsFake(callback => { + callback(null, SESSION, TRANSACTION); + }); + }); + + it('should create an `AsyncTransactionRunner`', async () => { + const fakeRunFn = sandbox.spy(); + + await 
database.runTransactionAsync(fakeRunFn); + + const [session, transaction, runFn, options] = + fakeAsyncTransactionRunner.calledWith_; + assert.strictEqual(session, SESSION); + assert.strictEqual(transaction, TRANSACTION); + assert.strictEqual(runFn, fakeRunFn); + assert.deepStrictEqual(options, {}); + }); + + it('should optionally accept runner `options`', async () => { + const fakeOptions = {timeout: 1}; + + await database.runTransactionAsync(fakeOptions, assert.ifError); + + const options = fakeAsyncTransactionRunner.calledWith_[3]; + assert.strictEqual(options, fakeOptions); + }); + + it('should optionally accept runner `option` isolationLevel', async () => { + const fakeOptions = { + isolationLevel: IsolationLevel.REPEATABLE_READ, + }; + + await database.runTransactionAsync(fakeOptions, assert.ifError); + + const options = fakeAsyncTransactionRunner.calledWith_[3]; + assert.strictEqual(options, fakeOptions); + }); + + it('should optionally accept runner `option` readLockMode', async () => { + const fakeOptions = { + readLockMode: ReadLockMode.PESSIMISTIC, + }; + + await database.runTransactionAsync(fakeOptions, assert.ifError); + + const options = fakeAsyncTransactionRunner.calledWith_[3]; + assert.strictEqual(options, fakeOptions); + }); + + it('should return the runners resolved value', async () => { + const fakeValue = {}; + + sandbox + .stub(FakeAsyncTransactionRunner.prototype, 'run') + .resolves(fakeValue); + + const value = await database.runTransactionAsync(assert.ifError); + assert.strictEqual(value, fakeValue); + }); + + it('should release the session when finished', async () => { + const releaseStub = ( + sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub + ).withArgs(SESSION); + + sandbox.stub(FakeAsyncTransactionRunner.prototype, 'run').resolves(); + + await database.runTransactionAsync(assert.ifError); + assert.strictEqual(releaseStub.callCount, 1); }); }); diff --git a/test/metrics/metrics.ts b/test/metrics/metrics.ts index 0805f7451..854a44bdc 100644 --- a/test/metrics/metrics.ts +++ b/test/metrics/metrics.ts @@ -238,7 +238,7 @@ describe('Test metrics with mock server', () => { const elapsedTime = endTime.valueOf() - startTime.valueOf(); - const methods = ['batchCreateSessions', 'executeStreamingSql']; + const methods = ['createSession', 'executeStreamingSql']; const {resourceMetrics} = await reader.collect(); const operationCountData = getMetricData( @@ -369,7 +369,7 @@ describe('Test metrics with mock server', () => { const sessionAttributes = { ...commonAttributes, database: `database-${dbCounter}`, - method: 'batchCreateSessions', + method: 'createSession', }; // Verify batchCreateSession metrics are unaffected assert.strictEqual( @@ -462,7 +462,7 @@ describe('Test metrics with mock server', () => { // Verify GFE AFE latency doesn't exist assert.ok(!hasMetricData(resourceMetrics, METRIC_NAME_GFE_LATENCIES)); assert.ok(!hasMetricData(resourceMetrics, METRIC_NAME_AFE_LATENCIES)); - const methods = ['batchCreateSessions', 'executeStreamingSql']; + const methods = ['createSession', 'executeStreamingSql']; methods.forEach(method => { const attributes = { ...commonAttributes, @@ -554,9 +554,9 @@ describe('Test metrics with mock server', () => { const sessionAttributes = { ...commonAttributes, database: `database-${dbCounter}`, - method: 'batchCreateSessions', + method: 'createSession', }; - // Verify batchCreateSession metrics are unaffected + // Verify createSession metrics are unaffected assert.strictEqual( 1, getAggregatedValue(operationCountData, 
sessionAttributes), @@ -659,7 +659,7 @@ describe('Test metrics with mock server', () => { const elapsedTime = endTime.valueOf() - startTime.valueOf(); - const methods = ['batchCreateSessions', 'executeStreamingSql']; + const methods = ['createSession', 'executeStreamingSql']; const {resourceMetrics} = await reader.collect(); const operationCountData = getMetricData( diff --git a/test/session-factory.ts b/test/session-factory.ts index db74ac634..e61199ab2 100644 --- a/test/session-factory.ts +++ b/test/session-factory.ts @@ -87,11 +87,15 @@ describe('SessionFactory', () => { }); describe('instantiation', () => { - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { + describe('when multiplexed session is disabled', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; }); + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + }); + it('should create a SessionPool object', () => { assert(sessionFactory.pool_ instanceof SessionPool); }); @@ -125,15 +129,7 @@ describe('SessionFactory', () => { }); }); - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - }); - - after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - + describe('when multiplexed session is default', () => { it('should create a MultiplexedSession object', () => { assert( sessionFactory.multiplexedSession_ instanceof MultiplexedSession, @@ -151,35 +147,29 @@ describe('SessionFactory', () => { }); it('should correctly initialize the isMultiplexedEnabled field when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); assert.strictEqual(sessionFactory.isMultiplexed, true); }); }); - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW both are disabled', () => { + describe('when multiplexed session is disabled for r/w', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; }); + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; + }); + it('should correctly initialize the isMultiplexedRW field', () => { const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); assert.strictEqual(sessionFactory.isMultiplexedRW, false); }); }); - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS and GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW both are enabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true'; - }); - - after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - + describe('when multiplexed session is default for r/w', () => { it('should correctly initialize the isMultiplexedRW field', () => { const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); assert.strictEqual(sessionFactory.isMultiplexedRW, true); @@ -188,11 +178,15 @@ describe('SessionFactory', () => { }); describe('getSession', () => { - describe('when 
GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => {
+    describe('when multiplexed session is disabled', () => {
       before(() => {
         process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
       });
 
+      after(() => {
+        delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+      });
+
       it('should retrieve a regular session from the pool', done => {
         (
           sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub
@@ -217,15 +211,7 @@ describe('SessionFactory', () => {
       });
     });
 
-    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-      });
-
+    describe('when multiplexed session is default', () => {
       it('should return the multiplexed session', done => {
         (
           sandbox.stub(
@@ -266,6 +252,11 @@ describe('SessionFactory', () => {
           process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
         });
 
+        after(() => {
+          delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+          delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW;
+        });
+
         it('should retrieve a regular session from the pool', done => {
           (
             sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub
@@ -290,17 +281,7 @@ describe('SessionFactory', () => {
       });
     });
 
-    describe('when multiplexed session for r/w enabled', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
-      });
-
+    describe('when multiplexed session is not disabled for r/w', () => {
       it('should return the multiplexed session', done => {
         (
           sandbox.stub(
@@ -343,15 +324,7 @@ describe('SessionFactory', () => {
   });
 
   describe('release', () => {
-    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-      });
-
+    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled', () => {
       it('should not call the release method', () => {
         const releaseStub = sandbox.stub(sessionFactory.pool_, 'release');
         const fakeMuxSession = createMuxSession();
@@ -365,6 +338,10 @@ describe('SessionFactory', () => {
           process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
         });
 
+        after(() => {
+          delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+        });
+
         it('should call the release method to release a regular session', () => {
           const releaseStub = sandbox.stub(sessionFactory.pool_, 'release');
           const fakeSession = createSession();
@@ -389,10 +366,7 @@ describe('SessionFactory', () => {
   });
 
   describe('isMultiplexedEnabled', () => {
-    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-      });
+    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled', () => {
       it('should have enabled the multiplexed', () => {
         const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS);
         assert.strictEqual(sessionFactory.isMultiplexedEnabled(), true);
@@ -403,6 +377,9 @@ describe('SessionFactory', () => {
       before(() => {
        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS 
= 'false';
       });
+      after(() => {
+        delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+      });
       it('should not have enabled the multiplexed', () => {
         const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS);
         assert.strictEqual(sessionFactory.isMultiplexedEnabled(), false);
@@ -411,11 +388,7 @@ describe('SessionFactory', () => {
   });
 
   describe('isMultiplexedEnabledForRW', () => {
-    describe('when multiplexed session is enabled for read/write transactions', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true';
-      });
+    describe('when multiplexed session is not disabled for read/write transactions', () => {
       it('should have enabled the multiplexed', () => {
         const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS);
         assert.strictEqual(sessionFactory.isMultiplexedEnabledForRW(), true);
@@ -427,6 +400,10 @@ describe('SessionFactory', () => {
         process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
         process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
       });
+      after(() => {
+        delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+        delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW;
+      });
       it('should not have enabled the multiplexed', () => {
         const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS);
         assert.strictEqual(sessionFactory.isMultiplexedEnabledForRW(), false);
diff --git a/test/session-pool.ts b/test/session-pool.ts
index 87e12b971..e64b91390 100644
--- a/test/session-pool.ts
+++ b/test/session-pool.ts
@@ -97,12 +97,20 @@ describe('SessionPool', () => {
   });
 
   beforeEach(() => {
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS =
+      'false';
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
     DATABASE.session = createSession;
     sessionPool = new SessionPool(DATABASE);
     inventory = sessionPool._inventory;
   });
 
   afterEach(() => {
+    delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+    delete process.env
+      .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS;
+    delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW;
     pQueueOverride = null;
     sandbox.restore();
   });
diff --git a/test/spanner.ts b/test/spanner.ts
index d6f0ee062..a0d8e9d1f 100644
--- a/test/spanner.ts
+++ b/test/spanner.ts
@@ -626,6 +626,8 @@ describe('Spanner with mock server', () => {
         }
       },
     );
+    // wait 10ms for the begin call to finish its execution
+    await new Promise(resolve => setTimeout(resolve, 10));
     await database.close();
     const request = spannerMock.getRequests().find(val => {
       return (val as v1.ReadRequest).table === 'foo';
@@ -783,7 +785,10 @@ describe('Spanner with mock server', () => {
   it('should support all data types as JSON', async () => {
     const database = newTestDatabase();
     try {
-      const [rows] = await database.run({sql: selectAllTypes, json: true});
+      const [rows] = await database.run({
+        sql: selectAllTypes,
+        json: true,
+      });
       assert.strictEqual(rows.length, 3);
       let i = 0;
       (rows as Json[]).forEach(row => {
@@ -1187,15 +1192,19 @@ describe('Spanner with mock server', () => {
     };
     const database = newTestDatabase({incStep: 1, min: 0});
     try {
-      const pool = database.pool_ as SessionPool;
+      const sessionFactory = database.sessionFactory_ as SessionFactory;
+      const pool = sessionFactory.pool_ as SessionPool;
+      const multiplexedSession =
+        sessionFactory.multiplexedSession_ as 
diff --git a/test/session-pool.ts b/test/session-pool.ts
index 87e12b971..e64b91390 100644
--- a/test/session-pool.ts
+++ b/test/session-pool.ts
@@ -97,12 +97,20 @@ describe('SessionPool', () => {
   });

   beforeEach(() => {
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS =
+      'false';
+    process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
     DATABASE.session = createSession;
     sessionPool = new SessionPool(DATABASE);
     inventory = sessionPool._inventory;
   });

   afterEach(() => {
+    delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
+    delete process.env
+      .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS;
+    delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW;
     pQueueOverride = null;
     sandbox.restore();
   });
diff --git a/test/spanner.ts b/test/spanner.ts
index d6f0ee062..a0d8e9d1f 100644
--- a/test/spanner.ts
+++ b/test/spanner.ts
@@ -626,6 +626,8 @@ describe('Spanner with mock server', () => {
       }
     },
   );
+  // wait 10ms for the begin call to finish its execution
+  await new Promise(resolve => setTimeout(resolve, 10));
   await database.close();
   const request = spannerMock.getRequests().find(val => {
     return (val as v1.ReadRequest).table === 'foo';
   });
@@ -783,7 +785,10 @@ describe('Spanner with mock server', () => {
   it('should support all data types as JSON', async () => {
     const database = newTestDatabase();
     try {
-      const [rows] = await database.run({sql: selectAllTypes, json: true});
+      const [rows] = await database.run({
+        sql: selectAllTypes,
+        json: true,
+      });
       assert.strictEqual(rows.length, 3);
       let i = 0;
       (rows as Json[]).forEach(row => {
@@ -1187,15 +1192,19 @@ describe('Spanner with mock server', () => {
     };
     const database = newTestDatabase({incStep: 1, min: 0});
     try {
-      const pool = database.pool_ as SessionPool;
+      const sessionFactory = database.sessionFactory_ as SessionFactory;
+      const pool = sessionFactory.pool_ as SessionPool;
+      const multiplexedSession =
+        sessionFactory.multiplexedSession_ as MultiplexedSession;
       const promises: Array> = [];
       for (let i = 0; i < 10; i++) {
         promises.push(database.run(query));
       }
       await Promise.all(promises);
-      assert.ok(
-        pool.size >= 1 && pool.size <= 10,
-        'Pool size should be between 1 and 10',
+      assert.notEqual(
+        multiplexedSession,
+        null,
+        'Multiplexed session should not be null',
       );
     } finally {
       await database.close();
     }
@@ -1209,16 +1218,20 @@ describe('Spanner with mock server', () => {
     };
     const database = newTestDatabase({incStep: 1, min: 0});
     try {
-      const pool = database.pool_ as SessionPool;
+      const sessionFactory = database.sessionFactory_ as SessionFactory;
+      const pool = sessionFactory.pool_ as SessionPool;
+      const multiplexedSession =
+        sessionFactory.multiplexedSession_ as MultiplexedSession;
       const promises: Array> = [];
       for (let i = 0; i < 10; i++) {
         promises.push(executeSimpleUpdate(database, update));
       }
       spannerMock.unfreeze();
       await Promise.all(promises);
-      assert.ok(
-        pool.size >= 1 && pool.size <= 10,
-        'Pool size should be between 1 and 10',
+      assert.notEqual(
+        multiplexedSession,
+        null,
+        'Multiplexed session should not be null',
       );
     } finally {
       await database.close();
     }
@@ -1957,15 +1970,7 @@ describe('Spanner with mock server', () => {
   });

   describe('read-only transactions', () => {
-    describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-      });
-
+    describe('when session mode is default for read-only', () => {
       it('should make a request to CreateSession', async () => {
         const database = newTestDatabase();
         await database.run('SELECT 1');
@@ -1991,6 +1996,7 @@ describe('Spanner with mock server', () => {
           .multiplexedSession_ as MultiplexedSession;
         database.run(query, (err, resp) => {
           assert.strictEqual(pool._inventory.borrowed.size, 0);
+          // multiplexed session will get created by default
           assert.notEqual(multiplexedSession._multiplexedSession, null);
           assert.ifError(err);
           assert.strictEqual(resp.length, 3);
@@ -2006,6 +2012,7 @@ describe('Spanner with mock server', () => {
           .multiplexedSession_ as MultiplexedSession;
         database.getSnapshot((err, resp) => {
           assert.strictEqual(pool._inventory.borrowed.size, 0);
+          // multiplexed session will get created by default
           assert.notEqual(multiplexedSession._multiplexedSession, null);
           assert.ifError(err);
           assert(resp instanceof Snapshot);
@@ -2032,6 +2039,7 @@ describe('Spanner with mock server', () => {
           .multiplexedSession_ as MultiplexedSession;
         database.writeAtLeastOnce(mutations, (err, resp) => {
           assert.strictEqual(pool._inventory.borrowed.size, 0);
+          // multiplexed session will get created by default
           assert.notEqual(multiplexedSession._multiplexedSession, null);
           assert.ifError(err);
           assert.strictEqual(typeof resp?.commitTimestamp?.nanos, 'number');
@@ -2090,36 +2098,137 @@ describe('Spanner with mock server', () => {
           done();
         });
       });
+
+      it('should fail the transaction if multiplexed session creation fails', async () => {
+        const query = {
+          sql: selectSql,
+        } as ExecuteSqlRequest;
+        const err = {
+          code: grpc.status.NOT_FOUND,
+          message: 'create session failed',
+        } as MockError;
+        spannerMock.setExecutionTime(
+          spannerMock.createSession,
+          SimulatedExecutionTime.ofError(err),
+        );
+        const database = newTestDatabase().on('error', err => {
+          assert.strictEqual(err.code, Status.NOT_FOUND);
+        });
+        try {
+          await database.run(query);
+        } catch (error) {
+          assert.strictEqual((error as
grpc.ServiceError).code, err.code); + assert.strictEqual( + (error as grpc.ServiceError).details, + 'create session failed', + ); + assert.strictEqual( + (error as grpc.ServiceError).message, + '5 NOT_FOUND: create session failed', + ); + } + }); }); - }); - describe('partitioned ops', () => { - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => { + describe('when multiplexed session is disabled for read-only', () => { before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; }); after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + }); + it('should make a request to BatchCreateSessions', async () => { + const database = newTestDatabase(); + await database.run('SELECT 1'); + const requests = spannerMock.getRequests().find(val => { + return (val as v1.BatchCreateSessionsRequest).sessionTemplate; + }) as v1.BatchCreateSessionsRequest; + assert.ok(requests, 'BatchCreateSessionsRequest should be called'); + assert.strictEqual( + requests.sessionTemplate?.multiplexed, + false, + 'Multiplexed should be false', + ); }); - it('should execute the transaction(database.runPartitionedUpdate) successfully using regular/pool session', done => { + it('should execute the transaction(database.run) successfully using regular session', done => { + const query = { + sql: selectSql, + } as ExecuteSqlRequest; const database = newTestDatabase({min: 1, max: 1}); const pool = (database.sessionFactory_ as SessionFactory) .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; - database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { + database.run(query, (err, resp) => { + assert.ifError(err); assert.strictEqual(pool._inventory.sessions.length, 1); - assert.strictEqual( - pool._inventory.sessions[0].metadata.multiplexed, - false, - ); - // multiplexed session will get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); + assert.strictEqual(resp.length, 3); + done(); + }); + }); + + it('should execute the transaction(database.getSnapshot) successfully using regular session', done => { + const database = newTestDatabase({min: 1, max: 1}); + const pool = (database.sessionFactory_ as SessionFactory) + .pool_ as SessionPool; + const multiplexedSession = (database.sessionFactory_ as SessionFactory) + .multiplexedSession_ as MultiplexedSession; + database.getSnapshot((err, resp) => { + assert.ifError(err); + assert.strictEqual(pool._inventory.borrowed.size, 1); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); + assert(resp instanceof Snapshot); + resp.end(); + done(); + }); + }); + + it('should execute the transaction(database.writeAtLeastOnce) successfully using regular session', done => { + const database = newTestDatabase({min: 1, max: 1}); + const mutations = new MutationSet(); + mutations.upsert('Singers', { + SingerId: 1, + FirstName: 'Scarlet', + LastName: 'Terry', + }); + mutations.upsert('Singers', { + SingerId: 2, + FirstName: 'Marc', + }); + const 
pool = (database.sessionFactory_ as SessionFactory) + .pool_ as SessionPool; + const multiplexedSession = (database.sessionFactory_ as SessionFactory) + .multiplexedSession_ as MultiplexedSession; + database.writeAtLeastOnce(mutations, (err, resp) => { + assert.ifError(err); + assert.strictEqual(pool._inventory.borrowed.size, 1); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); + assert.strictEqual(typeof resp?.commitTimestamp?.nanos, 'number'); + assert.strictEqual(typeof resp?.commitTimestamp?.seconds, 'string'); + assert.strictEqual(resp?.commitStats, null); + done(); + }); + }); + }); + }); + + describe('partitioned ops', () => { + describe('default session mode for partitioned ops', () => { + it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { + const database = newTestDatabase({min: 1, max: 1}); + const pool = (database.sessionFactory_ as SessionFactory) + .pool_ as SessionPool; + const multiplexedSession = (database.sessionFactory_ as SessionFactory) + .multiplexedSession_ as MultiplexedSession; + database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { + assert.strictEqual(pool._inventory.borrowed.size, 0); + // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.strictEqual(resp, 2); assert.ifError(err); @@ -2128,32 +2237,25 @@ describe('Spanner with mock server', () => { }); }); - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS is enabled', () => { + describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'true'; }); after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; }); - it('should execute the transaction(database.runPartitionedUpdate) successfully using regular/pool session', done => { + it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { const database = newTestDatabase({min: 1, max: 1}); const pool = (database.sessionFactory_ as SessionFactory) .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { - assert.strictEqual(pool._inventory.sessions.length, 1); - assert.strictEqual( - pool._inventory.sessions[0].metadata.multiplexed, - false, - ); - assert.strictEqual(multiplexedSession._multiplexedSession, null); + assert.strictEqual(pool._inventory.borrowed.size, 0); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); assert.strictEqual(resp, 2); assert.ifError(err); done(); @@ -2161,17 +2263,15 @@ describe('Spanner with mock server', () => { }); }); - describe('when multiplexed session is enabled for partitioned ops', () => { + describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS is disabled', () => { before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'true'; + 'false'; }); after(() => { 
- process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; + delete process.env + .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; }); it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { @@ -2182,6 +2282,7 @@ describe('Spanner with mock server', () => { .multiplexedSession_ as MultiplexedSession; database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { assert.strictEqual(pool._inventory.borrowed.size, 0); + // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.strictEqual(resp, 2); assert.ifError(err); @@ -2190,13 +2291,19 @@ describe('Spanner with mock server', () => { }); }); - describe('when multiplexed session is not enabled for partitioned ops', () => { + describe('when multiplexed session is disabled for partitioned ops', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = 'false'; }); + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + delete process.env + .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; + }); + it('should execute the transaction(database.runPartitionedUpdate) successfully using regular/pool session', done => { const database = newTestDatabase({min: 1, max: 1}); const pool = (database.sessionFactory_ as SessionFactory) @@ -2204,14 +2311,15 @@ describe('Spanner with mock server', () => { const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { + assert.ifError(err); assert.strictEqual(pool._inventory.sessions.length, 1); assert.strictEqual( pool._inventory.sessions[0].metadata.multiplexed, false, ); - assert.strictEqual(multiplexedSession._multiplexedSession, null); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); assert.strictEqual(resp, 2); - assert.ifError(err); done(); }); }); @@ -2219,18 +2327,8 @@ describe('Spanner with mock server', () => { }); describe('batch write', () => { - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - it('should use regular session from pool', done => { + describe('default session mode for r/w', () => { + it('should use multiplexed session', done => { const mutationGroup = new MutationGroup(); mutationGroup.upsert('FOO', { Id: '1', @@ -2250,30 +2348,26 @@ describe('Spanner with mock server', () => { .on('data', response => { // ensure that response is coming assert.notEqual(response.commitTimestamp, null); - assert.strictEqual( - Array.from(pool._inventory.borrowed)[0].metadata.multiplexed, - false, - ); - assert.strictEqual(pool._inventory.borrowed.size, 1); - // multiplexed session will get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled + // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); + + // regular session will 
not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled + assert.strictEqual(pool._inventory.sessions.length, 0); }) .on('end', done); }); }); - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is enabled', () => { + describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true'; }); after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; }); - it('should use regular session from pool', done => { + it('should use multiplexed session', done => { const mutationGroup = new MutationGroup(); mutationGroup.upsert('FOO', { Id: '1', @@ -2291,29 +2385,24 @@ describe('Spanner with mock server', () => { .batchWriteAtLeastOnce([mutationGroup]) .on('error', done) .on('data', response => { - // ensure that response is not null + // ensure that response is coming assert.notEqual(response.commitTimestamp, null); - assert.strictEqual( - Array.from(pool._inventory.borrowed)[0].metadata.multiplexed, - false, - ); - assert.strictEqual(pool._inventory.borrowed.size, 1); - // multiplexed session will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled - assert.strictEqual(multiplexedSession._multiplexedSession, null); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); + // session pool will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not false + assert.strictEqual(pool._inventory.sessions.length, 0); }) .on('end', done); }); }); - describe('when multiplexed session is enabled for r/w', () => { + describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is disabled', () => { before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; }); after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; }); it('should use multiplexed session', done => { @@ -2336,22 +2425,27 @@ describe('Spanner with mock server', () => { .on('data', response => { // ensure that response is not null assert.notEqual(response.commitTimestamp, null); - assert.strictEqual(pool._inventory.sessions.length, 1); - assert.strictEqual(pool._inventory.borrowed.size, 0); - // multiplexed session will get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled + // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); + // session pool will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not false + assert.strictEqual(pool._inventory.sessions.length, 0); }) .on('end', done); }); }); - describe('when multiplexed session is not enabled for r/w', () => { + describe('when multiplexed session is disabled for r/w', () => { before(() => { process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; }); - it('should use regular session from pool', done => { + after(() => { + 
delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; + }); + + it('should use regular session', done => { const mutationGroup = new MutationGroup(); mutationGroup.upsert('FOO', { Id: '1', @@ -2376,8 +2470,8 @@ describe('Spanner with mock server', () => { false, ); assert.strictEqual(pool._inventory.borrowed.size, 1); - // multiplexed session will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled - assert.strictEqual(multiplexedSession._multiplexedSession, null); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); }) .on('end', done); }); @@ -2767,6 +2861,20 @@ describe('Spanner with mock server', () => { }); describe('session-not-found', () => { + before(() => { + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = + 'false'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; + }); + + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + delete process.env + .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; + }); + it('should retry "Session not found" errors on Database.run()', done => { const db = newTestDatabase({ incStep: 1, @@ -2793,8 +2901,8 @@ describe('Spanner with mock server', () => { // 'Session not found' error. The second one was created by the retry. // As we only simulate the 'Session not found' error, the first // session is still present on the mock server. - assert.strictEqual(results!.length, 2); - if (results!.length !== 2) { + assert.strictEqual(results!.length, 3); + if (results!.length !== 3) { done(); } db.close() @@ -2919,7 +3027,8 @@ describe('Spanner with mock server', () => { // second one that was created as a result of the retry. db.getSessions((err, sessions) => { assert.ifError(err); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); transaction!.commit(err => { assert.ifError(err); db.close(done); @@ -2949,7 +3058,8 @@ describe('Spanner with mock server', () => { assert.ifError(err); db.getSessions((err, sessions) => { assert.ifError(err); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); db.close(done); }); }); @@ -3004,7 +3114,8 @@ describe('Spanner with mock server', () => { assert.ifError(err); db.getSessions((err, sessions) => { assert.ifError(err); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); db.close(done); }); }); @@ -3037,7 +3148,8 @@ describe('Spanner with mock server', () => { assert.ifError(err); db.getSessions((err, sessions) => { assert.ifError(err); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. 
+ assert.strictEqual(sessions!.length, 3); db.close(done); }); }); @@ -3071,7 +3183,8 @@ describe('Spanner with mock server', () => { const [rows] = await transaction.run(selectSql); assert.strictEqual(rows.length, 3); const [sessions] = await db.getSessions(); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); await transaction.commit(); return Promise.resolve(); } catch (e) { @@ -3103,7 +3216,8 @@ describe('Spanner with mock server', () => { transaction.insert('FOO', {Id: 1, Name: 'foo'}); await transaction.commit(); const [sessions] = await db.getSessions(); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); }) .catch(assert.ifError); await db.close(); @@ -3135,7 +3249,8 @@ describe('Spanner with mock server', () => { assert.strictEqual(updateCount, 1); await transaction.commit(); const [sessions] = await db.getSessions(); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); }) .catch(assert.ifError); await db.close(); @@ -3170,7 +3285,8 @@ describe('Spanner with mock server', () => { assert.deepStrictEqual(updateCounts, [1, 1]); await transaction.commit(); const [sessions] = await db.getSessions(); - assert.strictEqual(sessions!.length, 2); + // sessions length is 3 as the list will contain default multiplexed session as well. + assert.strictEqual(sessions!.length, 3); }) .catch(assert.ifError); await db.close(); @@ -3184,6 +3300,20 @@ describe('Spanner with mock server', () => { }); describe('session-pool', () => { + before(() => { + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = + 'false'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; + }); + + after(() => { + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; + delete process.env + .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; + }); + it('should execute table mutations without leaking sessions', async () => { const database = newTestDatabase(); try { @@ -3925,35 +4055,36 @@ describe('Spanner with mock server', () => { }); describe('batch-readonly-transaction', () => { - it('should use session from pool', async () => { - const database = newTestDatabase({min: 0, incStep: 1}); - const pool = database.pool_ as SessionPool; - assert.strictEqual(pool.size, 0); - const [transaction] = await database.createBatchTransaction(); - assert.strictEqual(pool.size, 1); - assert.strictEqual(pool.available, 0); - transaction.close(); - await database.close(); - }); - - it('failing to close transaction should cause session leak error', async () => { - const database = newTestDatabase(); - await database.createBatchTransaction(); - try { - await database.close(); - assert.fail('missing expected session leak error'); - } catch (err) { - assert.ok(err instanceof SessionLeakError); - } - }); - - describe('when multiplexed session is enabled', () => { + describe('when multiplexed session is disabled', () => { before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; + process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; }); after(() => { - 
process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; + delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; }); + it('should use session from pool', async () => { + const database = newTestDatabase({min: 0, incStep: 1}); + const pool = database.pool_ as SessionPool; + assert.strictEqual(pool.size, 0); + const [transaction] = await database.createBatchTransaction(); + assert.strictEqual(pool.size, 1); + assert.strictEqual(pool.available, 0); + transaction.close(); + await database.close(); + }); + it('failing to close transaction should cause session leak error', async () => { + const database = newTestDatabase(); + await database.createBatchTransaction(); + try { + await database.close(); + assert.fail('missing expected session leak error'); + } catch (err) { + assert.ok(err instanceof SessionLeakError); + } + }); + }); + + describe('when session mode is default', () => { it('should use multiplexed session', async () => { const database = newTestDatabase({min: 0, incStep: 1}); const pool = database.pool_ as SessionPool; @@ -3965,6 +4096,7 @@ describe('Spanner with mock server', () => { const [transaction] = await database.createBatchTransaction(); // pool is empty after call to createBatchTransaction assert.strictEqual(pool.size, 0); + // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); transaction.close(); await database.close(); @@ -4130,659 +4262,645 @@ describe('Spanner with mock server', () => { }); // tests for mutation key heuristics, lock order prevention and commit retry protocol - describe('when multiplexed session is enabled for R/W', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true'; - }); + // test(s) for mutation key heuristic + describe('should be able to select correct mutation key in case of mutation(s) only transaction(s)', () => { + it('should select the insertOrUpdate(upsert)/delete(deleteRows) mutation key over insert', async () => { + const database = newTestDatabase(); + await database.runTransactionAsync(async tx => { + tx.upsert('foo', [ + {id: 1, name: 'One'}, + {id: 2, name: 'Two'}, + ]); + tx.insert('foo', [{id: 3, name: 'Three'}]); + tx.insert('foo', [{id: 4, name: 'Four'}]); + tx.deleteRows('foo', ['3', '4']); + await tx.commit(); + }); - after(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); + const beginTransactionRequest = spannerMock + .getRequests() + .filter(val => { + return (val as v1.BeginTransactionRequest).mutationKey; + }) as v1.BeginTransactionRequest[]; - // test(s) for mutation key heuristic - describe('should be able to select correct mutation key in case of mutation(s) only transaction(s)', () => { - it('should select the insertOrUpdate(upsert)/delete(deleteRows) mutation key over insert', async () => { - const database = newTestDatabase(); - await database.runTransactionAsync(async tx => { - tx.upsert('foo', [ - {id: 1, name: 'One'}, - {id: 2, name: 'Two'}, - ]); - tx.insert('foo', [{id: 3, name: 'Three'}]); - tx.insert('foo', [{id: 4, name: 'Four'}]); - tx.deleteRows('foo', ['3', '4']); - await tx.commit(); - }); + // assert on begin transaction request + assert.strictEqual(beginTransactionRequest.length, 1); - const beginTransactionRequest = spannerMock - .getRequests() - .filter(val => { - return (val as 
v1.BeginTransactionRequest).mutationKey; - }) as v1.BeginTransactionRequest[]; + // selected mutation key + const selectedMutationKey = beginTransactionRequest[0]!.mutationKey; - // assert on begin transaction request - assert.strictEqual(beginTransactionRequest.length, 1); + // assert that mutation key have been selected + assert.ok( + selectedMutationKey, + 'A mutation key should have been selected', + ); - // selected mutation key - const selectedMutationKey = beginTransactionRequest[0]!.mutationKey; + // get the type of mutation key + const mutationType = Object.keys(selectedMutationKey!)[0]; - // assert that mutation key have been selected - assert.ok( - selectedMutationKey, - 'A mutation key should have been selected', - ); + // assert that mutation key is either insertOrUpdate or delete + assert.ok( + ['insertOrUpdate', 'delete'].includes(mutationType), + "Expected either 'insertOrUpdate' or 'delete' key.", + ); - // get the type of mutation key - const mutationType = Object.keys(selectedMutationKey!)[0]; + const commitRequest = spannerMock.getRequests().filter(val => { + return (val as v1.CommitRequest).precommitToken; + }) as v1.CommitRequest[]; - // assert that mutation key is either insertOrUpdate or delete - assert.ok( - ['insertOrUpdate', 'delete'].includes(mutationType), - "Expected either 'insertOrUpdate' or 'delete' key.", - ); - - const commitRequest = spannerMock.getRequests().filter(val => { - return (val as v1.CommitRequest).precommitToken; - }) as v1.CommitRequest[]; + // assert on commit request + assert.strictEqual(commitRequest.length, 1); + await database.close(); + }); - // assert on commit request - assert.strictEqual(commitRequest.length, 1); - await database.close(); + it('should select the mutation key with highest number of values when insert key(s) are present', async () => { + const database = newTestDatabase(); + await database.runTransactionAsync(async tx => { + tx.insert('foo', [ + {id: randomUUID(), name: 'One'}, + {id: randomUUID(), name: 'Two'}, + {id: randomUUID(), name: 'Three'}, + ]); + tx.insert('foo', {id: randomUUID(), name: 'Four'}); + await tx.commit(); }); - it('should select the mutation key with highest number of values when insert key(s) are present', async () => { - const database = newTestDatabase(); - await database.runTransactionAsync(async tx => { - tx.insert('foo', [ - {id: randomUUID(), name: 'One'}, - {id: randomUUID(), name: 'Two'}, - {id: randomUUID(), name: 'Three'}, - ]); - tx.insert('foo', {id: randomUUID(), name: 'Four'}); - await tx.commit(); - }); - - const beginTransactionRequest = spannerMock - .getRequests() - .filter(val => { - return (val as v1.BeginTransactionRequest).mutationKey; - }) as v1.BeginTransactionRequest[]; + const beginTransactionRequest = spannerMock + .getRequests() + .filter(val => { + return (val as v1.BeginTransactionRequest).mutationKey; + }) as v1.BeginTransactionRequest[]; - // assert on begin transaction request - assert.strictEqual(beginTransactionRequest.length, 1); + // assert on begin transaction request + assert.strictEqual(beginTransactionRequest.length, 1); - // selected mutation key - const selectedMutationKey = beginTransactionRequest[0]!.mutationKey; + // selected mutation key + const selectedMutationKey = beginTransactionRequest[0]!.mutationKey; - // assert that mutation key have been selected - assert.ok( - selectedMutationKey, - 'A mutation key should have been selected', - ); + // assert that mutation key have been selected + assert.ok( + selectedMutationKey, + 'A mutation key should 
have been selected', + ); - // assert that mutation key is insert - const mutationType = Object.keys(selectedMutationKey!)[0]; - assert.ok( - ['insert'].includes(mutationType), - 'insert key must have been selected', - ); + // assert that mutation key is insert + const mutationType = Object.keys(selectedMutationKey!)[0]; + assert.ok( + ['insert'].includes(mutationType), + 'insert key must have been selected', + ); - // assert that insert mutation key with highest number of rows has been selected - assert.strictEqual(selectedMutationKey.insert?.values?.length, 3); + // assert that insert mutation key with highest number of rows has been selected + assert.strictEqual(selectedMutationKey.insert?.values?.length, 3); - const commitRequest = spannerMock.getRequests().filter(val => { - return (val as v1.CommitRequest).precommitToken; - }) as v1.CommitRequest[]; + const commitRequest = spannerMock.getRequests().filter(val => { + return (val as v1.CommitRequest).precommitToken; + }) as v1.CommitRequest[]; - // assert on commit request - assert.strictEqual(commitRequest.length, 1); - await database.close(); - }); + // assert on commit request + assert.strictEqual(commitRequest.length, 1); + await database.close(); }); + }); - // test(s) for lock order prevention - describe('should be able to track multiplexedSessionPreviousTransactionId in case of abort transactions and retries', () => { - describe('using runTransaction', () => { - it('case 1: transaction abortion on first query execution', async () => { - let attempts = 0; - let rowCount = 0; - const database = newTestDatabase(); - const transactionObjects: Transaction[] = []; - try { - await new Promise((resolve, reject) => { - database.runTransaction(async (err, transaction) => { - try { - if (err) { - return reject(err); - } - transactionObjects.push(transaction!); - if (!attempts) { - // abort the transaction - spannerMock.abortTransaction(transaction!); - } - attempts++; - const [rows1] = await transaction!.run(selectSql); - rows1.forEach(() => rowCount++); - - // assert on number of rows - assert.strictEqual(rowCount, 3); - - // assert on number of retries - assert.strictEqual(attempts, 2); - - const beginTxnRequest = spannerMock - .getRequests() - .find(val => { - return (val as v1.BeginTransactionRequest).options - ?.readWrite; - }) as v1.BeginTransactionRequest; - - const txnId = - beginTxnRequest.options?.readWrite - ?.multiplexedSessionPreviousTransactionId; - // no transaction id should be in the begintransactionrequest - // since first transaction got abort before getting an id - assert.ok( - txnId instanceof Buffer && txnId.byteLength === 0, - ); - // transactionObjects must have two transaction objects - // one the aborted transaction - // another the retried transaction - assert.strictEqual(transactionObjects.length, 2); - // first transaction must have an id undefined - // as the transaction got aborted before query execution - // which results in failure of inline begin - assert.strictEqual(transactionObjects[0].id, undefined); - // first transaction must not be having any previous transaction id - assert.strictEqual( - transactionObjects[0] - .multiplexedSessionPreviousTransactionId, - undefined, - ); - // the second transaction object(retried transaction) must have - // non null transaction id - assert.notEqual(transactionObjects[1].id, undefined); - // since the first transaction did not got any id previous transaction id - // for second transaction must be undefined - assert.strictEqual( - transactionObjects[1] - 
.multiplexedSessionPreviousTransactionId, - undefined, - ); - resolve(); - } catch (e: any) { - if (e.code === 10) { - throw e; - } else { - reject(e); - } + // test(s) for lock order prevention + describe('should be able to track multiplexedSessionPreviousTransactionId in case of abort transactions and retries', () => { + describe('using runTransaction', () => { + it('case 1: transaction abortion on first query execution', async () => { + let attempts = 0; + let rowCount = 0; + const database = newTestDatabase(); + const transactionObjects: Transaction[] = []; + try { + await new Promise((resolve, reject) => { + database.runTransaction(async (err, transaction) => { + try { + if (err) { + return reject(err); + } + transactionObjects.push(transaction!); + if (!attempts) { + // abort the transaction + spannerMock.abortTransaction(transaction!); + } + attempts++; + const [rows1] = await transaction!.run(selectSql); + rows1.forEach(() => rowCount++); + + // assert on number of rows + assert.strictEqual(rowCount, 3); + + // assert on number of retries + assert.strictEqual(attempts, 2); + + const beginTxnRequest = spannerMock + .getRequests() + .find(val => { + return (val as v1.BeginTransactionRequest).options + ?.readWrite; + }) as v1.BeginTransactionRequest; + + const txnId = + beginTxnRequest.options?.readWrite + ?.multiplexedSessionPreviousTransactionId; + // no transaction id should be in the begintransactionrequest + // since first transaction got abort before getting an id + assert.ok(txnId instanceof Buffer && txnId.byteLength === 0); + // transactionObjects must have two transaction objects + // one the aborted transaction + // another the retried transaction + assert.strictEqual(transactionObjects.length, 2); + // first transaction must have an id undefined + // as the transaction got aborted before query execution + // which results in failure of inline begin + assert.strictEqual(transactionObjects[0].id, undefined); + // first transaction must not be having any previous transaction id + assert.strictEqual( + transactionObjects[0] + .multiplexedSessionPreviousTransactionId, + undefined, + ); + // the second transaction object(retried transaction) must have + // non null transaction id + assert.notEqual(transactionObjects[1].id, undefined); + // since the first transaction did not got any id previous transaction id + // for second transaction must be undefined + assert.strictEqual( + transactionObjects[1] + .multiplexedSessionPreviousTransactionId, + undefined, + ); + resolve(); + } catch (e: any) { + if (e.code === 10) { + throw e; + } else { + reject(e); } - }); - }); - } finally { - await database.close(); - } - }); - }); - describe('using runTransactionAsync', () => { - it('case 1: transaction abortion on first query execution', async () => { - let attempts = 0; - const database = newTestDatabase(); - const transactionObjects: Transaction[] = []; - const rowCount = await database.runTransactionAsync( - (transaction): Promise => { - transactionObjects.push(transaction); - if (!attempts) { - // abort the transaction - spannerMock.abortTransaction(transaction); - } - attempts++; - return transaction.run(selectSql).then(([rows]) => { - let count = 0; - rows.forEach(() => count++); - return transaction.commit().then(() => count); - }); - }, - ); - assert.strictEqual(rowCount, 3); - assert.strictEqual(attempts, 2); - await database.close(); - - const beginTxnRequest = spannerMock.getRequests().find(val => { - return (val as v1.BeginTransactionRequest).options?.readWrite; - }) as 
v1.BeginTransactionRequest; - const txnId = - beginTxnRequest.options?.readWrite - ?.multiplexedSessionPreviousTransactionId; - // no transaction id should be in the begintransactionrequest - // since first transaction got abort before getting an id - assert.ok(txnId instanceof Buffer && txnId.byteLength === 0); - // transactionObjects must have two transaction objects - // one the aborted transaction - // another the retried transaction - assert.strictEqual(transactionObjects.length, 2); - // first transaction must have an id undefined - // as the transaction got aborted before query execution - // which results in failure of inline begin - assert.strictEqual(transactionObjects[0].id, undefined); - // first transaction must not be having any previous transaction id - assert.strictEqual( - transactionObjects[0].multiplexedSessionPreviousTransactionId, - undefined, - ); - // the second transaction object(retried transaction) must have - // non null transaction id - assert.notEqual(transactionObjects[1].id, undefined); - // since the first transaction did not got any id previous transaction id - // for second transaction must be undefined - assert.strictEqual( - transactionObjects[1].multiplexedSessionPreviousTransactionId, - undefined, - ); - }); - it('case 2: transaction abortion on second query execution', async () => { - let attempts = 0; - let rowCount = 0; - const database = newTestDatabase(); - const transactionObjects: Transaction[] = []; - await database.runTransactionAsync( - async (transaction): Promise => { - transactionObjects.push(transaction); - attempts++; - const [rows1] = await transaction.run(selectSql); - rows1.forEach(() => rowCount++); - if (attempts === 1) { - // abort the transaction - spannerMock.abortTransaction(transaction); } - const [rows2] = await transaction.run(selectSql); - rows2.forEach(() => rowCount++); - await transaction.commit(); - }, - ); - assert.strictEqual(rowCount, 9); - assert.strictEqual(attempts, 2); + }); + }); + } finally { await database.close(); + } + }); + }); + describe('using runTransactionAsync', () => { + it('case 1: transaction abortion on first query execution', async () => { + let attempts = 0; + const database = newTestDatabase(); + const transactionObjects: Transaction[] = []; + const rowCount = await database.runTransactionAsync( + (transaction): Promise => { + transactionObjects.push(transaction); + if (!attempts) { + // abort the transaction + spannerMock.abortTransaction(transaction); + } + attempts++; + return transaction.run(selectSql).then(([rows]) => { + let count = 0; + rows.forEach(() => count++); + return transaction.commit().then(() => count); + }); + }, + ); + assert.strictEqual(rowCount, 3); + assert.strictEqual(attempts, 2); + await database.close(); - const beginTxnRequest = spannerMock.getRequests().find(val => { - return (val as v1.BeginTransactionRequest).options?.readWrite; - }) as v1.BeginTransactionRequest; - const txnId = - beginTxnRequest.options?.readWrite - ?.multiplexedSessionPreviousTransactionId; - // begin transaction request must contain the aborted transaction id - // as the previous transaction id upon retrying - assert.deepStrictEqual(txnId, transactionObjects[0].id); - // transactionObjects must contain have both the transaction - // one the aborted transaction - // another the retried transaction - assert.strictEqual(transactionObjects.length, 2); - // since inline begin was successfull with first query execution - // the transaction id would not be undefined for first transaction - 
assert.notEqual(transactionObjects[0].id, undefined); - // multiplexed session previous transaction id would be undefined - // for first transaction - assert.strictEqual( - transactionObjects[0].multiplexedSessionPreviousTransactionId, - undefined, - ); - // the second transction object (the retried transaction) must have an id - assert.notEqual(transactionObjects[1].id, undefined); - // first transaction id would be the multiplexed session previous transction id - // for retried transction - assert.strictEqual( - transactionObjects[1].multiplexedSessionPreviousTransactionId, - transactionObjects[0].id, - ); - }); - it('case 3: multiple transaction abortion', async () => { - let attempts = 0; - let rowCount = 0; - const database = newTestDatabase(); - const transactionObjects: Transaction[] = []; - await database.runTransactionAsync( - async (transaction): Promise => { - transactionObjects.push(transaction); - attempts++; - const [rows1] = await transaction.run(selectSql); - rows1.forEach(() => rowCount++); - if (attempts === 1) { - // abort the transaction - spannerMock.abortTransaction(transaction); - } - const [rows2] = await transaction.run(selectSql); - rows2.forEach(() => rowCount++); - if (attempts === 2) { - // abort the transaction - spannerMock.abortTransaction(transaction); - } - const [rows3] = await transaction.run(selectSql); - rows3.forEach(() => rowCount++); - await transaction.commit(); - }, - ); - assert.strictEqual(rowCount, 18); - assert.strictEqual(attempts, 3); - await database.close(); - const beginTxnRequest = spannerMock.getRequests().filter(val => { - return (val as v1.BeginTransactionRequest).options?.readWrite; - }) as v1.BeginTransactionRequest[]; - // begin transaction request must have been called twice - // as transaction abortion happend twice - assert.strictEqual(beginTxnRequest.length, 2); - // multiplexedSessionPreviousTransactionId for first - // begin transaction request must be the id of first transaction object - assert.deepStrictEqual( - beginTxnRequest[0].options?.readWrite - ?.multiplexedSessionPreviousTransactionId, - transactionObjects[0].id, - ); - // multiplexedSessionPreviousTransactionId must get updated with an id of - // second transaction object on second begin transaction request - assert.deepStrictEqual( - beginTxnRequest[1].options?.readWrite - ?.multiplexedSessionPreviousTransactionId, - transactionObjects[1].id, - ); - // transactionObjects must contain 3 transaction objects - // as the transaction abortion happend twice - assert.strictEqual(transactionObjects.length, 3); - // first transaction must have a non null id - assert.notEqual(transactionObjects[0].id, undefined); - // first transaction must not have any previous transaction id - assert.strictEqual( - transactionObjects[0].multiplexedSessionPreviousTransactionId, - undefined, - ); - // second transaction must have a non null id - assert.notEqual(transactionObjects[1].id, undefined); - // second transaction must have previous transaction id as the - // id of first transaction object - assert.strictEqual( - transactionObjects[1].multiplexedSessionPreviousTransactionId, - transactionObjects[0].id, - ); - // third transction must have a non null id - assert.notEqual(transactionObjects[2].id, undefined); - // third transaction must have previous transaction id - // set to second transaction object id - assert.strictEqual( - transactionObjects[2].multiplexedSessionPreviousTransactionId, - transactionObjects[1].id, - ); - }); - it('case 4: commit abort', async () => { - const 
database = newTestDatabase(); - let attempts = 0; - let rowCount = 0; - const transactionObjects: Transaction[] = []; - const err = { - message: 'Simulated error for commit abortion', - code: grpc.status.ABORTED, - } as MockError; - await database.runTransactionAsync(async tx => { + const beginTxnRequest = spannerMock.getRequests().find(val => { + return (val as v1.BeginTransactionRequest).options?.readWrite; + }) as v1.BeginTransactionRequest; + const txnId = + beginTxnRequest.options?.readWrite + ?.multiplexedSessionPreviousTransactionId; + // no transaction id should be in the begintransactionrequest + // since first transaction got abort before getting an id + assert.ok(txnId instanceof Buffer && txnId.byteLength === 0); + // transactionObjects must have two transaction objects + // one the aborted transaction + // another the retried transaction + assert.strictEqual(transactionObjects.length, 2); + // first transaction must have an id undefined + // as the transaction got aborted before query execution + // which results in failure of inline begin + assert.strictEqual(transactionObjects[0].id, undefined); + // first transaction must not be having any previous transaction id + assert.strictEqual( + transactionObjects[0].multiplexedSessionPreviousTransactionId, + undefined, + ); + // the second transaction object(retried transaction) must have + // non null transaction id + assert.notEqual(transactionObjects[1].id, undefined); + // since the first transaction did not got any id previous transaction id + // for second transaction must be undefined + assert.strictEqual( + transactionObjects[1].multiplexedSessionPreviousTransactionId, + undefined, + ); + }); + it('case 2: transaction abortion on second query execution', async () => { + let attempts = 0; + let rowCount = 0; + const database = newTestDatabase(); + const transactionObjects: Transaction[] = []; + await database.runTransactionAsync( + async (transaction): Promise => { + transactionObjects.push(transaction); attempts++; - transactionObjects.push(tx); - try { - const [rows] = await tx.runUpdate(invalidSql); - rowCount = rowCount + rows; - assert.fail('missing expected error'); - } catch (e) { - assert.strictEqual( - (e as ServiceError).message, - `${grpc.status.NOT_FOUND} NOT_FOUND: ${fooNotFoundErr.message}`, - ); + const [rows1] = await transaction.run(selectSql); + rows1.forEach(() => rowCount++); + if (attempts === 1) { + // abort the transaction + spannerMock.abortTransaction(transaction); } - const [rows] = await tx.run(selectSql); - rows.forEach(() => rowCount++); + const [rows2] = await transaction.run(selectSql); + rows2.forEach(() => rowCount++); + await transaction.commit(); + }, + ); + assert.strictEqual(rowCount, 9); + assert.strictEqual(attempts, 2); + await database.close(); + + const beginTxnRequest = spannerMock.getRequests().find(val => { + return (val as v1.BeginTransactionRequest).options?.readWrite; + }) as v1.BeginTransactionRequest; + const txnId = + beginTxnRequest.options?.readWrite + ?.multiplexedSessionPreviousTransactionId; + // begin transaction request must contain the aborted transaction id + // as the previous transaction id upon retrying + assert.deepStrictEqual(txnId, transactionObjects[0].id); + // transactionObjects must contain have both the transaction + // one the aborted transaction + // another the retried transaction + assert.strictEqual(transactionObjects.length, 2); + // since inline begin was successfull with first query execution + // the transaction id would not be undefined for 
first transaction + assert.notEqual(transactionObjects[0].id, undefined); + // multiplexed session previous transaction id would be undefined + // for first transaction + assert.strictEqual( + transactionObjects[0].multiplexedSessionPreviousTransactionId, + undefined, + ); + // the second transction object (the retried transaction) must have an id + assert.notEqual(transactionObjects[1].id, undefined); + // first transaction id would be the multiplexed session previous transction id + // for retried transction + assert.strictEqual( + transactionObjects[1].multiplexedSessionPreviousTransactionId, + transactionObjects[0].id, + ); + }); + it('case 3: multiple transaction abortion', async () => { + let attempts = 0; + let rowCount = 0; + const database = newTestDatabase(); + const transactionObjects: Transaction[] = []; + await database.runTransactionAsync( + async (transaction): Promise => { + transactionObjects.push(transaction); + attempts++; + const [rows1] = await transaction.run(selectSql); + rows1.forEach(() => rowCount++); if (attempts === 1) { - spannerMock.setExecutionTime( - spannerMock.commit, - SimulatedExecutionTime.ofError(err), - ); - // abort commit - spannerMock.abortTransaction(tx); + // abort the transaction + spannerMock.abortTransaction(transaction); } - await tx.commit(); - }); - assert.strictEqual(attempts, 2); - assert.strictEqual(rowCount, 6); - await database.close(); - const beginTxnRequest = spannerMock - .getRequests() - .filter( - val => (val as v1.BeginTransactionRequest).options?.readWrite, - ) - .map(req => req as v1.BeginTransactionRequest); - // begin must have been requested twice - // one during explicit begin on unsucessful inline begin - // another time during retrying of aborted transaction - assert.deepStrictEqual(beginTxnRequest.length, 2); - // there must be two transaction in the transactionObjects - // one aborted transaction, another retried transaction - assert.strictEqual(transactionObjects.length, 2); - // since, inline begin was sucessful before commit got abort - // hence, the first transaction will have the id not null/undefined - assert.notEqual(transactionObjects[0].id, undefined); - // multiplexedSessionPreviousTransactionId must be undefined for first transaction - assert.strictEqual( - transactionObjects[0].multiplexedSessionPreviousTransactionId, - undefined, - ); - // retried transction will have the id not null/undefined - assert.notEqual(transactionObjects[1].id, undefined); - // multiplexedSessionPreviousTransactionId for retried transaction would be the id of aborted transaction - assert.strictEqual( - transactionObjects[1].multiplexedSessionPreviousTransactionId, - transactionObjects[0].id, - ); + const [rows2] = await transaction.run(selectSql); + rows2.forEach(() => rowCount++); + if (attempts === 2) { + // abort the transaction + spannerMock.abortTransaction(transaction); + } + const [rows3] = await transaction.run(selectSql); + rows3.forEach(() => rowCount++); + await transaction.commit(); + }, + ); + assert.strictEqual(rowCount, 18); + assert.strictEqual(attempts, 3); + await database.close(); + const beginTxnRequest = spannerMock.getRequests().filter(val => { + return (val as v1.BeginTransactionRequest).options?.readWrite; + }) as v1.BeginTransactionRequest[]; + // begin transaction request must have been called twice + // as transaction abortion happend twice + assert.strictEqual(beginTxnRequest.length, 2); + // multiplexedSessionPreviousTransactionId for first + // begin transaction request must be the id of first 
transaction object
+        assert.deepStrictEqual(
+          beginTxnRequest[0].options?.readWrite
+            ?.multiplexedSessionPreviousTransactionId,
+          transactionObjects[0].id,
+        );
+        // multiplexedSessionPreviousTransactionId must get updated with an id of
+        // second transaction object on second begin transaction request
+        assert.deepStrictEqual(
+          beginTxnRequest[1].options?.readWrite
+            ?.multiplexedSessionPreviousTransactionId,
+          transactionObjects[1].id,
+        );
+        // transactionObjects must contain 3 transaction objects
+        // as the transaction abortion happened twice
+        assert.strictEqual(transactionObjects.length, 3);
+        // first transaction must have a non null id
+        assert.notEqual(transactionObjects[0].id, undefined);
+        // first transaction must not have any previous transaction id
+        assert.strictEqual(
+          transactionObjects[0].multiplexedSessionPreviousTransactionId,
+          undefined,
+        );
+        // second transaction must have a non null id
+        assert.notEqual(transactionObjects[1].id, undefined);
+        // second transaction must have previous transaction id as the
+        // id of first transaction object
+        assert.strictEqual(
+          transactionObjects[1].multiplexedSessionPreviousTransactionId,
+          transactionObjects[0].id,
+        );
+        // third transaction must have a non null id
+        assert.notEqual(transactionObjects[2].id, undefined);
+        // third transaction must have previous transaction id
+        // set to second transaction object id
+        assert.strictEqual(
+          transactionObjects[2].multiplexedSessionPreviousTransactionId,
+          transactionObjects[1].id,
+        );
+      });
+      it('case 4: commit abort', async () => {
+        const database = newTestDatabase();
+        let attempts = 0;
+        let rowCount = 0;
+        const transactionObjects: Transaction[] = [];
+        const err = {
+          message: 'Simulated error for commit abortion',
+          code: grpc.status.ABORTED,
+        } as MockError;
+        await database.runTransactionAsync(async tx => {
+          attempts++;
+          transactionObjects.push(tx);
+          try {
+            const [rows] = await tx.runUpdate(invalidSql);
+            rowCount = rowCount + rows;
+            assert.fail('missing expected error');
+          } catch (e) {
+            assert.strictEqual(
+              (e as ServiceError).message,
+              `${grpc.status.NOT_FOUND} NOT_FOUND: ${fooNotFoundErr.message}`,
+            );
+          }
+          const [rows] = await tx.run(selectSql);
+          rows.forEach(() => rowCount++);
+          if (attempts === 1) {
+            spannerMock.setExecutionTime(
+              spannerMock.commit,
+              SimulatedExecutionTime.ofError(err),
+            );
+            // abort commit
+            spannerMock.abortTransaction(tx);
+          }
+          await tx.commit();
        });
        assert.strictEqual(attempts, 2);
        assert.strictEqual(rowCount, 6);
        await database.close();
        const beginTxnRequest = spannerMock
+          .getRequests()
+          .filter(
+            val => (val as v1.BeginTransactionRequest).options?.readWrite,
+          )
+          .map(req => req as v1.BeginTransactionRequest);
+        // begin must have been requested twice
+        // once during explicit begin on unsuccessful inline begin
+        // another time during retrying of the aborted transaction
+        assert.deepStrictEqual(beginTxnRequest.length, 2);
+        // there must be two transactions in the transactionObjects
+        // one aborted transaction, another retried transaction
+        assert.strictEqual(transactionObjects.length, 2);
+        // since inline begin was successful before the commit got aborted,
+        // the first transaction will have the id not null/undefined
+        assert.notEqual(transactionObjects[0].id, undefined);
+        // multiplexedSessionPreviousTransactionId must be undefined for first transaction
+        assert.strictEqual(
+          transactionObjects[0].multiplexedSessionPreviousTransactionId,
+          undefined,
+        );
+        // retried transaction will have the id not
null/undefined
+        assert.notEqual(transactionObjects[1].id, undefined);
+        // multiplexedSessionPreviousTransactionId for retried transaction would be the id of aborted transaction
+        assert.strictEqual(
+          transactionObjects[1].multiplexedSessionPreviousTransactionId,
+          transactionObjects[0].id,
+        );
      });
      describe('using getTransaction', () => {
        it('case 1: transaction abortion on first query execution', async () => {
          let attempts = 0;
          let rowCount = 0;
          const MAX_ATTEMPTS = 2;
          let multiplexedSessionPreviousTransactionId;
          let transaction;
          const database = newTestDatabase();
          const transactionObjects: Transaction[] = [];
          while (attempts < MAX_ATTEMPTS) {
            try {
              [transaction] = await database.getTransaction();
              transactionObjects.push(transaction);
              transaction.multiplexedSessionPreviousTransactionId =
                multiplexedSessionPreviousTransactionId;
              if (attempts > 0) {
                transaction.begin();
              }
              const [rows1] = await transaction.run(selectSql);
              rows1.forEach(() => rowCount++);
              if (!attempts) {
                // abort the transaction
                spannerMock.abortTransaction(transaction);
              }
              const [rows2] = await transaction.run(selectSql);
              rows2.forEach(() => rowCount++);
              await transaction.commit();
            } catch (err) {
              assert.strictEqual(
                (err as grpc.ServiceError).code,
                grpc.status.ABORTED,
              );
            } finally {
              attempts++;
              multiplexedSessionPreviousTransactionId = transaction.id;
            }
          }
          // assert on row count
          assert.strictEqual(rowCount, 9);
          // assert on number of attempts
          assert.strictEqual(attempts, 2);
          await database.close();
          const beginTxnRequest = spannerMock.getRequests().find(val => {
            return (val as v1.BeginTransactionRequest).options?.readWrite;
          }) as v1.BeginTransactionRequest;
          const txnId =
            beginTxnRequest.options?.readWrite
              ?.multiplexedSessionPreviousTransactionId;
          // begin transaction request must contain the aborted transaction id
          // as the previous transaction id upon retrying
          assert.deepStrictEqual(txnId, transactionObjects[0].id);
          // transactionObjects must contain both transactions
          // one the aborted transaction
          // another the retried transaction
          assert.strictEqual(transactionObjects.length, 2);
          // since inline begin was successful with first query execution
          // the transaction id would not be undefined for first transaction
          assert.notEqual(transactionObjects[0].id, undefined);
          // multiplexed session previous transaction id
-            // for first transaction
-            assert.strictEqual(
-              transactionObjects[0].multiplexedSessionPreviousTransactionId,
-              undefined,
-            );
-            // the second transction object (the retried transaction) must have an id
-            assert.notEqual(transactionObjects[1].id, undefined);
-            // first transaction id would be the multiplexed session previous transction id
-            // for retried transction
-            assert.strictEqual(
-              transactionObjects[1].multiplexedSessionPreviousTransactionId,
-              transactionObjects[0].id,
-            );
-          });
+          }
+          // assert on row count
+          assert.strictEqual(rowCount, 9);
+          // assert on number of attempts
+          assert.strictEqual(attempts, 2);
+          await database.close();
+          const beginTxnRequest = spannerMock.getRequests().find(val => {
+            return (val as v1.BeginTransactionRequest).options?.readWrite;
+          }) as v1.BeginTransactionRequest;
+          const txnId =
+            beginTxnRequest.options?.readWrite
+              ?.multiplexedSessionPreviousTransactionId;
+          // the begin transaction request must contain the aborted transaction id
+          // as the previous transaction id upon retrying
+          assert.deepStrictEqual(txnId, transactionObjects[0].id);
+          // transactionObjects must contain both transactions:
+          // one for the aborted transaction and
+          // another for the retried transaction
+          assert.strictEqual(transactionObjects.length, 2);
+          // since inline begin succeeded on the first query execution,
+          // the transaction id will not be undefined for the first transaction
+          assert.notEqual(transactionObjects[0].id, undefined);
+          // multiplexed session previous transaction id would be undefined
+          // for the first transaction
+          assert.strictEqual(
+            transactionObjects[0].multiplexedSessionPreviousTransactionId,
+            undefined,
+          );
+          // the second transaction object (the retried transaction) must have an id
+          assert.notEqual(transactionObjects[1].id, undefined);
+          // the first transaction id becomes the multiplexed session previous transaction id
+          // for the retried transaction
+          assert.strictEqual(
+            transactionObjects[1].multiplexedSessionPreviousTransactionId,
+            transactionObjects[0].id,
+          );
        });
      });
+    });

-      // test(s) for commit retry logic
-      describe('Transaction Commit Retry Logic', () => {
-        let commitCallCount = 0;
-        let capturedCommitRequests: any[] = [];
+    // test(s) for commit retry logic
+    describe('Transaction Commit Retry Logic', () => {
+      let commitCallCount = 0;
+      let capturedCommitRequests: any[] = [];

-        it('should retry commit only once with a precommit token', async () => {
-          commitCallCount = 0;
-          capturedCommitRequests = [];
+      it('should retry commit only once with a precommit token', async () => {
+        commitCallCount = 0;
+        capturedCommitRequests = [];

-          const database = newTestDatabase({min: 1, max: 1});
-          const fakeRetryToken = Buffer.from('mock-retry-token-123');
+        const database = newTestDatabase({min: 1, max: 1});
+        const fakeRetryToken = Buffer.from('mock-retry-token-123');

-          const commitRetryResponse = {
-            MultiplexedSessionRetry: 'precommitToken',
-            precommitToken: {
-              precommitToken: fakeRetryToken,
-              seqNum: 2,
-            },
-            commitTimestamp: mock.now(),
-          };
+        const commitRetryResponse = {
+          MultiplexedSessionRetry: 'precommitToken',
+          precommitToken: {
+            precommitToken: fakeRetryToken,
+            seqNum: 2,
+          },
+          commitTimestamp: mock.now(),
+        };

-          const commitSuccessResponse = {
-            commitTimestamp: mock.now(),
-          };
+        const commitSuccessResponse = {
+          commitTimestamp: mock.now(),
+        };

-          await database.runTransactionAsync(async tx => {
-            // mock commit request
-            tx.request = (config: any, callback: Function) => {
-              const cb = callback as (err: any, response: any) => void;
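+        // The stubbed tx.request below simulates the multiplexed-session
+        // commit retry: the first CommitResponse carries only a precommitToken
+        // (a MultiplexedSessionRetry) while the second carries a commit
+        // timestamp, so exactly one retried commit is expected.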
+        await database.runTransactionAsync(async tx => {
+          // mock commit request
+          tx.request = (config: any, callback: Function) => {
+            const cb = callback as (err: any, response: any) => void;

-              if (config.method !== 'commit') return;
+            if (config.method !== 'commit') return;

-              commitCallCount++;
-              capturedCommitRequests.push(config.reqOpts);
+            commitCallCount++;
+            capturedCommitRequests.push(config.reqOpts);

-              if (commitCallCount === 1) {
-                cb(null, commitRetryResponse);
-              } else {
-                cb(null, commitSuccessResponse);
-              }
-            };
+            if (commitCallCount === 1) {
+              cb(null, commitRetryResponse);
+            } else {
+              cb(null, commitSuccessResponse);
+            }
+          };

-            // perform read
-            await tx!.run(selectSql);
+          // perform read
+          await tx!.run(selectSql);

-            // perform mutations
-            tx.upsert('foo', [
-              {id: 1, name: 'One'},
-              {id: 2, name: 'Two'},
-            ]);
+          // perform mutations
+          tx.upsert('foo', [
+            {id: 1, name: 'One'},
+            {id: 2, name: 'Two'},
+          ]);

-            // make a call to commit
-            await tx.commit();
+          // make a call to commit
+          await tx.commit();

-            // assert that retry heppen only once
-            assert.strictEqual(
-              commitCallCount,
-              2,
-              'The mock commit method should have been called exactly twice.',
-            );
+          // assert that the retry happens only once
+          assert.strictEqual(
+            commitCallCount,
+            2,
+            'The mock commit method should have been called exactly twice.',
+          );

-            const secondRequest = capturedCommitRequests[1];
-            // assert that during the second request to commit
-            // the precommitToken was present
-            assert.deepStrictEqual(
-              secondRequest.precommitToken,
-              commitRetryResponse.precommitToken,
-              'The second commit request should have the precommitToken from the retry response.',
-            );
-          });
-          await database.close();
+          const secondRequest = capturedCommitRequests[1];
+          // assert that the precommitToken was present
+          // in the second commit request
+          assert.deepStrictEqual(
+            secondRequest.precommitToken,
+            commitRetryResponse.precommitToken,
+            'The second commit request should have the precommitToken from the retry response.',
+          );
        });
+        await database.close();
      });
    });

-      // parallel transactions
-      describe('parallel transactions', async () => {
-        async function readAndMutations(database) {
-          await database.runTransactionAsync(async tx => {
-            await tx.run(selectSql);
-            await tx.run(selectSql);
-            tx.upsert('foo', [
-              {id: 1, name: 'One'},
-              {id: 2, name: 'Two'},
-            ]);
-            await tx.commit();
-          });
-        }
-        it('should have different precommit tokens for each transactions when running parallely', async () => {
-          const promises: Promise<void>[] = [];
-          const database = newTestDatabase();
+    // parallel transactions
+    describe('parallel transactions', async () => {
+      async function readAndMutations(database) {
+        await database.runTransactionAsync(async tx => {
+          await tx.run(selectSql);
+          await tx.run(selectSql);
+          tx.upsert('foo', [
+            {id: 1, name: 'One'},
+            {id: 2, name: 'Two'},
+          ]);
+          await tx.commit();
+        });
+      }
+      it('should have different precommit tokens for each transaction when running in parallel', async () => {
+        const promises: Promise<void>[] = [];
+        const database = newTestDatabase();

-          // run the transactions parallely
-          promises.push(readAndMutations(database));
-          promises.push(readAndMutations(database));
+        // run the transactions in parallel
+        promises.push(readAndMutations(database));
+        promises.push(readAndMutations(database));

-          // wait for the transaction to complete its execution
-          await Promise.all(promises);
+        // wait for the transactions to complete their execution
+        await Promise.all(promises);
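+        // Both transactions committed on the shared multiplexed session, so
+        // the commit requests recorded by the mock are inspected below to
+        // verify that each transaction carried its own precommit token.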
-          const commitRequest = spannerMock.getRequests().filter(val => {
-            return (val as v1.CommitRequest).precommitToken;
-          }) as v1.CommitRequest[];
+        const commitRequest = spannerMock.getRequests().filter(val => {
+          return (val as v1.CommitRequest).precommitToken;
+        }) as v1.CommitRequest[];

-          // assert that there are two commit requests one for each transaction
-          assert.strictEqual(commitRequest.length, 2);
+        // assert that there are two commit requests, one for each transaction
+        assert.strictEqual(commitRequest.length, 2);

-          // assert that precommitToken is not null during first request to commit
-          assert.notEqual(commitRequest[0].precommitToken, null);
+        // assert that precommitToken is not null in the first commit request
+        assert.notEqual(commitRequest[0].precommitToken, null);

-          // assert that precommitToken is instance of Buffer
-          assert.ok(
-            commitRequest[0].precommitToken?.precommitToken instanceof Buffer,
-          );
+        // assert that precommitToken is an instance of Buffer
+        assert.ok(
+          commitRequest[0].precommitToken?.precommitToken instanceof Buffer,
+        );

-          // assert that precommitToken is not null during second request to commit
-          assert.notEqual(commitRequest[1].precommitToken, null);
+        // assert that precommitToken is not null in the second commit request
+        assert.notEqual(commitRequest[1].precommitToken, null);

-          // assert that precommitToken is instance of Buffer
-          assert.ok(
-            commitRequest[1].precommitToken?.precommitToken instanceof Buffer,
-          );
+        // assert that precommitToken is an instance of Buffer
+        assert.ok(
+          commitRequest[1].precommitToken?.precommitToken instanceof Buffer,
+        );

-          // assert that precommitToken is different in both the commit request
-          assert.notEqual(
-            commitRequest[0].precommitToken.precommitToken,
-            commitRequest[1].precommitToken.precommitToken,
-          );
-        });
+        // assert that the precommitToken differs between the two commit requests
+        assert.notEqual(
+          commitRequest[0].precommitToken.precommitToken,
+          commitRequest[1].precommitToken.precommitToken,
+        );
      });
    });
  });
@@ -5898,16 +6016,6 @@ describe('Spanner with mock server', () => {
    });
    // tests for mutation key heuristic
    describe('when multiplexed session is enabled for R/W', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false';
-      });
-
      it('should pass the mutation key in begin transaction request in case of mutations only transactions', async () => {
        const database = newTestDatabase();
        await database.table('foo').upsert({id: 1, name: randomUUID()});
@@ -6859,8 +6967,8 @@ describe('Spanner with mock server', () => {
        });

        const expectedSpanNames = [
-          'CloudSpanner.Database.batchCreateSessions',
-          'CloudSpanner.SessionPool.createSessions',
+          'CloudSpanner.Database.createSession',
+          'CloudSpanner.MultiplexedSession.createSession',
          'CloudSpanner.Snapshot.runStream',
          'CloudSpanner.Database.runStream',
          'CloudSpanner.Database.run',
@@ -6873,13 +6981,11 @@ describe('Spanner with mock server', () => {
        );

        const expectedEventNames = [
-          'Requesting 25 sessions',
-          'Creating 25 sessions',
-          'Requested for 25 sessions returned 25',
+          'Requesting a multiplexed session',
+          'Created a multiplexed session',
          'Starting stream',
-          'Acquiring session',
-          'Waiting for a session to become available',
-          'Acquired session',
+          'Waiting for a multiplexed session to become available',
+          'Acquired multiplexed session',
          'Using Session',
        ];

@@ -6888,23 +6994,19 @@ describe('Spanner with mock server', () => {
          expectedEventNames,
          `Mismatched events\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`,
        );
-
        done();
      });
    });

  describe('session-factory', () => {
-    after(() => {
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-    });
-
-    it('should not propagate any error when enabling GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS after client initialization', done => {
+    it('should not propagate any error when disabling GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS after client initialization', done => {
      const database = newTestDatabase();
-      // enable env after database creation
-      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
+      // disable env after database creation
+      process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
      const sessionFactory = database.sessionFactory_ as SessionFactory;
      sessionFactory.getSession((err, _) => {
        assert.ifError(err);
+        delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS;
        done();
      });
    });
@@ -6966,7 +7068,7 @@ describe('Spanner with mock server', () => {
      const gotUnaryCalls = xGoogReqIDInterceptor.getUnaryCalls();
      assert.deepStrictEqual(
        gotUnaryCalls[0].method,
-        '/google.spanner.v1.Spanner/BatchCreateSessions',
+        '/google.spanner.v1.Spanner/CreateSession',
      );
      // It is non-deterministic to try to get the exact clientId used to invoke .BatchCreateSessions
      // given that these tests run as a collective and sessions are pooled.
diff --git a/test/transaction.ts b/test/transaction.ts
index 121c540ea..ebffbd5c4 100644
--- a/test/transaction.ts
+++ b/test/transaction.ts
@@ -1739,112 +1739,94 @@ describe('Transaction', () => {
      );
    });

-    describe('when multiplexed session is enabled for read/write', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true';
-      });
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW =
-          'false';
-      });
-      it('should pass multiplexedSessionPreviousTransactionId in the BeginTransactionRequest upon retrying an aborted transaction', () => {
-        const fakePreviousTransactionId = 'fake-previous-transaction-id';
-        const database = {
-          formattedName_: 'formatted-database-name',
-          isMuxEnabledForRW_: true,
-          parent: INSTANCE,
-        };
-        const SESSION = {
-          parent: database,
-          formattedName_: SESSION_NAME,
-          request: REQUEST,
-          requestStream: REQUEST_STREAM,
-        };
-        // multiplexed session
-        const multiplexedSession = Object.assign(
-          {multiplexed: true},
-          SESSION,
-        );
-        const transaction = new Transaction(multiplexedSession);
-        // transaction option must contain the previous transaction id for multiplexed session
-        transaction.multiplexedSessionPreviousTransactionId =
-          fakePreviousTransactionId;
-        const stub = sandbox.stub(transaction, 'request');
-        transaction.begin();
-
-        const expectedOptions = {
-          isolationLevel: 0,
-          readWrite: {
-            multiplexedSessionPreviousTransactionId:
-              fakePreviousTransactionId,
-          },
-        };
-        const {client, method, reqOpts, headers} = stub.lastCall.args[0];
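+    // On retry after an ABORTED error, a read/write transaction on a
+    // multiplexed session reports the aborted attempt's id as
+    // multiplexedSessionPreviousTransactionId; the test below verifies that
+    // it is included in the BeginTransactionRequest options.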
+    it('should pass multiplexedSessionPreviousTransactionId in the BeginTransactionRequest upon retrying an aborted transaction', () => {
+      const fakePreviousTransactionId = 'fake-previous-transaction-id';
+      const database = {
+        formattedName_: 'formatted-database-name',
+        isMuxEnabledForRW_: true,
+        parent: INSTANCE,
+      };
+      const SESSION = {
+        parent: database,
+        formattedName_: SESSION_NAME,
+        request: REQUEST,
+        requestStream: REQUEST_STREAM,
+      };
+      // multiplexed session
+      const multiplexedSession = Object.assign({multiplexed: true}, SESSION);
+      const transaction = new Transaction(multiplexedSession);
+      // the transaction options must contain the previous transaction id for the multiplexed session
+      transaction.multiplexedSessionPreviousTransactionId =
+        fakePreviousTransactionId;
+      const stub = sandbox.stub(transaction, 'request');
+      transaction.begin();

-        assert.strictEqual(client, 'SpannerClient');
-        assert.strictEqual(method, 'beginTransaction');
-        // request options should contain the multiplexedSessionPreviousTransactionId
-        assert.deepStrictEqual(reqOpts.options, expectedOptions);
-        assert.deepStrictEqual(
-          headers,
-          Object.assign(
-            {[LEADER_AWARE_ROUTING_HEADER]: true},
-            transaction.commonHeaders_,
-          ),
-        );
-      });
+      const expectedOptions = {
+        isolationLevel: 0,
+        readWrite: {
+          multiplexedSessionPreviousTransactionId: fakePreviousTransactionId,
+        },
+      };
+      const {client, method, reqOpts, headers} = stub.lastCall.args[0];

-      it('should send the correct options if _mutationKey is set in the transaction object', () => {
-        // session with multiplexed enabled
-        const multiplexedSession = Object.assign(
-          {multiplexed: true},
-          SESSION,
-        );
+      assert.strictEqual(client, 'SpannerClient');
+      assert.strictEqual(method, 'beginTransaction');
+      // request options should contain the multiplexedSessionPreviousTransactionId
+      assert.deepStrictEqual(reqOpts.options, expectedOptions);
+      assert.deepStrictEqual(
+        headers,
+        Object.assign(
+          {[LEADER_AWARE_ROUTING_HEADER]: true},
+          transaction.commonHeaders_,
+        ),
+      );
+    });

-        // fake mutation key
-        const fakeMutationKey = {
-          insertOrUpdate: {
-            table: 'my-table-123',
-            columns: ['Id', 'Name'],
-            values: [
-              {
-                values: [{stringValue: 'Id3'}, {stringValue: 'Name3'}],
-              },
-            ],
-          },
-        } as google.spanner.v1.Mutation;
+    it('should send the correct options if _mutationKey is set in the transaction object', () => {
+      // session with multiplexed enabled
+      const multiplexedSession = Object.assign({multiplexed: true}, SESSION);
+
+      // fake mutation key
+      const fakeMutationKey = {
+        insertOrUpdate: {
+          table: 'my-table-123',
+          columns: ['Id', 'Name'],
+          values: [
+            {
+              values: [{stringValue: 'Id3'}, {stringValue: 'Name3'}],
+            },
+          ],
+        },
+      } as google.spanner.v1.Mutation;

-        const transaction = new Transaction(multiplexedSession);
+      const transaction = new Transaction(multiplexedSession);

-        // stub the transaction request
-        const stub = sandbox.stub(transaction, 'request');
+      // stub the transaction request
+      const stub = sandbox.stub(transaction, 'request');

-        // set the _mutationKey in the transaction object
-        transaction._mutationKey = fakeMutationKey;
+      // set the _mutationKey in the transaction object
+      transaction._mutationKey = fakeMutationKey;

-        // make a call to begin
-        transaction.begin();
+      // make a call to begin
+      transaction.begin();

-        const expectedOptions = {isolationLevel: 0, readWrite: {}};
-        const {client, method, reqOpts, headers} = stub.lastCall.args[0];
+      const expectedOptions = {isolationLevel: 0, readWrite: {}};
+      const {client, method, reqOpts, headers} = stub.lastCall.args[0];

-        // assert on the begin transaction call
-        assert.strictEqual(client, 'SpannerClient');
-        assert.strictEqual(method, 'beginTransaction');
-        assert.deepStrictEqual(reqOpts.options, expectedOptions);
-        // assert that if the _mutationKey is set in the transaction object
-        // it is getting pass in the request as well along with request options
-        assert.deepStrictEqual(reqOpts.mutationKey, fakeMutationKey);
-        assert.deepStrictEqual(
-          headers,
-          Object.assign(
-            {[LEADER_AWARE_ROUTING_HEADER]: true},
-            transaction.commonHeaders_,
-          ),
-        );
-      });
+      // assert on the begin transaction call
+      assert.strictEqual(client, 'SpannerClient');
+      assert.strictEqual(method, 'beginTransaction');
+      assert.deepStrictEqual(reqOpts.options, expectedOptions);
+      // assert that if the _mutationKey is set in the transaction object
+      // it is passed in the request along with the request options
+      assert.deepStrictEqual(reqOpts.mutationKey, fakeMutationKey);
+      assert.deepStrictEqual(
+        headers,
+        Object.assign(
+          {[LEADER_AWARE_ROUTING_HEADER]: true},
+          transaction.commonHeaders_,
+        ),
+      );
    });
  });

@@ -1996,96 +1978,80 @@ describe('Transaction', () => {
      assert.deepStrictEqual(reqOpts.singleUseTransaction, expectedOptions);
    });

-    describe('when multiplexed session is enabled for read write', () => {
-      before(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'true';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'true';
-      });
-
-      after(() => {
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false';
-        process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW =
-          'false';
-      });
-
-      it('should call _setMutationKey when neither `id` is set nor `singleUseTransaction` is used', async () => {
-        // fake mutation key
-        const fakeMutations = [
-          {
-            insertOrUpdate: {
-              table: 'my-table-123',
-              columns: ['Id', 'Name'],
-              values: [
-                {
-                  values: [{stringValue: 'Id1'}, {stringValue: 'Name1'}],
-                },
-              ],
-            },
-          } as google.spanner.v1.Mutation,
-        ];
+    it('should call _setMutationKey when neither `id` is set nor `singleUseTransaction` is used', async () => {
+      // fake mutation key
+      const fakeMutations = [
+        {
+          insertOrUpdate: {
+            table: 'my-table-123',
+            columns: ['Id', 'Name'],
+            values: [
+              {
+                values: [{stringValue: 'Id1'}, {stringValue: 'Name1'}],
+              },
+            ],
+          },
+        } as google.spanner.v1.Mutation,
+      ];

-        // fake transaction id
-        const fakeTransactionId = 'fake-tx-id-12345';
+      // fake transaction id
+      const fakeTransactionId = 'fake-tx-id-12345';

-        const database = {
-          formattedName_: 'formatted-database-name',
-          isMuxEnabledForRW_: true,
-          parent: INSTANCE,
-        };
-        const SESSION = {
-          parent: database,
-          formattedName_: SESSION_NAME,
-          request: REQUEST,
-          requestStream: REQUEST_STREAM,
-        };
-        // multiplexed session
-        const multiplexedSession = Object.assign(
-          {multiplexed: true},
-          SESSION,
-        );
+      const database = {
+        formattedName_: 'formatted-database-name',
+        isMuxEnabledForRW_: true,
+        parent: INSTANCE,
+      };
+      const SESSION = {
+        parent: database,
+        formattedName_: SESSION_NAME,
+        request: REQUEST,
+        requestStream: REQUEST_STREAM,
+      };
+      // multiplexed session
+      const multiplexedSession = Object.assign({multiplexed: true}, SESSION);

-        // transaction object
-        const transaction = new Transaction(multiplexedSession);
+      // transaction object
+      const transaction = new Transaction(multiplexedSession);

-        // ensure transaction is not single use transaction
-        transaction._useInRunner = true;
+      // ensure transaction is not a single use transaction
+      transaction._useInRunner = true;

-        // ensure transaction ID is not set
-        transaction.id = undefined;
+      // ensure transaction ID is not set
+      transaction.id = undefined;

-        // set the _queuedMutations with the fakeMutations list
-        transaction._queuedMutations = fakeMutations;
+      // set the _queuedMutations with the fakeMutations list
+      transaction._queuedMutations = fakeMutations;

-        // spy on _setMutationKey
-        const setMutationKeySpy = sandbox.spy(transaction, '_setMutationKey');
+      // spy on _setMutationKey
+      const setMutationKeySpy = sandbox.spy(transaction, '_setMutationKey');

-        // stub the begin method
-        const beginStub = sandbox.stub(transaction, 'begin').callsFake(() => {
-          transaction.id = fakeTransactionId;
-          return Promise.resolve();
-        });
+      // stub the begin method
+      const beginStub = sandbox.stub(transaction, 'begin').callsFake(() => {
+        transaction.id = fakeTransactionId;
+        return Promise.resolve();
+      });

-        // stub transaction request
-        sandbox.stub(transaction, 'request');
+      // stub transaction request
+      sandbox.stub(transaction, 'request');

-        // make a call to commit
-        transaction.commit();
+      // make a call to commit
+      transaction.commit();

-        // ensure that _setMutationKey was got called once
-        sinon.assert.calledOnce(setMutationKeySpy);
+      // ensure that _setMutationKey was called once
+      sinon.assert.calledOnce(setMutationKeySpy);

-        // ensure that _setMutationKey got called with correct arguments
-        sinon.assert.calledWith(setMutationKeySpy, fakeMutations);
+      // ensure that _setMutationKey was called with the correct arguments
+      sinon.assert.calledWith(setMutationKeySpy, fakeMutations);

-        // ensure begin was called
-        sinon.assert.calledOnce(beginStub);
+      // ensure begin was called
+      sinon.assert.calledOnce(beginStub);

-        // ensure begin set the transaction id
-        assert.strictEqual(transaction.id, fakeTransactionId);
+      // ensure begin set the transaction id
+      assert.strictEqual(transaction.id, fakeTransactionId);

-        // ensure _mutationKey is set
-        assert.strictEqual(transaction._mutationKey, fakeMutations[0]);
-      });
+      // ensure _mutationKey is set
+      assert.strictEqual(transaction._mutationKey, fakeMutations[0]);
    });

    it('should call `end` once complete', () => {