Skip to content

Commit 0f3b317

Browse files
philnash authored and dkundel committed
fix(serverless-api/logs): keeps the most recent logs in log cache
Another note: I discovered while testing this that _read() is called multiple times, which led to extra API calls via _poll(). This was because when calling _read() with an interval set, the code would make a one-off call. Now, if an interval is already set, no extra polls will be made.
1 parent e8c02dc commit 0f3b317

File tree

1 file changed

+37
-11
lines changed
  • packages/serverless-api/src/streams

1 file changed

+37
-11
lines changed

packages/serverless-api/src/streams/logs.ts

Lines changed: 37 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ export class LogsStream extends Readable {
99
private _pollingCacheSize: number;
1010
private _interval: NodeJS.Timeout | undefined;
1111
private _viewedSids: Set<Sid>;
12+
private _viewedLogs: Array<{ sid: Sid; dateCreated: Date }>;
1213

1314
constructor(
1415
private environmentSid: Sid,
@@ -19,6 +20,7 @@ export class LogsStream extends Readable {
1920
super({ objectMode: true });
2021
this._interval = undefined;
2122
this._viewedSids = new Set();
23+
this._viewedLogs = [];
2224
this._pollingFrequency = config.pollingFrequency || 1000;
2325
this._pollingCacheSize = config.logCacheSize || 1000;
2426
}
@@ -54,13 +56,35 @@ export class LogsStream extends Readable {
5456
// The logs endpoint is not reliably returning logs in the same order
5557
// Therefore we need to keep a set of all previously seen log entries
5658
// In order to avoid memory leaks we cap the total size of logs at 1000
57-
// If the new set is larger we'll instead only use the SIDs from the current
58-
// request.
59-
if (logs.length + this._viewedSids.size <= this._pollingCacheSize) {
60-
logs.map(log => log.sid).forEach(sid => this._viewedSids.add(sid));
61-
} else {
62-
this._viewedSids = new Set(logs.map(log => log.sid));
63-
}
59+
// (or the set pollingCacheSize).
60+
//
61+
// We store an array of the logs' SIDs and created dates.
62+
// Then when a new page of logs is added, we find the unique logs, sort by
63+
// date created, newest to oldest, and chop off the end of the array (the
64+
// oldest logs) leaving the most recent logs in memory. We then turn that
65+
// into a set of SIDs to check against next time.
66+
67+
// Creates a unique set of log sids and date created from previous logs
68+
// and new logs by stringifying the sid and the date together.
69+
const viewedLogsSet = new Set([
70+
...this._viewedLogs.map(
71+
log => `${log.sid}-${log.dateCreated.toISOString()}`
72+
),
73+
...logs.map(log => `${log.sid}-${log.date_created}`),
74+
]);
75+
// Then we take that set, map over the logs and split them up into sid and
76+
// date again, sort them most to least recent and chop off the oldest if
77+
// they are beyond the polling cache size.
78+
this._viewedLogs = [...viewedLogsSet]
79+
.map(logString => {
80+
const [sid, dateCreated] = logString.split('-');
81+
return { sid, dateCreated: new Date(dateCreated) };
82+
})
83+
.sort((a, b) => b.dateCreated.valueOf() - a.dateCreated.valueOf())
84+
.slice(0, this._pollingCacheSize);
85+
// Finally we create a set of just SIDs to compare against.
86+
this._viewedSids = new Set(this._viewedLogs.map(log => log.sid));
87+
6488
if (!this.config.tail) {
6589
this.push(null);
6690
}
@@ -70,10 +94,12 @@ export class LogsStream extends Readable {
7094
}
7195

7296
_read() {
73-
if (this.config.tail && !this._interval) {
74-
this._interval = setInterval(() => {
75-
this._poll();
76-
}, this._pollingFrequency);
97+
if (this.config.tail) {
98+
if (!this._interval) {
99+
this._interval = setInterval(() => {
100+
this._poll();
101+
}, this._pollingFrequency);
102+
}
77103
} else {
78104
this._poll();
79105
}

0 commit comments

Comments
 (0)