
Commit b6c59df

Merge branch 'sh/sliding-window-log' of github.com:oslabs-beta/GraphQL-Gate into sh/sliding-window-log
2 parents 91b5df6 + 75981c8 commit b6c59df

6 files changed: +758 -44 lines changed

src/@types/rateLimit.d.ts

Lines changed: 1 addition & 3 deletions

@@ -44,7 +44,6 @@ export type RateLimiterSelection =
  * @type {number} refillRate - Rate at which tokens are added to the bucket in seconds
  */
 export interface TokenBucketOptions {
-    typename: 'bucket';
     bucketSize: number;
     refillRate: number;
 }
@@ -54,12 +53,11 @@ export interface TokenBucketOptions {
  * @type {number} capacity - max number of tokens that can be used in the bucket
  */
 export interface WindowOptions {
-    typename: 'window';
     windowSize: number;
     capacity: number;
 }
 
 // TODO: This will be a union type where we can specify Option types for other Rate Limiters
 // Record<string, never> represents the empty object for algorithms that don't require settings
 // and might be able to be removed in the future.
-export type RateLimiterOptions = TokenBucketOptions | WindowSize | Record<string, never>;
+export type RateLimiterOptions = TokenBucketOptions | Record<string, never>;
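Removing the `typename` discriminant and dropping `WindowOptions` from the `RateLimiterOptions` union changes how callers can narrow the options type. A minimal sketch of the effect, assuming the snippet sits where the relative import resolves; the variable names and values are illustrative, not from the commit:

import { RateLimiterOptions, TokenBucketOptions, WindowOptions } from '../@types/rateLimit';

// Illustrative option objects; the numbers are arbitrary.
const bucketOptions: TokenBucketOptions = { bucketSize: 10, refillRate: 1 };
const windowOptions: WindowOptions = { windowSize: 60000, capacity: 10 };

// TokenBucketOptions is still a member of the union...
const valid: RateLimiterOptions = bucketOptions;

// ...but WindowOptions no longer is, and without the `typename` discriminant the
// union cannot be narrowed by a simple string-literal property check, which is why
// rateLimiterSetup.ts below falls back to // @ts-ignore comments.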

src/middleware/rateLimiterSetup.ts

Lines changed: 12 additions & 10 deletions

@@ -1,6 +1,5 @@
 import Redis from 'ioredis';
 import { RateLimiterOptions, RateLimiterSelection } from '../@types/rateLimit';
-import SlidingWindowLog from '../rateLimiters/slidingWindowLog';
 import TokenBucket from '../rateLimiters/tokenBucket';
 
 /**
@@ -19,21 +18,24 @@ export default function setupRateLimiter(
 ) {
     switch (selection) {
         case 'TOKEN_BUCKET':
-            if (options.typename === 'bucket') {
-                return new TokenBucket(options.bucketSize, options.refillRate, client);
-            }
-            throw new Error('Invalid options for token bucket');
+            // todo validate options
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore
+            return new TokenBucket(options.bucketSize, options.refillRate, client);
+            break;
         case 'LEAKY_BUCKET':
             throw new Error('Leaky Bucket algonithm has not be implemented.');
         case 'FIXED_WINDOW':
             throw new Error('Fixed Window algonithm has not be implemented.');
         case 'SLIDING_WINDOW_LOG':
-            if (options.typename === 'window') {
-                return new SlidingWindowLog(options.windowSize, options.capacity, client);
-            }
-            throw new Error('Invalid options for sliding window log');
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore
+            return new SlidingWindowLog(options.windowSize, options.capacity, client);
        case 'SLIDING_WINDOW_COUNTER':
-            throw new Error('Sliding Window Counter algonithm has not be implemented.');
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            // @ts-ignore
+            return new SlidingWindowCounter(options.windowSize, options.capacity, client);
+            break;
         default:
             // typescript should never let us invoke this function with anything other than the options above
             throw new Error('Selected rate limiting algorithm is not suppported');
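With the discriminant gone, the `// @ts-ignore` comments above suppress the type error rather than validating the options at runtime. One way the `// todo validate options` note could be addressed is a shape-based type guard; the sketch below is illustrative and is not part of this commit:

import { RateLimiterOptions, TokenBucketOptions } from '../@types/rateLimit';

// Hypothetical guard: narrows the union by checking the object's shape instead of
// the removed `typename` discriminant.
function isTokenBucketOptions(options: RateLimiterOptions): options is TokenBucketOptions {
    const candidate = options as Partial<TokenBucketOptions>;
    return typeof candidate.bucketSize === 'number' && typeof candidate.refillRate === 'number';
}

// The TOKEN_BUCKET case could then avoid @ts-ignore entirely:
//     case 'TOKEN_BUCKET':
//         if (isTokenBucketOptions(options)) {
//             return new TokenBucket(options.bucketSize, options.refillRate, client);
//         }
//         throw new Error('Invalid options for token bucket');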

src/rateLimiters/slidingWindowCounter.ts

Lines changed: 113 additions & 29 deletions

@@ -9,12 +9,12 @@ import { RateLimiter, RateLimiterResponse, RedisWindow } from '../@types/rateLim
  * takeup in each.
  *
  * Whenever a user makes a request the following steps are performed:
- * 1. Fixed minute windows are defined along with redis caches if previously undefined.
- * 2. Rolling minute windows are defined or updated based on the timestamp of the new request.
+ * 1. Fixed windows are defined along with redis caches if previously undefined.
+ * 2. Rolling windows are defined or updated based on the timestamp of the new request.
  * 3. Counter of the current fixed window is updated with the new request's token usage.
  * 4. If a new minute interval is reached, the averaging formula is run to prevent fixed window's flaw
  *    of flooded requests around window borders
- *    (ex. 10 token capacity: 1m59s 10 reqs 2m2s 10 reqs)
+ *    (ex. 1m windows, 10 token capacity: 1m59s 10 reqs 2m2s 10 reqs)
  */
 class SlidingWindowCounter implements RateLimiter {
     private windowSize: number;
@@ -24,7 +24,7 @@ class SlidingWindowCounter implements RateLimiter {
     private client: Redis;
 
     /**
-     * Create a new instance of a TokenBucket rate limiter that can be connected to any database store
+     * Create a new instance of a SlidingWindowCounter rate limiter that can be connected to any database store
      * @param windowSize size of each window in milliseconds (fixed and rolling)
      * @param capacity max capacity of tokens allowed per fixed window
      * @param client redis client where rate limiter will cache information
@@ -38,12 +38,37 @@ class SlidingWindowCounter implements RateLimiter {
     }
 
     /**
+     * @function processRequest - Sliding window counter algorithm to allow or block
+     * based on the depth/complexity (in amount of tokens) of incoming requests.
      *
+     * First, checks if a window exists in the redis cache.
+     *
+     * If not, then `fixedWindowStart` is set as the current timestamp, and `currentTokens`
+     * is checked against `capacity`. If enough room exists for the request, returns
+     * success as true and tokens as how many tokens remain in the current fixed window.
+     *
+     * If a window does exist in the cache, we first check if the timestamp is greater than
+     * the fixedWindowStart + windowSize.
+     *
+     * If it isn't then we check the number of tokens in the arguments as well as in the cache
+     * against the capacity and return success or failure from there while updating the cache.
+     *
+     * If the timestamp is over the windowSize beyond the fixedWindowStart, then we update fixedWindowStart
+     * to be fixedWindowStart + windowSize (to create a new fixed window) and
+     * make previousTokens = currentTokens, and currentTokens equal to the number of tokens in args, if
+     * not over capacity.
+     *
+     * Once previousTokens is not null, we then run functionality using the rolling window to compute
+     * the formula this entire limiting algorithm is distinguished by:
+     *
+     * currentTokens + previousTokens * overlap % of rolling window over previous fixed window
      *
      * @param {string} uuid - unique identifer used to throttle requests
      * @param {number} timestamp - time the request was recieved
     * @param {number} [tokens=1] - complexity of the query for throttling requests
     * @return {*} {Promise<RateLimiterResponse>}
+     * RateLimiterResponse: {success: boolean, tokens: number}
+     * (tokens represents the remaining available capacity of the window)
     * @memberof SlidingWindowCounter
     */
    async processRequest(
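To make the weighting formula in the new doc comment concrete, here is a worked example with assumed values (windowSize = 60000 ms, capacity = 10; the numbers are not taken from the commit). Suppose 8 tokens were used in the fixed window starting at t = 0 ms, and a 4-token request arrives at t = 75000 ms. The request falls in the next fixed window, so previousTokens becomes 8 and currentTokens resets to 0. The rolling window overlaps the previous fixed window by (60000 - 15000) / 60000 = 0.75, so previousRollingTokens = floor(8 * 0.75) = 6. Since 4 + (0 + 6) = 10 <= capacity, the request is allowed and the response reports 10 - (4 + 6) = 0 remaining capacity. This weighting is what blocks the boundary flood described in the class comment (the 1m59s / 2m02s case): a burst that filled the previous window just before the boundary still counts almost fully against the new rolling window, whereas a plain fixed window would admit a second full burst.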
@@ -57,31 +82,90 @@ class SlidingWindowCounter implements RateLimiter {
         // attempt to get the value for the uuid from the redis cache
         const windowJSON = await this.client.get(uuid);
 
-        // // if the response is null, we need to create a window for the user
-        // if (windowJSON === null) {
-        //     // rolling window is 1 minute long
-        //     const rollingWindowEnd = timestamp + 60000;
-
-        //     // grabs the actual minute from the timestamp to create fixed window
-        //     const fixedWindowStart = timestamp - (timestamp % 10000);
-        //     const fixedWindowEnd = fixedWindowStart + 60000;
-
-        //     const newUserWindow: RedisWindow = {
-        //         // conditionally set tokens depending on how many are requested compared to the capacity
-        //         tokens: tokens > this.capacity ? this.capacity : this.capacity - tokens,
-        //         timestamp,
-        //     };
-
-        //     // reject the request, not enough tokens could even be in the bucket
-        //     if (tokens > this.capacity) {
-        //         await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
-        //         return { success: false, tokens: this.capacity };
-        //     }
-        //     await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
-        //     return { success: true, tokens: newUserWindow.tokens };
-        // }
-
-        return { success: true, tokens: 0 };
+        // if the response is null, we need to create a window for the user
+        if (windowJSON === null) {
+            const newUserWindow: RedisWindow = {
+                // current and previous tokens represent how many tokens are in each window
+                currentTokens: tokens <= this.capacity ? tokens : 0,
+                previousTokens: 0,
+                fixedWindowStart: timestamp,
+            };
+
+            if (tokens <= this.capacity) {
+                await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
+                return { success: true, tokens: this.capacity - newUserWindow.currentTokens };
+            }
+
+            await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
+            // tokens property represents how much capacity remains
+            return { success: false, tokens: this.capacity };
+        }
+
+        // if the cache is populated
+
+        const window: RedisWindow = await JSON.parse(windowJSON);
+
+        const updatedUserWindow: RedisWindow = {
+            currentTokens: window.currentTokens,
+            previousTokens: window.previousTokens,
+            fixedWindowStart: window.fixedWindowStart,
+        };
+
+        // if request time is in a new window
+        if (window.fixedWindowStart && timestamp >= window.fixedWindowStart + this.windowSize) {
+            // if more than one window was skipped
+            if (timestamp >= window.fixedWindowStart + this.windowSize * 2) {
+                // if one or more windows was skipped, reset new window to be at current timestamp
+                updatedUserWindow.previousTokens = 0;
+                updatedUserWindow.currentTokens = 0;
+                updatedUserWindow.fixedWindowStart = timestamp;
+            } else {
+                updatedUserWindow.previousTokens = updatedUserWindow.currentTokens;
+                updatedUserWindow.currentTokens = 0;
+                updatedUserWindow.fixedWindowStart = window.fixedWindowStart + this.windowSize;
+            }
+        }
+
+        // assigned to avoid TS error, this var will never be used as 0
+        // var is declared here so that below can be inside a conditional for efficiency's sake
+        let rollingWindowProportion = 0;
+        let previousRollingTokens = 0;
+
+        if (updatedUserWindow.fixedWindowStart && updatedUserWindow.previousTokens) {
+            // proportion of rolling window present in previous window
+            rollingWindowProportion =
+                (this.windowSize - (timestamp - updatedUserWindow.fixedWindowStart)) /
+                this.windowSize;
+
+            // remove unecessary decimals, 0.xx is enough
+            // rollingWindowProportion -= rollingWindowProportion % 0.01;
+
+            // # of tokens present in rolling & previous window
+            previousRollingTokens = Math.floor(
+                updatedUserWindow.previousTokens * rollingWindowProportion
+            );
+        }
+
+        // # of tokens present in rolling and/or current window
+        // if previous tokens is null, previousRollingTokens will be 0
+        const rollingTokens = updatedUserWindow.currentTokens + previousRollingTokens;
+
+        // if request is allowed
+        if (tokens + rollingTokens <= this.capacity) {
+            updatedUserWindow.currentTokens += tokens;
+            await this.client.setex(uuid, keyExpiry, JSON.stringify(updatedUserWindow));
+            return {
+                success: true,
+                tokens: this.capacity - (updatedUserWindow.currentTokens + previousRollingTokens),
+            };
+        }
+
+        // if request is blocked
+        await this.client.setex(uuid, keyExpiry, JSON.stringify(updatedUserWindow));
+        return {
+            success: false,
+            tokens: this.capacity - (updatedUserWindow.currentTokens + previousRollingTokens),
+        };
     }
 
     /**
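For context, a minimal usage sketch of the new limiter. It assumes the class is the module's default export (as the other limiters imported in rateLimiterSetup.ts are); the connection details and numeric values are illustrative only:

import Redis from 'ioredis';
import SlidingWindowCounter from './rateLimiters/slidingWindowCounter';

// 60-second fixed/rolling windows with a capacity of 10 tokens per window.
const client = new Redis();
const limiter = new SlidingWindowCounter(60000, 10, client);

async function checkRequest(userId: string, queryComplexity: number): Promise<void> {
    const { success, tokens } = await limiter.processRequest(userId, Date.now(), queryComplexity);
    if (!success) {
        // `tokens` reports how much capacity remains in the window
        throw new Error(`Rate limit exceeded; remaining window capacity: ${tokens}`);
    }
}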
