@@ -9,12 +9,12 @@ import { RateLimiter, RateLimiterResponse, RedisWindow } from '../@types/rateLim
  * takeup in each.
  *
  * Whenever a user makes a request the following steps are performed:
- * 1. Fixed minute windows are defined along with redis caches if previously undefined.
- * 2. Rolling minute windows are defined or updated based on the timestamp of the new request.
+ * 1. Fixed windows are defined along with redis caches if previously undefined.
+ * 2. Rolling windows are defined or updated based on the timestamp of the new request.
  * 3. Counter of the current fixed window is updated with the new request's token usage.
  * 4. If a new minute interval is reached, the averaging formula is run to prevent fixed window's flaw
  * of flooded requests around window borders
- * (ex. 10 token capacity: 1m59s 10 reqs 2m2s 10 reqs)
+ * (ex. 1m windows, 10 token capacity: 1m59s 10 reqs 2m2s 10 reqs)
  */
 class SlidingWindowCounter implements RateLimiter {
     private windowSize: number;
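For the border case in step 4, with 1m windows and a 10 token capacity: a plain fixed-window counter would admit 10 tokens at 1m59s and another 10 at 2m2s, i.e. 20 tokens within roughly three seconds. Under the averaging formula implemented in processRequest further down this diff, the request at 2m2s still sees a rolling window that overlaps the previous fixed window by (60000 - 2000) / 60000 ≈ 0.97, so the weighted count is 0 + floor(10 × 0.97) = 9 tokens and the second 10-token burst is rejected.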
@@ -24,7 +24,7 @@ class SlidingWindowCounter implements RateLimiter {
     private client: Redis;

     /**
-     * Create a new instance of a TokenBucket rate limiter that can be connected to any database store
+     * Create a new instance of a SlidingWindowCounter rate limiter that can be connected to any database store
      * @param windowSize size of each window in milliseconds (fixed and rolling)
      * @param capacity max capacity of tokens allowed per fixed window
      * @param client redis client where rate limiter will cache information
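As a rough construction sketch for the parameters documented above (the import paths are hypothetical, and the Redis client is assumed to be ioredis, which provides the get/setex calls used later in this file):

```ts
import Redis from 'ioredis';
// hypothetical path; import from wherever SlidingWindowCounter is exported
import SlidingWindowCounter from './rateLimiters/slidingWindowCounter';

const client = new Redis();

// 60000 ms fixed/rolling windows, at most 10 tokens of query complexity per window
const limiter = new SlidingWindowCounter(60000, 10, client);
```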
@@ -38,12 +38,37 @@ class SlidingWindowCounter implements RateLimiter {
     }

     /**
+     * @function processRequest - Sliding window counter algorithm to allow or block requests
+     * based on their depth/complexity (measured in tokens).
      *
+     * First, checks whether a window exists in the redis cache.
+     *
+     * If not, `fixedWindowStart` is set to the current timestamp and `currentTokens`
+     * is checked against `capacity`. If enough room exists for the request, returns
+     * success as true and tokens as how many tokens remain in the current fixed window.
+     *
+     * If a window does exist in the cache, we first check whether the timestamp is greater than
+     * fixedWindowStart + windowSize.
+     *
+     * If it isn't, we check the tokens in the arguments plus those in the cache against
+     * the capacity, update the cache, and return success or failure accordingly.
+     *
+     * If the timestamp is more than windowSize beyond fixedWindowStart, we advance fixedWindowStart
+     * by windowSize (creating a new fixed window), set previousTokens to currentTokens, and set
+     * currentTokens to the tokens in the arguments, if not over capacity.
+     *
+     * Once previousTokens is not null, the rolling window is used to compute the formula that
+     * distinguishes this limiting algorithm:
+     *
+     * currentTokens + previousTokens * (% of the rolling window that overlaps the previous fixed window)
      *
      * @param {string} uuid - unique identifier used to throttle requests
      * @param {number} timestamp - time the request was received
      * @param {number} [tokens=1] - complexity of the query for throttling requests
      * @return {*} {Promise<RateLimiterResponse>}
+     * RateLimiterResponse: {success: boolean, tokens: number}
+     * (tokens represents the remaining available capacity of the window)
      * @memberof SlidingWindowCounter
      */
     async processRequest(
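A minimal standalone sketch of the weighted count described by the formula above (the helper name is illustrative; processRequest below computes the same quantity from the cached RedisWindow fields):

```ts
// Weighted token count seen by the rolling window; windowSize and timestamps are in ms.
function rollingCount(
    currentTokens: number,
    previousTokens: number,
    fixedWindowStart: number,
    timestamp: number,
    windowSize: number
): number {
    // fraction of the rolling window that still overlaps the previous fixed window
    const overlap = (windowSize - (timestamp - fixedWindowStart)) / windowSize;
    return currentTokens + Math.floor(previousTokens * overlap);
}

// ex. 1m windows, 10 tokens spent at 1m59s, new fixed window starting at 2m:
// rollingCount(0, 10, 120000, 122000, 60000) === 9, so only 1 of 10 tokens remains at 2m02s
```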
@@ -57,31 +82,90 @@ class SlidingWindowCounter implements RateLimiter {
         // attempt to get the value for the uuid from the redis cache
         const windowJSON = await this.client.get(uuid);

-        // // if the response is null, we need to create a window for the user
-        // if (windowJSON === null) {
-        //     // rolling window is 1 minute long
-        //     const rollingWindowEnd = timestamp + 60000;
-
-        //     // grabs the actual minute from the timestamp to create fixed window
-        //     const fixedWindowStart = timestamp - (timestamp % 10000);
-        //     const fixedWindowEnd = fixedWindowStart + 60000;
-
-        //     const newUserWindow: RedisWindow = {
-        //         // conditionally set tokens depending on how many are requested compared to the capacity
-        //         tokens: tokens > this.capacity ? this.capacity : this.capacity - tokens,
-        //         timestamp,
-        //     };
-
-        //     // reject the request, not enough tokens could even be in the bucket
-        //     if (tokens > this.capacity) {
-        //         await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
-        //         return { success: false, tokens: this.capacity };
-        //     }
-        //     await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
-        //     return { success: true, tokens: newUserWindow.tokens };
-        // }
-
-        return { success: true, tokens: 0 };
+        // if the response is null, we need to create a window for the user
+        if (windowJSON === null) {
+            const newUserWindow: RedisWindow = {
+                // current and previous tokens represent how many tokens are in each window
+                currentTokens: tokens <= this.capacity ? tokens : 0,
+                previousTokens: 0,
+                fixedWindowStart: timestamp,
+            };
+
+            if (tokens <= this.capacity) {
+                await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
+                return { success: true, tokens: this.capacity - newUserWindow.currentTokens };
+            }
+
+            await this.client.setex(uuid, keyExpiry, JSON.stringify(newUserWindow));
+            // tokens property represents how much capacity remains
+            return { success: false, tokens: this.capacity };
+        }
+
+        // if the cache is populated
+
+        const window: RedisWindow = await JSON.parse(windowJSON);
+
+        const updatedUserWindow: RedisWindow = {
+            currentTokens: window.currentTokens,
+            previousTokens: window.previousTokens,
+            fixedWindowStart: window.fixedWindowStart,
+        };
+
+        // if request time is in a new window
+        if (window.fixedWindowStart && timestamp >= window.fixedWindowStart + this.windowSize) {
+            // if at least one full window elapsed with no requests
+            if (timestamp >= window.fixedWindowStart + this.windowSize * 2) {
+                // reset the new window to start at the current timestamp
+                updatedUserWindow.previousTokens = 0;
+                updatedUserWindow.currentTokens = 0;
+                updatedUserWindow.fixedWindowStart = timestamp;
+            } else {
+                updatedUserWindow.previousTokens = updatedUserWindow.currentTokens;
+                updatedUserWindow.currentTokens = 0;
+                updatedUserWindow.fixedWindowStart = window.fixedWindowStart + this.windowSize;
+            }
+        }
+
+        // initialized to 0 to avoid a TS error; this var is never actually used as 0
+        // declared here so the computation below can stay inside a conditional for efficiency's sake
+        let rollingWindowProportion = 0;
+        let previousRollingTokens = 0;
+
+        if (updatedUserWindow.fixedWindowStart && updatedUserWindow.previousTokens) {
+            // proportion of the rolling window present in the previous window
+            rollingWindowProportion =
+                (this.windowSize - (timestamp - updatedUserWindow.fixedWindowStart)) /
+                this.windowSize;
+
+            // remove unnecessary decimals, 0.xx is enough
+            // rollingWindowProportion -= rollingWindowProportion % 0.01;
+
+            // # of tokens present in both the rolling & previous window
+            previousRollingTokens = Math.floor(
+                updatedUserWindow.previousTokens * rollingWindowProportion
+            );
+        }
+
+        // # of tokens present in the rolling and/or current window
+        // if previousTokens is null, previousRollingTokens will be 0
+        const rollingTokens = updatedUserWindow.currentTokens + previousRollingTokens;
+
+        // if request is allowed
+        if (tokens + rollingTokens <= this.capacity) {
+            updatedUserWindow.currentTokens += tokens;
+            await this.client.setex(uuid, keyExpiry, JSON.stringify(updatedUserWindow));
+            return {
+                success: true,
+                tokens: this.capacity - (updatedUserWindow.currentTokens + previousRollingTokens),
+            };
+        }
+
+        // if request is blocked
+        await this.client.setex(uuid, keyExpiry, JSON.stringify(updatedUserWindow));
+        return {
+            success: false,
+            tokens: this.capacity - (updatedUserWindow.currentTokens + previousRollingTokens),
+        };
     }

     /**
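And a rough usage sketch of processRequest as implemented above, reusing the hypothetical limiter from the construction sketch earlier (the uuid and token cost are illustrative, and the call must run inside an async function):

```ts
// a query whose complexity was scored at 3 tokens
const response = await limiter.processRequest('user-123', Date.now(), 3);

if (response.success) {
    console.log(`allowed; ${response.tokens} tokens remain in the current window`);
} else {
    console.log(`blocked; only ${response.tokens} tokens remain in the current window`);
}
```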