@@ -86,20 +86,22 @@ xdescribe('Test TokenBucket Rate Limiter', () => {
     expect(tokenCountPartialToEmpty.currentTokens).toBe(0);
   });
 
-  // Bucket initially empty but enough time elapsed to paritally fill bucket since last request
+  // Window initially full but enough time elapsed to partially fill window since last request
   test('fixed window is initially full but after new fixed window is initialized request is allowed', async () => {
     await setTokenCountInClient(client, user4, 10, null, timestamp);
     // tokens returned in processRequest is equal to the capacity
     // still available in the fixed window
     expect(
-      (await limiter.processRequest(user4, timestamp + WINDOW_SIZE + 1, 10)).tokens
-    ).toBe(0);
+      (await limiter.processRequest(user4, timestamp + WINDOW_SIZE + 1, 1)).tokens
+    ).toBe(0); // here, we expect the rolling window to only allow 1 token, b/c
+    // only 1ms has passed since the previous fixed window
+
     // `currentTokens` cached is the amount of tokens
     // currently in the fixed window.
     // this differs from token bucket, which caches the amount
     // of tokens still available for use
     const count = await getWindowFromClient(client, user4);
-    expect(count.currentTokens).toBe(10);
+    expect(count.currentTokens).toBe(1);
   });
 
   // three different tests within, with different rolling window proportions (.25, .5, .75)
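For context, here is a minimal sketch of the sliding window counter math this test exercises. The helper name (availableCapacity) and parameter names (prevWindowCount, currWindowCount, fixedWindowStart) are illustrative assumptions, not the repo's actual implementation.

// Hypothetical helper: estimate remaining capacity in a sliding window counter.
// The previous fixed window's count is weighted by how much of it still
// overlaps the rolling window that ends at `now`.
function availableCapacity(capacity, windowSize, fixedWindowStart, prevWindowCount, currWindowCount, now) {
  const elapsed = now - fixedWindowStart; // time spent in the current fixed window
  const overlap = Math.max(0, (windowSize - elapsed) / windowSize); // fraction of the rolling window covered by the previous fixed window
  const rollingCount = Math.floor(prevWindowCount * overlap) + currWindowCount;
  return Math.max(0, capacity - rollingCount);
}

// With capacity 10, a full previous window (10 tokens used), an empty new fixed
// window, and only 1ms elapsed since the new window began, roughly 1 token is
// available, which is why the 1-token request above succeeds and leaves 0 remaining.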