merge: Fix rate limits under multi-node environments (!809)

View MR for information: https://activitypub.software/TransFem-org/Sharkey/-/merge_requests/809

Approved-by: dakkar <dakkar@thenautilus.net>
Approved-by: Marie <github@yuugi.dev>
This commit is contained in:
Hazelnoot 2024-12-15 16:53:48 +00:00
commit fd0ecb22cf
16 changed files with 719 additions and 400 deletions

View file

@ -117,12 +117,27 @@ export interface LimitInfo {
fullResetMs: number; fullResetMs: number;
} }
export const disabledLimitInfo: Readonly<LimitInfo> = Object.freeze({
blocked: false,
remaining: Number.MAX_SAFE_INTEGER,
resetSec: 0,
resetMs: 0,
fullResetSec: 0,
fullResetMs: 0,
});
export function isLegacyRateLimit(limit: RateLimit): limit is LegacyRateLimit { export function isLegacyRateLimit(limit: RateLimit): limit is LegacyRateLimit {
return limit.type === undefined; return limit.type === undefined;
} }
export function hasMinLimit(limit: LegacyRateLimit): limit is LegacyRateLimit & { minInterval: number } { export type MaxLegacyLimit = LegacyRateLimit & { duration: number, max: number };
return !!limit.minInterval; export function hasMaxLimit(limit: LegacyRateLimit): limit is MaxLegacyLimit {
return limit.max != null && limit.duration != null;
}
export type MinLegacyLimit = LegacyRateLimit & { minInterval: number };
export function hasMinLimit(limit: LegacyRateLimit): limit is MinLegacyLimit {
return limit.minInterval != null;
} }
export function sendRateLimitHeaders(reply: FastifyReply, info: LimitInfo): void { export function sendRateLimitHeaders(reply: FastifyReply, info: LimitInfo): void {

View file

@ -0,0 +1,143 @@
# SkRateLimiterService - Leaky Bucket Rate Limit Implementation
SkRateLimiterService replaces Misskey's RateLimiterService for all use cases.
It offers a simplified API, detailed metrics, and support for Rate Limit headers.
The prime feature is an implementation of Leaky Bucket - a flexible rate limiting scheme that better supports bursty request patterns common with human interaction.
## Compatibility
The API is backwards-compatible with existing limit definitions, but it's preferred to use the new BucketRateLimit interface.
Legacy limits will be "translated" into a bucket limit in a way that attempts to respect max, duration, and minInterval (if present).
SkRateLimiterService is not quite plug-and-play compatible with existing call sites, as it no longer throws when a limit is exceeded.
Instead, the returned LimitInfo object will have `blocked` set to true.
Callers are responsible for checking this property and taking any desired action, such as rejecting a request or returning limit details.
## Headers
LimitInfo objects (returned by `SkRateLimitService.limit()`) can be passed to `rate-limit-utils.sendRateLimitHeaders()` to send standard rate limit headers with an HTTP response.
The defined headers are:
| Header | Definition | Example |
|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------|
| `X-RateLimit-Remaining` | Number of calls that can be made without triggering the rate limit. Will be zero if the limit is already exceeded, or will be exceeded by the next request. | `X-RateLimit-Remaining: 1` |
| `X-RateLimit-Clear` | Time in seconds required to completely clear the rate limit "bucket". | `X-RateLimit-Clear: 1.5` |
| `X-RateLimit-Reset` | Contains the number of seconds to wait before retrying the current request. Clients should delay for at least this long before making another call. Only included if the rate limit has already been exceeded. | `X-RateLimit-Reset: 0.755` |
| `Retry-After`           | Like `X-RateLimit-Reset`, but measured in whole seconds (rounded up). Preserved for backwards compatibility, and only included if the rate limit has already been exceeded.                                     | `Retry-After: 2`           |
Note: rate limit headers are not standardized, except for `Retry-After`.
Header meanings and usage have been devised by adapting common patterns to work with a leaky bucket rate limit model.
## Performance
SkRateLimiterService makes between 1 and 4 redis transactions per rate limit check.
The first call is read-only, while the others perform at least one write operation.
Two integer keys are stored per client/subject, and both expire together after the maximum duration of the limit.
While performance has not been formally tested, it's expected that SkRateLimiterService has an impact roughly on par with the legacy RateLimiterService.
Redis memory usage should be notably lower due to the reduced number of keys and avoidance of set / array constructions.
## Concurrency and Multi-Node Correctness
To provide consistency across multi-node environments, leaky bucket is implemented with only atomic operations (`Increment`, `Decrement`, `Add`, and `Subtract`).
This allows the use of Optimistic Locking with read-modify-check logic.
If a data conflict is detected during the "drip" phase, then it's safely reverted by executing its inverse (`Increment` <-> `Decrement`, `Add` <-> `Subtract`).
We don't need to check for conflicts when adding the current request to the bucket, as all other logic already accounts for the case where the bucket has been "overfilled".
Should an extra request slip through, the limit delay will be extended until the bucket size is back within limits.
There is one non-atomic `Set` operation used to populate the initial Timestamp value, but we can safely ignore data races there.
Any possible conflict would have to occur within a few-milliseconds window, which means that the final value can be no more than a few milliseconds off from the expected value.
This error does not compound, as all further operations are relative (Increment and Add).
Thus, it's considered an acceptable tradeoff given the limitations imposed by Redis and ioredis.
## Algorithm Pseudocode
The Atomic Leaky Bucket algorithm is described here, in pseudocode:
```
# Terms
# * Now - UNIX timestamp of the current moment
# * Bucket Size - Maximum number of requests allowed in the bucket
# * Counter - Number of requests in the bucket
# * Drip Rate - How often to decrement counter
# * Drip Size - How much to decrement the counter
# * Timestamp - UNIX timestamp of last bucket drip
# * Delta Counter - Difference between current and expected counter value
# * Delta Timestamp - Difference between current and expected timestamp value
# 0 - Calculations
dripRate = ceil(limit.dripRate ?? 1000);
dripSize = ceil(limit.dripSize ?? 1);
bucketSize = max(ceil(limit.size / factor), 1);
maxExpiration = max(ceil((dripRate * ceil(bucketSize / dripSize)) / 1000), 1);
# 1 - Read
MULTI
GET 'counter' INTO counter
GET 'timestamp' INTO timestamp
EXEC
# 2 - Drip
if (counter > 0) {
# Deltas
deltaCounter = floor((now - timestamp) / dripRate) * dripSize;
deltaCounter = min(deltaCounter, counter);
deltaTimestamp = deltaCounter * dripRate;
if (deltaCounter > 0) {
# Update
expectedTimestamp = timestamp
MULTI
GET 'timestamp' INTO canaryTimestamp
INCRBY 'timestamp' deltaTimestamp
EXPIRE 'timestamp' maxExpiration
GET 'timestamp' INTO timestamp
DECRBY 'counter' deltaCounter
EXPIRE 'counter' maxExpiration
GET 'counter' INTO counter
EXEC
# Rollback
if (canaryTimestamp != expectedTimestamp) {
MULTI
DECRBY 'timestamp' deltaTimestamp
GET 'timestamp' INTO timestamp
INCRBY 'counter' deltaCounter
GET 'counter' INTO counter
EXEC
}
}
}
# 3 - Check
blocked = counter >= bucketSize
if (!blocked) {
if (timestamp == 0) {
# Edge case - set the initial value for timestamp.
# Otherwise the first request will immediately drip away.
MULTI
SET 'timestamp', now
EXPIRE 'timestamp' maxExpiration
INCR 'counter'
EXPIRE 'counter' maxExpiration
GET 'counter' INTO counter
EXEC
} else {
MULTI
INCR 'counter'
EXPIRE 'counter' maxExpiration
GET 'counter' INTO counter
EXEC
}
}
# 4 - Handle
if (blocked) {
# Application-specific code goes here.
# At this point blocked, counter, and timestamp are all accurate and synced to redis.
# Caller can apply limits, calculate headers, log audit failure, or anything else.
}
```
## Notes, Resources, and Further Reading
* https://en.wikipedia.org/wiki/Leaky_bucket#As_a_meter
* https://ietf-wg-httpapi.github.io/ratelimit-headers/darrelmiller-policyname/draft-ietf-httpapi-ratelimit-headers.txt
* https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
* https://stackoverflow.com/a/16022625

View file

@ -5,16 +5,13 @@
import { Inject, Injectable } from '@nestjs/common'; import { Inject, Injectable } from '@nestjs/common';
import Redis from 'ioredis'; import Redis from 'ioredis';
import { LoggerService } from '@/core/LoggerService.js';
import { TimeService } from '@/core/TimeService.js'; import { TimeService } from '@/core/TimeService.js';
import { EnvService } from '@/core/EnvService.js'; import { EnvService } from '@/core/EnvService.js';
import { BucketRateLimit, LegacyRateLimit, LimitInfo, RateLimit, hasMinLimit, isLegacyRateLimit, Keyed, hasMaxLimit, disabledLimitInfo, MaxLegacyLimit, MinLegacyLimit } from '@/misc/rate-limit-utils.js';
import { DI } from '@/di-symbols.js'; import { DI } from '@/di-symbols.js';
import type Logger from '@/logger.js';
import { BucketRateLimit, LegacyRateLimit, LimitInfo, RateLimit, hasMinLimit, isLegacyRateLimit, Keyed } from '@/misc/rate-limit-utils.js';
@Injectable() @Injectable()
export class SkRateLimiterService { export class SkRateLimiterService {
private readonly logger: Logger;
private readonly disabled: boolean; private readonly disabled: boolean;
constructor( constructor(
@ -24,32 +21,31 @@ export class SkRateLimiterService {
@Inject(DI.redis) @Inject(DI.redis)
private readonly redisClient: Redis.Redis, private readonly redisClient: Redis.Redis,
@Inject(LoggerService)
loggerService: LoggerService,
@Inject(EnvService) @Inject(EnvService)
envService: EnvService, envService: EnvService,
) { ) {
this.logger = loggerService.getLogger('limiter'); this.disabled = envService.env.NODE_ENV === 'test';
this.disabled = envService.env.NODE_ENV !== 'production'; // TODO disable in TEST *only*
} }
/**
* Check & increment a rate limit
* @param limit The limit definition
* @param actor Client who is calling this limit
* @param factor Scaling factor - smaller = larger limit (less restrictive)
*/
public async limit(limit: Keyed<RateLimit>, actor: string, factor = 1): Promise<LimitInfo> { public async limit(limit: Keyed<RateLimit>, actor: string, factor = 1): Promise<LimitInfo> {
if (this.disabled || factor === 0) { if (this.disabled || factor === 0) {
return { return disabledLimitInfo;
blocked: false,
remaining: Number.MAX_SAFE_INTEGER,
resetSec: 0,
resetMs: 0,
fullResetSec: 0,
fullResetMs: 0,
};
} }
if (factor < 0) { if (factor < 0) {
throw new Error(`Rate limit factor is zero or negative: ${factor}`); throw new Error(`Rate limit factor is zero or negative: ${factor}`);
} }
return await this.tryLimit(limit, actor, factor);
}
private async tryLimit(limit: Keyed<RateLimit>, actor: string, factor: number): Promise<LimitInfo> {
if (isLegacyRateLimit(limit)) { if (isLegacyRateLimit(limit)) {
return await this.limitLegacy(limit, actor, factor); return await this.limitLegacy(limit, actor, factor);
} else { } else {
@ -58,141 +54,200 @@ export class SkRateLimiterService {
} }
private async limitLegacy(limit: Keyed<LegacyRateLimit>, actor: string, factor: number): Promise<LimitInfo> { private async limitLegacy(limit: Keyed<LegacyRateLimit>, actor: string, factor: number): Promise<LimitInfo> {
const promises: Promise<LimitInfo | null>[] = []; if (hasMaxLimit(limit)) {
return await this.limitLegacyMinMax(limit, actor, factor);
// The "min" limit - if present - is handled directly. } else if (hasMinLimit(limit)) {
if (hasMinLimit(limit)) { return await this.limitLegacyMinOnly(limit, actor, factor);
promises.push( } else {
this.limitMin(limit, actor, factor), return disabledLimitInfo;
); }
} }
// Convert the "max" limit into a leaky bucket with 1 drip / second rate. private async limitLegacyMinMax(limit: Keyed<MaxLegacyLimit>, actor: string, factor: number): Promise<LimitInfo> {
if (limit.max != null && limit.duration != null) { if (limit.duration === 0) return disabledLimitInfo;
promises.push( if (limit.duration < 0) throw new Error(`Invalid rate limit ${limit.key}: duration is negative (${limit.duration})`);
this.limitBucket({ if (limit.max < 1) throw new Error(`Invalid rate limit ${limit.key}: max is less than 1 (${limit.max})`);
// Derive initial dripRate from minInterval OR duration/max.
const initialDripRate = Math.max(limit.minInterval ?? Math.round(limit.duration / limit.max), 1);
// Calculate dripSize to reach max at exactly duration
const dripSize = Math.max(Math.round(limit.max / (limit.duration / initialDripRate)), 1);
// Calculate final dripRate from dripSize and duration/max
const dripRate = Math.max(Math.round(limit.duration / (limit.max / dripSize)), 1);
const bucketLimit: Keyed<BucketRateLimit> = {
type: 'bucket', type: 'bucket',
key: limit.key, key: limit.key,
size: limit.max, size: limit.max,
dripRate: Math.max(Math.round(limit.duration / limit.max), 1), dripRate,
}, actor, factor), dripSize,
);
}
const [lim1, lim2] = await Promise.all(promises);
return {
blocked: (lim1?.blocked || lim2?.blocked) ?? false,
remaining: Math.min(lim1?.remaining ?? Number.MAX_SAFE_INTEGER, lim2?.remaining ?? Number.MAX_SAFE_INTEGER),
resetSec: Math.max(lim1?.resetSec ?? 0, lim2?.resetSec ?? 0),
resetMs: Math.max(lim1?.resetMs ?? 0, lim2?.resetMs ?? 0),
fullResetSec: Math.max(lim1?.fullResetSec ?? 0, lim2?.fullResetSec ?? 0),
fullResetMs: Math.max(lim1?.fullResetMs ?? 0, lim2?.fullResetMs ?? 0),
}; };
return await this.limitBucket(bucketLimit, actor, factor);
} }
private async limitMin(limit: Keyed<LegacyRateLimit> & { minInterval: number }, actor: string, factor: number): Promise<LimitInfo | null> { private async limitLegacyMinOnly(limit: Keyed<MinLegacyLimit>, actor: string, factor: number): Promise<LimitInfo> {
if (limit.minInterval === 0) return null; if (limit.minInterval === 0) return disabledLimitInfo;
if (limit.minInterval < 0) throw new Error(`Invalid rate limit ${limit.key}: minInterval is negative (${limit.minInterval})`); if (limit.minInterval < 0) throw new Error(`Invalid rate limit ${limit.key}: minInterval is negative (${limit.minInterval})`);
const counter = await this.getLimitCounter(limit, actor, 'min'); const dripRate = Math.max(Math.round(limit.minInterval), 1);
const minInterval = Math.max(Math.ceil(limit.minInterval * factor), 0); const bucketLimit: Keyed<BucketRateLimit> = {
type: 'bucket',
// Update expiration key: limit.key,
if (counter.c > 0) { size: 1,
const isCleared = this.timeService.now - counter.t >= minInterval; dripRate,
if (isCleared) { dripSize: 1,
counter.c = 0; };
} return await this.limitBucket(bucketLimit, actor, factor);
}
const blocked = counter.c > 0;
if (!blocked) {
counter.c++;
counter.t = this.timeService.now;
}
// Calculate limit status
const resetMs = Math.max(Math.ceil(minInterval - (this.timeService.now - counter.t)), 0);
const resetSec = Math.ceil(resetMs / 1000);
const limitInfo: LimitInfo = { blocked, remaining: 0, resetSec, resetMs, fullResetSec: resetSec, fullResetMs: resetMs };
// Update the limit counter, but not if blocked
if (!blocked) {
// Don't await, or we will slow down the API.
this.setLimitCounter(limit, actor, counter, resetSec, 'min')
.catch(err => this.logger.error(`Failed to update limit ${limit.key}:min for ${actor}:`, err));
}
return limitInfo;
} }
/**
* Implementation of Leaky Bucket rate limiting - see SkRateLimiterService.md for details.
*/
private async limitBucket(limit: Keyed<BucketRateLimit>, actor: string, factor: number): Promise<LimitInfo> { private async limitBucket(limit: Keyed<BucketRateLimit>, actor: string, factor: number): Promise<LimitInfo> {
if (limit.size < 1) throw new Error(`Invalid rate limit ${limit.key}: size is less than 1 (${limit.size})`); if (limit.size < 1) throw new Error(`Invalid rate limit ${limit.key}: size is less than 1 (${limit.size})`);
if (limit.dripRate != null && limit.dripRate < 1) throw new Error(`Invalid rate limit ${limit.key}: dripRate is less than 1 (${limit.dripRate})`); if (limit.dripRate != null && limit.dripRate < 1) throw new Error(`Invalid rate limit ${limit.key}: dripRate is less than 1 (${limit.dripRate})`);
if (limit.dripSize != null && limit.dripSize < 1) throw new Error(`Invalid rate limit ${limit.key}: dripSize is less than 1 (${limit.dripSize})`); if (limit.dripSize != null && limit.dripSize < 1) throw new Error(`Invalid rate limit ${limit.key}: dripSize is less than 1 (${limit.dripSize})`);
const counter = await this.getLimitCounter(limit, actor, 'bucket'); // 0 - Calculate
const now = this.timeService.now;
const bucketSize = Math.max(Math.ceil(limit.size / factor), 1); const bucketSize = Math.max(Math.ceil(limit.size / factor), 1);
const dripRate = Math.ceil(limit.dripRate ?? 1000); const dripRate = Math.ceil(limit.dripRate ?? 1000);
const dripSize = Math.ceil(limit.dripSize ?? 1); const dripSize = Math.ceil(limit.dripSize ?? 1);
const expirationSec = Math.max(Math.ceil((dripRate * Math.ceil(bucketSize / dripSize)) / 1000), 1);
// Update drips // 1 - Read
if (counter.c > 0) { const counterKey = createLimitKey(limit, actor, 'c');
const dripsSinceLastTick = Math.floor((this.timeService.now - counter.t) / dripRate) * dripSize; const timestampKey = createLimitKey(limit, actor, 't');
counter.c = Math.max(counter.c - dripsSinceLastTick, 0); const counter = await this.getLimitCounter(counterKey, timestampKey);
// 2 - Drip
const dripsSinceLastTick = Math.floor((now - counter.timestamp) / dripRate) * dripSize;
const deltaCounter = Math.min(dripsSinceLastTick, counter.counter);
const deltaTimestamp = dripsSinceLastTick * dripRate;
if (deltaCounter > 0) {
// Execute the next drip(s)
const results = await this.executeRedisMulti(
['get', timestampKey],
['incrby', timestampKey, deltaTimestamp],
['expire', timestampKey, expirationSec],
['get', timestampKey],
['decrby', counterKey, deltaCounter],
['expire', counterKey, expirationSec],
['get', counterKey],
);
const expectedTimestamp = counter.timestamp;
const canaryTimestamp = results[0] ? parseInt(results[0]) : 0;
counter.timestamp = results[3] ? parseInt(results[3]) : 0;
counter.counter = results[6] ? parseInt(results[6]) : 0;
// Check for a data collision and rollback
if (canaryTimestamp !== expectedTimestamp) {
const rollbackResults = await this.executeRedisMulti(
['decrby', timestampKey, deltaTimestamp],
['get', timestampKey],
['incrby', counterKey, deltaCounter],
['get', counterKey],
);
counter.timestamp = rollbackResults[1] ? parseInt(rollbackResults[1]) : 0;
counter.counter = rollbackResults[3] ? parseInt(rollbackResults[3]) : 0;
}
} }
const blocked = counter.c >= bucketSize; // 3 - Check
const blocked = counter.counter >= bucketSize;
if (!blocked) { if (!blocked) {
counter.c++; if (counter.timestamp === 0) {
counter.t = this.timeService.now; const results = await this.executeRedisMulti(
['set', timestampKey, now],
['expire', timestampKey, expirationSec],
['incr', counterKey],
['expire', counterKey, expirationSec],
['get', counterKey],
);
counter.timestamp = now;
counter.counter = results[4] ? parseInt(results[4]) : 0;
} else {
const results = await this.executeRedisMulti(
['incr', counterKey],
['expire', counterKey, expirationSec],
['get', counterKey],
);
counter.counter = results[2] ? parseInt(results[2]) : 0;
} }
}
// Calculate how much time is needed to free up a bucket slot
const overflow = Math.max((counter.counter + 1) - bucketSize, 0);
const dripsNeeded = Math.ceil(overflow / dripSize);
const timeNeeded = Math.max((dripRate * dripsNeeded) - (this.timeService.now - counter.timestamp), 0);
// Calculate limit status // Calculate limit status
const remaining = Math.max(bucketSize - counter.c, 0); const remaining = Math.max(bucketSize - counter.counter, 0);
const resetMs = remaining > 0 ? 0 : Math.max(dripRate - (this.timeService.now - counter.t), 0); const resetMs = timeNeeded;
const resetSec = Math.ceil(resetMs / 1000); const resetSec = Math.ceil(resetMs / 1000);
const fullResetMs = Math.ceil(counter.c / dripSize) * dripRate; const fullResetMs = Math.ceil(counter.counter / dripSize) * dripRate;
const fullResetSec = Math.ceil(fullResetMs / 1000); const fullResetSec = Math.ceil(fullResetMs / 1000);
const limitInfo: LimitInfo = { blocked, remaining, resetSec, resetMs, fullResetSec, fullResetMs }; return { blocked, remaining, resetSec, resetMs, fullResetSec, fullResetMs };
// Update the limit counter, but not if blocked
if (!blocked) {
// Don't await, or we will slow down the API.
this.setLimitCounter(limit, actor, counter, fullResetSec, 'bucket')
.catch(err => this.logger.error(`Failed to update limit ${limit.key} for ${actor}:`, err));
} }
return limitInfo; private async getLimitCounter(counterKey: string, timestampKey: string): Promise<LimitCounter> {
const [counter, timestamp] = await this.executeRedisMulti(
['get', counterKey],
['get', timestampKey],
);
return {
counter: counter ? parseInt(counter) : 0,
timestamp: timestamp ? parseInt(timestamp) : 0,
};
} }
private async getLimitCounter(limit: Keyed<RateLimit>, actor: string, subject: string): Promise<LimitCounter> { private async executeRedisMulti(...batch: RedisCommand[]): Promise<RedisResult[]> {
const key = createLimitKey(limit, actor, subject); const results = await this.redisClient.multi(batch).exec();
const value = await this.redisClient.get(key); // Transaction conflict (retryable)
if (value == null) { if (!results) {
return { t: 0, c: 0 }; throw new ConflictError('Redis error: transaction conflict');
} }
return JSON.parse(value); // Transaction failed (fatal)
if (results.length !== batch.length) {
throw new Error('Redis error: failed to execute batch');
} }
private async setLimitCounter(limit: Keyed<RateLimit>, actor: string, counter: LimitCounter, expiration: number, subject: string): Promise<void> { // Map responses
const key = createLimitKey(limit, actor, subject); const errors: Error[] = [];
const value = JSON.stringify(counter); const responses: RedisResult[] = [];
const expirationSec = Math.max(expiration, 1); for (const [error, response] of results) {
await this.redisClient.set(key, value, 'EX', expirationSec); if (error) errors.push(error);
responses.push(response as RedisResult);
}
// Command failed (fatal)
if (errors.length > 0) {
const errorMessages = errors
.map((e, i) => `Error in command ${i}: ${e}`)
.join('\', \'');
throw new AggregateError(errors, `Redis error: failed to execute command(s): '${errorMessages}'`);
}
return responses;
} }
} }
function createLimitKey(limit: Keyed<RateLimit>, actor: string, subject: string): string { // Not correct, but good enough for the basic commands we use.
return `rl_${actor}_${limit.key}_${subject}`; type RedisResult = string | null;
type RedisCommand = [command: string, ...args: unknown[]];
function createLimitKey(limit: Keyed<RateLimit>, actor: string, value: string): string {
return `rl_${actor}_${limit.key}_${value}`;
} }
export interface LimitCounter { class ConflictError extends Error {}
/** Timestamp */
t: number;
/** Counter */ interface LimitCounter {
c: number; timestamp: number;
counter: number;
} }

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -17,10 +17,11 @@ export const meta = {
allowGet: true, allowGet: true,
cacheSec: 60 * 60, cacheSec: 60 * 60,
// 10 calls per 5 seconds // Burst up to 100, then 2/sec average
limit: { limit: {
duration: 1000 * 5, type: 'bucket',
max: 10, size: 100,
dripRate: 500,
}, },
} as const; } as const;

View file

@ -3,25 +3,19 @@
* SPDX-License-Identifier: AGPL-3.0-only * SPDX-License-Identifier: AGPL-3.0-only
*/ */
import { KEYWORD } from 'color-convert/conversions.js';
import { jest } from '@jest/globals';
import type Redis from 'ioredis'; import type Redis from 'ioredis';
import { LimitCounter, SkRateLimiterService } from '@/server/api/SkRateLimiterService.js'; import { SkRateLimiterService } from '@/server/api/SkRateLimiterService.js';
import { LoggerService } from '@/core/LoggerService.js';
import { BucketRateLimit, Keyed, LegacyRateLimit } from '@/misc/rate-limit-utils.js'; import { BucketRateLimit, Keyed, LegacyRateLimit } from '@/misc/rate-limit-utils.js';
/* eslint-disable @typescript-eslint/no-non-null-assertion */ /* eslint-disable @typescript-eslint/no-non-null-assertion */
/* eslint-disable @typescript-eslint/no-unnecessary-condition */
describe(SkRateLimiterService, () => { describe(SkRateLimiterService, () => {
let mockTimeService: { now: number, date: Date } = null!; let mockTimeService: { now: number, date: Date } = null!;
let mockRedisGet: ((key: string) => string | null) | undefined = undefined; let mockRedis: Array<(command: [string, ...unknown[]]) => [Error | null, unknown] | null> = null!;
let mockRedisSet: ((args: unknown[]) => void) | undefined = undefined; let mockRedisExec: (batch: [string, ...unknown[]][]) => Promise<[Error | null, unknown][] | null> = null!;
let mockEnvironment: Record<string, string | undefined> = null!; let mockEnvironment: Record<string, string | undefined> = null!;
let serviceUnderTest: () => SkRateLimiterService = null!; let serviceUnderTest: () => SkRateLimiterService = null!;
let loggedMessages: { level: string, data: unknown[] }[] = [];
beforeEach(() => { beforeEach(() => {
mockTimeService = { mockTimeService = {
now: 0, now: 0,
@ -30,15 +24,41 @@ describe(SkRateLimiterService, () => {
}, },
}; };
mockRedisGet = undefined; function callMockRedis(command: [string, ...unknown[]]) {
mockRedisSet = undefined; const handlerResults = mockRedis.map(handler => handler(command));
const finalResult = handlerResults.findLast(result => result != null);
return finalResult ?? [null, null];
}
// I apologize to anyone who tries to read this later 🥲
mockRedis = [];
mockRedisExec = (batch) => {
const results: [Error | null, unknown][] = batch.map(command => {
return callMockRedis(command);
});
return Promise.resolve(results);
};
const mockRedisClient = { const mockRedisClient = {
get(key: string) { watch(...args: unknown[]) {
if (mockRedisGet) return Promise.resolve(mockRedisGet(key)); const result = callMockRedis(['watch', ...args]);
else return Promise.resolve(null); return Promise.resolve(result[0] ?? result[1]);
}, },
set(...args: unknown[]): Promise<void> { get(...args: unknown[]) {
if (mockRedisSet) mockRedisSet(args); const result = callMockRedis(['get', ...args]);
return Promise.resolve(result[0] ?? result[1]);
},
set(...args: unknown[]) {
const result = callMockRedis(['set', ...args]);
return Promise.resolve(result[0] ?? result[1]);
},
multi(batch: [string, ...unknown[]][]) {
return {
exec() {
return mockRedisExec(batch);
},
};
},
reset() {
return Promise.resolve(); return Promise.resolve();
}, },
} as unknown as Redis.Redis; } as unknown as Redis.Redis;
@ -49,89 +69,98 @@ describe(SkRateLimiterService, () => {
env: mockEnvironment, env: mockEnvironment,
}; };
loggedMessages = [];
const mockLogService = {
getLogger() {
return {
createSubLogger(context: string, color?: KEYWORD) {
return mockLogService.getLogger(context, color);
},
error(...data: unknown[]) {
loggedMessages.push({ level: 'error', data });
},
warn(...data: unknown[]) {
loggedMessages.push({ level: 'warn', data });
},
succ(...data: unknown[]) {
loggedMessages.push({ level: 'succ', data });
},
debug(...data: unknown[]) {
loggedMessages.push({ level: 'debug', data });
},
info(...data: unknown[]) {
loggedMessages.push({ level: 'info', data });
},
};
},
} as unknown as LoggerService;
let service: SkRateLimiterService | undefined = undefined; let service: SkRateLimiterService | undefined = undefined;
serviceUnderTest = () => { serviceUnderTest = () => {
return service ??= new SkRateLimiterService(mockTimeService, mockRedisClient, mockLogService, mockEnvService); return service ??= new SkRateLimiterService(mockTimeService, mockRedisClient, mockEnvService);
}; };
}); });
function expectNoUnhandledErrors() {
const unhandledErrors = loggedMessages.filter(m => m.level === 'error');
if (unhandledErrors.length > 0) {
throw new Error(`Test failed: got unhandled errors ${unhandledErrors.join('\n')}`);
}
}
describe('limit', () => { describe('limit', () => {
const actor = 'actor'; const actor = 'actor';
const key = 'test'; const key = 'test';
let counter: LimitCounter | undefined = undefined; let limitCounter: number | undefined = undefined;
let minCounter: LimitCounter | undefined = undefined; let limitTimestamp: number | undefined = undefined;
beforeEach(() => { beforeEach(() => {
counter = undefined; limitCounter = undefined;
minCounter = undefined; limitTimestamp = undefined;
mockRedisGet = (key: string) => { mockRedis.push(([command, ...args]) => {
if (key === 'rl_actor_test_bucket' && counter) { if (command === 'get') {
return JSON.stringify(counter); if (args[0] === 'rl_actor_test_c') {
const data = limitCounter?.toString() ?? null;
return [null, data];
}
if (args[0] === 'rl_actor_test_t') {
const data = limitTimestamp?.toString() ?? null;
return [null, data];
}
} }
if (key === 'rl_actor_test_min' && minCounter) { if (command === 'set') {
return JSON.stringify(minCounter); if (args[0] === 'rl_actor_test_c') {
limitCounter = parseInt(args[1] as string);
return [null, args[1]];
}
if (args[0] === 'rl_actor_test_t') {
limitTimestamp = parseInt(args[1] as string);
return [null, args[1]];
}
}
if (command === 'incr') {
if (args[0] === 'rl_actor_test_c') {
limitCounter = (limitCounter ?? 0) + 1;
return [null, null];
}
if (args[0] === 'rl_actor_test_t') {
limitTimestamp = (limitTimestamp ?? 0) + 1;
return [null, null];
}
}
if (command === 'incrby') {
if (args[0] === 'rl_actor_test_c') {
limitCounter = (limitCounter ?? 0) + parseInt(args[1] as string);
return [null, null];
}
if (args[0] === 'rl_actor_test_t') {
limitTimestamp = (limitTimestamp ?? 0) + parseInt(args[1] as string);
return [null, null];
}
}
if (command === 'decr') {
if (args[0] === 'rl_actor_test_c') {
limitCounter = (limitCounter ?? 0) - 1;
return [null, null];
}
if (args[0] === 'rl_actor_test_t') {
limitTimestamp = (limitTimestamp ?? 0) - 1;
return [null, null];
}
}
if (command === 'decrby') {
if (args[0] === 'rl_actor_test_c') {
limitCounter = (limitCounter ?? 0) - parseInt(args[1] as string);
return [null, null];
}
if (args[0] === 'rl_actor_test_t') {
limitTimestamp = (limitTimestamp ?? 0) - parseInt(args[1] as string);
return [null, null];
}
} }
return null; return null;
}; });
mockRedisSet = (args: unknown[]) => {
const [key, value] = args;
if (key === 'rl_actor_test_bucket') {
if (value == null) counter = undefined;
else if (typeof(value) === 'string') counter = JSON.parse(value);
else throw new Error('invalid redis call');
}
if (key === 'rl_actor_test_min') {
if (value == null) minCounter = undefined;
else if (typeof(value) === 'string') minCounter = JSON.parse(value);
else throw new Error('invalid redis call');
}
};
}); });
it('should bypass in non-production', async () => { it('should bypass in test environment', async () => {
mockEnvironment.NODE_ENV = 'test'; mockEnvironment.NODE_ENV = 'test';
const info = await serviceUnderTest().limit({ key: 'l', type: undefined, max: 0 }, 'actor'); const info = await serviceUnderTest().limit({ key: 'l', type: undefined, max: 0 }, actor);
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
expect(info.remaining).toBe(Number.MAX_SAFE_INTEGER); expect(info.remaining).toBe(Number.MAX_SAFE_INTEGER);
@ -158,15 +187,10 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should not error when allowed', async () => {
await serviceUnderTest().limit(limit, actor);
expectNoUnhandledErrors();
});
it('should return correct info when allowed', async () => { it('should return correct info when allowed', async () => {
limit.size = 2; limit.size = 2;
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -180,8 +204,7 @@ describe(SkRateLimiterService, () => {
it('should increment counter when called', async () => { it('should increment counter when called', async () => {
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter).not.toBeUndefined(); expect(limitCounter).toBe(1);
expect(counter?.c).toBe(1);
}); });
it('should set timestamp when called', async () => { it('should set timestamp when called', async () => {
@ -189,29 +212,28 @@ describe(SkRateLimiterService, () => {
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter).not.toBeUndefined(); expect(limitTimestamp).toBe(1000);
expect(counter?.t).toBe(1000);
}); });
it('should decrement counter when dripRate has passed', async () => { it('should decrement counter when dripRate has passed', async () => {
counter = { c: 2, t: 0 }; limitCounter = 2;
limitTimestamp = 0;
mockTimeService.now = 2000; mockTimeService.now = 2000;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter).not.toBeUndefined(); expect(limitCounter).toBe(1); // 2 (starting) - 2 (2x1 drip) + 1 (call) = 1
expect(counter?.c).toBe(1); // 2 (starting) - 2 (2x1 drip) + 1 (call) = 1
}); });
it('should decrement counter by dripSize', async () => { it('should decrement counter by dripSize', async () => {
counter = { c: 2, t: 0 }; limitCounter = 2;
limitTimestamp = 0;
limit.dripSize = 2; limit.dripSize = 2;
mockTimeService.now = 1000; mockTimeService.now = 1000;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter).not.toBeUndefined(); expect(limitCounter).toBe(1); // 2 (starting) - 2 (1x2 drip) + 1 (call) = 1
expect(counter?.c).toBe(1); // 2 (starting) - 2 (1x2 drip) + 1 (call) = 1
}); });
it('should maintain counter between calls over time', async () => { it('should maintain counter between calls over time', async () => {
@ -226,25 +248,13 @@ describe(SkRateLimiterService, () => {
mockTimeService.now += 1000; // 2 - 1 = 1 mockTimeService.now += 1000; // 2 - 1 = 1
await serviceUnderTest().limit(limit, actor); // 1 + 1 = 2 await serviceUnderTest().limit(limit, actor); // 1 + 1 = 2
expect(counter?.c).toBe(2); expect(limitCounter).toBe(2);
expect(counter?.t).toBe(3000); expect(limitTimestamp).toBe(3000);
});
it('should log error and continue when update fails', async () => {
mockRedisSet = () => {
throw new Error('test error');
};
await serviceUnderTest().limit(limit, actor);
const matchingError = loggedMessages
.find(m => m.level === 'error' && m.data
.some(d => typeof(d) === 'string' && d.includes('Failed to update limit')));
expect(matchingError).toBeTruthy();
}); });
it('should block when bucket is filled', async () => { it('should block when bucket is filled', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -252,7 +262,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should calculate correct info when blocked', async () => { it('should calculate correct info when blocked', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -263,7 +274,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should allow when bucket is filled but should drip', async () => { it('should allow when bucket is filled but should drip', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now = 1000; mockTimeService.now = 1000;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -272,7 +284,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should scale limit by factor', async () => { it('should scale limit by factor', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const i1 = await serviceUnderTest().limit(limit, actor, 0.5); // 1 + 1 = 2 const i1 = await serviceUnderTest().limit(limit, actor, 0.5); // 1 + 1 = 2
const i2 = await serviceUnderTest().limit(limit, actor, 0.5); // 2 + 1 = 3 const i2 = await serviceUnderTest().limit(limit, actor, 0.5); // 2 + 1 = 3
@ -281,23 +294,39 @@ describe(SkRateLimiterService, () => {
expect(i2.blocked).toBeTruthy(); expect(i2.blocked).toBeTruthy();
}); });
it('should set key expiration', async () => { it('should set counter expiration', async () => {
const mock = jest.fn(mockRedisSet); const commands: unknown[][] = [];
mockRedisSet = mock; mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]); expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
});
it('should set timestamp expiration', async () => {
const commands: unknown[][] = [];
mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor);
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
}); });
it('should not increment when already blocked', async () => { it('should not increment when already blocked', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now += 100; mockTimeService.now += 100;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter?.c).toBe(1); expect(limitCounter).toBe(1);
expect(counter?.t).toBe(0); expect(limitTimestamp).toBe(0);
}); });
it('should skip if factor is zero', async () => { it('should skip if factor is zero', async () => {
@ -384,6 +413,19 @@ describe(SkRateLimiterService, () => {
await expect(promise).rejects.toThrow(/dripSize is less than 1/); await expect(promise).rejects.toThrow(/dripSize is less than 1/);
}); });
it('should apply correction if extra calls slip through', async () => {
limitCounter = 2;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeTruthy();
expect(info.remaining).toBe(0);
expect(info.resetMs).toBe(2000);
expect(info.resetSec).toBe(2);
expect(info.fullResetMs).toBe(2000);
expect(info.fullResetSec).toBe(2);
});
}); });
describe('with min interval', () => { describe('with min interval', () => {
@ -403,12 +445,6 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should not error when allowed', async () => {
await serviceUnderTest().limit(limit, actor);
expectNoUnhandledErrors();
});
it('should calculate correct info when allowed', async () => { it('should calculate correct info when allowed', async () => {
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -422,8 +458,8 @@ describe(SkRateLimiterService, () => {
it('should increment counter when called', async () => { it('should increment counter when called', async () => {
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(minCounter).not.toBeUndefined(); expect(limitCounter).not.toBeUndefined();
expect(minCounter?.c).toBe(1); expect(limitCounter).toBe(1);
}); });
it('should set timestamp when called', async () => { it('should set timestamp when called', async () => {
@ -431,28 +467,19 @@ describe(SkRateLimiterService, () => {
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(minCounter).not.toBeUndefined(); expect(limitCounter).not.toBeUndefined();
expect(minCounter?.t).toBe(1000); expect(limitTimestamp).toBe(1000);
}); });
it('should decrement counter when minInterval has passed', async () => { it('should decrement counter when minInterval has passed', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now = 1000; mockTimeService.now = 1000;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(minCounter).not.toBeUndefined(); expect(limitCounter).not.toBeUndefined();
expect(minCounter?.c).toBe(1); // 1 (starting) - 1 (interval) + 1 (call) = 1 expect(limitCounter).toBe(1); // 1 (starting) - 1 (interval) + 1 (call) = 1
});
it('should reset counter entirely', async () => {
minCounter = { c: 2, t: 0 };
mockTimeService.now = 1000;
await serviceUnderTest().limit(limit, actor);
expect(minCounter).not.toBeUndefined();
expect(minCounter?.c).toBe(1); // 2 (starting) - 2 (interval) + 1 (call) = 1
}); });
it('should maintain counter between calls over time', async () => { it('should maintain counter between calls over time', async () => {
@ -463,27 +490,16 @@ describe(SkRateLimiterService, () => {
await serviceUnderTest().limit(limit, actor); // blocked await serviceUnderTest().limit(limit, actor); // blocked
mockTimeService.now += 1000; // 1 - 1 = 0 mockTimeService.now += 1000; // 1 - 1 = 0
mockTimeService.now += 1000; // 0 - 1 = 0 mockTimeService.now += 1000; // 0 - 1 = 0
await serviceUnderTest().limit(limit, actor); // 0 + 1 = 1 const info = await serviceUnderTest().limit(limit, actor); // 0 + 1 = 1
expect(minCounter?.c).toBe(1); expect(info.blocked).toBeFalsy();
expect(minCounter?.t).toBe(3000); expect(limitCounter).toBe(1);
}); expect(limitTimestamp).toBe(3000);
it('should log error and continue when update fails', async () => {
mockRedisSet = () => {
throw new Error('test error');
};
await serviceUnderTest().limit(limit, actor);
const matchingError = loggedMessages
.find(m => m.level === 'error' && m.data
.some(d => typeof(d) === 'string' && d.includes('Failed to update limit')));
expect(matchingError).toBeTruthy();
}); });
it('should block when interval exceeded', async () => { it('should block when interval exceeded', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -491,7 +507,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should calculate correct info when blocked', async () => { it('should calculate correct info when blocked', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -502,7 +519,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should allow when bucket is filled but interval has passed', async () => { it('should allow when bucket is filled but interval has passed', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now = 1000; mockTimeService.now = 1000;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -511,7 +529,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should scale interval by factor', async () => { it('should scale interval by factor', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now += 500; mockTimeService.now += 500;
const info = await serviceUnderTest().limit(limit, actor, 0.5); const info = await serviceUnderTest().limit(limit, actor, 0.5);
@ -519,23 +538,39 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should set key expiration', async () => { it('should set counter expiration', async () => {
const mock = jest.fn(mockRedisSet); const commands: unknown[][] = [];
mockRedisSet = mock; mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(mock).toHaveBeenCalledWith(['rl_actor_test_min', '{"t":0,"c":1}', 'EX', 1]); expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
});
it('should set timer expiration', async () => {
const commands: unknown[][] = [];
mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor);
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
}); });
it('should not increment when already blocked', async () => { it('should not increment when already blocked', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now += 100; mockTimeService.now += 100;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(minCounter?.c).toBe(1); expect(limitCounter).toBe(1);
expect(minCounter?.t).toBe(0); expect(limitTimestamp).toBe(0);
}); });
it('should skip if factor is zero', async () => { it('should skip if factor is zero', async () => {
@ -567,6 +602,19 @@ describe(SkRateLimiterService, () => {
await expect(promise).rejects.toThrow(/minInterval is negative/); await expect(promise).rejects.toThrow(/minInterval is negative/);
}); });
it('should apply correction if extra calls slip through', async () => {
limitCounter = 2;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeTruthy();
expect(info.remaining).toBe(0);
expect(info.resetMs).toBe(2000);
expect(info.resetSec).toBe(2);
expect(info.fullResetMs).toBe(2000);
expect(info.fullResetSec).toBe(2);
});
}); });
describe('with legacy limit', () => { describe('with legacy limit', () => {
@ -587,16 +635,11 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should not error when allowed', async () => {
await serviceUnderTest().limit(limit, actor);
expectNoUnhandledErrors();
});
it('should infer dripRate from duration', async () => { it('should infer dripRate from duration', async () => {
limit.max = 10; limit.max = 10;
limit.duration = 10000; limit.duration = 10000;
counter = { c: 10, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
const i1 = await serviceUnderTest().limit(limit, actor); const i1 = await serviceUnderTest().limit(limit, actor);
mockTimeService.now += 1000; mockTimeService.now += 1000;
@ -619,7 +662,8 @@ describe(SkRateLimiterService, () => {
it('should calculate correct info when allowed', async () => { it('should calculate correct info when allowed', async () => {
limit.max = 10; limit.max = 10;
limit.duration = 10000; limit.duration = 10000;
counter = { c: 10, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
mockTimeService.now += 2000; mockTimeService.now += 2000;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -634,7 +678,8 @@ describe(SkRateLimiterService, () => {
it('should calculate correct info when blocked', async () => { it('should calculate correct info when blocked', async () => {
limit.max = 10; limit.max = 10;
limit.duration = 10000; limit.duration = 10000;
counter = { c: 10, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -646,7 +691,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should allow when bucket is filled but interval has passed', async () => { it('should allow when bucket is filled but interval has passed', async () => {
counter = { c: 10, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
mockTimeService.now = 1000; mockTimeService.now = 1000;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -655,37 +701,55 @@ describe(SkRateLimiterService, () => {
}); });
it('should scale limit by factor', async () => { it('should scale limit by factor', async () => {
counter = { c: 10, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor, 0.5); // 10 + 1 = 11 const info = await serviceUnderTest().limit(limit, actor, 0.5); // 10 + 1 = 11
expect(info.blocked).toBeTruthy(); expect(info.blocked).toBeTruthy();
}); });
it('should set key expiration', async () => { it('should set counter expiration', async () => {
const mock = jest.fn(mockRedisSet); const commands: unknown[][] = [];
mockRedisSet = mock; mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]); expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
});
it('should set timestamp expiration', async () => {
const commands: unknown[][] = [];
mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor);
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
}); });
it('should not increment when already blocked', async () => { it('should not increment when already blocked', async () => {
counter = { c: 1, t: 0 }; limitCounter = 1;
limitTimestamp = 0;
mockTimeService.now += 100; mockTimeService.now += 100;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter?.c).toBe(1); expect(limitCounter).toBe(1);
expect(counter?.t).toBe(0); expect(limitTimestamp).toBe(0);
}); });
it('should not allow dripRate to be lower than 0', async () => { it('should not allow dripRate to be lower than 0', async () => {
// real-world case; taken from StreamingApiServerService // real-world case; taken from StreamingApiServerService
limit.max = 4096; limit.max = 4096;
limit.duration = 2000; limit.duration = 2000;
counter = { c: 4096, t: 0 }; limitCounter = 4096;
limitTimestamp = 0;
const i1 = await serviceUnderTest().limit(limit, actor); const i1 = await serviceUnderTest().limit(limit, actor);
mockTimeService.now = 1; mockTimeService.now = 1;
@ -708,12 +772,21 @@ describe(SkRateLimiterService, () => {
await expect(promise).rejects.toThrow(/factor is zero or negative/); await expect(promise).rejects.toThrow(/factor is zero or negative/);
}); });
it('should skip if duration is zero', async () => {
limit.duration = 0;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeFalsy();
expect(info.remaining).toBe(Number.MAX_SAFE_INTEGER);
});
it('should throw if max is zero', async () => { it('should throw if max is zero', async () => {
limit.max = 0; limit.max = 0;
const promise = serviceUnderTest().limit(limit, actor); const promise = serviceUnderTest().limit(limit, actor);
await expect(promise).rejects.toThrow(/size is less than 1/); await expect(promise).rejects.toThrow(/max is less than 1/);
}); });
it('should throw if max is negative', async () => { it('should throw if max is negative', async () => {
@ -721,7 +794,20 @@ describe(SkRateLimiterService, () => {
const promise = serviceUnderTest().limit(limit, actor); const promise = serviceUnderTest().limit(limit, actor);
await expect(promise).rejects.toThrow(/size is less than 1/); await expect(promise).rejects.toThrow(/max is less than 1/);
});
it('should apply correction if extra calls slip through', async () => {
limitCounter = 2;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeTruthy();
expect(info.remaining).toBe(0);
expect(info.resetMs).toBe(2000);
expect(info.resetSec).toBe(2);
expect(info.fullResetMs).toBe(2000);
expect(info.fullResetSec).toBe(2);
}); });
}); });
@ -732,7 +818,7 @@ describe(SkRateLimiterService, () => {
limit = { limit = {
type: undefined, type: undefined,
key, key,
max: 5, max: 10,
duration: 5000, duration: 5000,
minInterval: 1000, minInterval: 1000,
}; };
@ -744,22 +830,9 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should not error when allowed', async () => {
await serviceUnderTest().limit(limit, actor);
expectNoUnhandledErrors();
});
it('should block when limit exceeded', async () => { it('should block when limit exceeded', async () => {
counter = { c: 5, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeTruthy();
});
it('should block when minInterval exceeded', async () => {
minCounter = { c: 1, t: 0 };
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -767,19 +840,8 @@ describe(SkRateLimiterService, () => {
}); });
it('should calculate correct info when allowed', async () => { it('should calculate correct info when allowed', async () => {
counter = { c: 1, t: 0 }; limitCounter = 9;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.remaining).toBe(0);
expect(info.resetSec).toBe(1);
expect(info.resetMs).toBe(1000);
expect(info.fullResetSec).toBe(2);
expect(info.fullResetMs).toBe(2000);
});
it('should calculate correct info when blocked by limit', async () => {
counter = { c: 5, t: 0 };
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -790,20 +852,22 @@ describe(SkRateLimiterService, () => {
expect(info.fullResetMs).toBe(5000); expect(info.fullResetMs).toBe(5000);
}); });
it('should calculate correct info when blocked by minInterval', async () => { it('should calculate correct info when blocked', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 10;
limitTimestamp = 0;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
expect(info.remaining).toBe(0); expect(info.remaining).toBe(0);
expect(info.resetSec).toBe(1); expect(info.resetSec).toBe(1);
expect(info.resetMs).toBe(1000); expect(info.resetMs).toBe(1000);
expect(info.fullResetSec).toBe(1); expect(info.fullResetSec).toBe(5);
expect(info.fullResetMs).toBe(1000); expect(info.fullResetMs).toBe(5000);
}); });
it('should allow when counter is filled but interval has passed', async () => { it('should allow when counter is filled but interval has passed', async () => {
counter = { c: 5, t: 0 }; limitCounter = 5;
limitTimestamp = 0;
mockTimeService.now = 1000; mockTimeService.now = 1000;
const info = await serviceUnderTest().limit(limit, actor); const info = await serviceUnderTest().limit(limit, actor);
@ -811,18 +875,23 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should allow when minCounter is filled but interval has passed', async () => { it('should drip according to minInterval', async () => {
minCounter = { c: 1, t: 0 }; limitCounter = 10;
mockTimeService.now = 1000; limitTimestamp = 0;
mockTimeService.now += 1000;
const info = await serviceUnderTest().limit(limit, actor); const i1 = await serviceUnderTest().limit(limit, actor);
const i2 = await serviceUnderTest().limit(limit, actor);
const i3 = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeFalsy(); expect(i1.blocked).toBeFalsy();
expect(i2.blocked).toBeFalsy();
expect(i3.blocked).toBeTruthy();
}); });
it('should scale limit and interval by factor', async () => { it('should scale limit and interval by factor', async () => {
counter = { c: 5, t: 0 }; limitCounter = 5;
minCounter = { c: 1, t: 0 }; limitTimestamp = 0;
mockTimeService.now += 500; mockTimeService.now += 500;
const info = await serviceUnderTest().limit(limit, actor, 0.5); const info = await serviceUnderTest().limit(limit, actor, 0.5);
@ -830,27 +899,52 @@ describe(SkRateLimiterService, () => {
expect(info.blocked).toBeFalsy(); expect(info.blocked).toBeFalsy();
}); });
it('should set key expiration', async () => { it('should set counter expiration', async () => {
const mock = jest.fn(mockRedisSet); const commands: unknown[][] = [];
mockRedisSet = mock; mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]); expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 5]);
expect(mock).toHaveBeenCalledWith(['rl_actor_test_min', '{"t":0,"c":1}', 'EX', 1]); });
it('should set timestamp expiration', async () => {
const commands: unknown[][] = [];
mockRedis.push(command => {
commands.push(command);
return null;
});
await serviceUnderTest().limit(limit, actor);
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 5]);
}); });
it('should not increment when already blocked', async () => { it('should not increment when already blocked', async () => {
counter = { c: 5, t: 0 }; limitCounter = 10;
minCounter = { c: 1, t: 0 }; limitTimestamp = 0;
mockTimeService.now += 100; mockTimeService.now += 100;
await serviceUnderTest().limit(limit, actor); await serviceUnderTest().limit(limit, actor);
expect(counter?.c).toBe(5); expect(limitCounter).toBe(10);
expect(counter?.t).toBe(0); expect(limitTimestamp).toBe(0);
expect(minCounter?.c).toBe(1); });
expect(minCounter?.t).toBe(0);
it('should apply correction if extra calls slip through', async () => {
limitCounter = 12;
const info = await serviceUnderTest().limit(limit, actor);
expect(info.blocked).toBeTruthy();
expect(info.remaining).toBe(0);
expect(info.resetMs).toBe(2000);
expect(info.resetSec).toBe(2);
expect(info.fullResetMs).toBe(6000);
expect(info.fullResetSec).toBe(6);
}); });
}); });
}); });