
feat: Adds rate limiting to metric POST endpoints (#5075)

### What
The heaviest requests we serve are the register and metrics POSTs from our SDKs/clients.
This PR adds rate limiting to `/api/client/register`, `/api/client/metrics`, `/api/frontend/register`, and `/api/frontend/metrics`, with a default of 6000 requests per minute (100 rps) for each endpoint.

The limits are overridable via the environment variables documented below.
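For illustration, a minimal sketch of such an override, assuming the standard `unleash-server` start call; the variable name and its 6000/minute default come from this PR, everything else is placeholder:

```ts
import unleash from 'unleash-server';

// Hedged sketch: lower the frontend metrics limit from the default
// 6000 requests/minute to 3000/minute. In a real deployment the variable
// would be set in the environment (e.g. the pod spec) rather than in code.
process.env.FRONTEND_METRICS_RATE_LIMIT_PER_MINUTE = '3000';

unleash.start({
    // ...the rest of your usual unleashOptions (database, auth, etc.)
});
```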

### Points of discussion
@kwasniew already suggested using feature toggles with variants to
control the rate per clientId. I couldn't find a way to update the
middleware dynamically after initialisation, so with this approach the
pod has to be restarted to change the request limit.
Christopher Kolstad 2023-10-18 13:00:44 +02:00 committed by GitHub
parent afaefa4845
commit b06613d1b0
9 changed files with 98 additions and 10 deletions


@@ -185,6 +185,12 @@ exports[`should create default config 1`] = `
"host": undefined,
"port": 4242,
},
"metricsRateLimiting": {
"clientMetricsMax": 6000,
"clientRegisterMax": 6000,
"frontendMetricsMax": 6000,
"frontendRegisterMax": 6000,
},
"preHook": undefined,
"preRouterHook": undefined,
"prometheusApi": undefined,


@@ -18,6 +18,7 @@ import {
ICspDomainConfig,
ICspDomainOptions,
IClientCachingOption,
IMetricsRateLimiting,
} from './types/option';
import { getDefaultLogProvider, LogLevel, validateLogProvider } from './logger';
import { defaultCustomAuthDenyAll } from './default-custom-auth-deny-all';
@@ -99,6 +100,38 @@ function loadClientCachingOptions(
]);
}
function loadMetricsRateLimitingConfig(
options: IUnleashOptions,
): IMetricsRateLimiting {
const clientMetricsMaxPerMinute = parseEnvVarNumber(
process.env.REGISTER_CLIENT_RATE_LIMIT_PER_MINUTE,
6000,
);
const clientRegisterMaxPerMinute = parseEnvVarNumber(
process.env.CLIENT_METRICS_RATE_LIMIT_PER_MINUTE,
6000,
);
const frontendRegisterMaxPerMinute = parseEnvVarNumber(
process.env.REGISTER_FRONTEND_RATE_LIMIT_PER_MINUTE,
6000,
);
const frontendMetricsMaxPerMinute = parseEnvVarNumber(
process.env.FRONTEND_METRICS_RATE_LIMIT_PER_MINUTE,
6000,
);
const defaultRateLimitOptions: IMetricsRateLimiting = {
clientMetricsMaxPerMinute: clientMetricsMaxPerMinute,
clientRegisterMaxPerMinute: clientRegisterMaxPerMinute,
frontendRegisterMaxPerMinute: frontendRegisterMaxPerMinute,
frontendMetricsMaxPerMinute: frontendMetricsMaxPerMinute,
};
return mergeAll([
defaultRateLimitOptions,
options.metricsRateLimiting ?? {},
]);
}
function loadUI(options: IUnleashOptions): IUIConfig {
const uiO = options.ui || {};
const ui: IUIConfig = {
@@ -490,6 +523,8 @@ export function createConfig(options: IUnleashOptions): IUnleashConfig {
Boolean(options.enterpriseVersion) &&
ui.environment?.toLowerCase() !== 'pro';
const metricsRateLimiting = loadMetricsRateLimitingConfig(options);
return {
db,
session,
@@ -523,6 +558,7 @@ export function createConfig(options: IUnleashOptions): IUnleashConfig {
publicFolder: options.publicFolder,
disableScheduler: options.disableScheduler,
isEnterprise: isEnterprise,
metricsRateLimiting,
};
}


@@ -12,6 +12,8 @@ import {
emptyResponse,
getStandardResponses,
} from '../../openapi/util/standard-responses';
import rateLimit from 'express-rate-limit';
import { minutesToMilliseconds } from 'date-fns';
export default class ClientMetricsController extends Controller {
logger: Logger;
@@ -61,6 +63,13 @@ export default class ClientMetricsController extends Controller {
204: emptyResponse,
},
}),
rateLimit({
windowMs: minutesToMilliseconds(1),
max: config.metricsRateLimiting.clientMetricsMaxPerMinute,
validate: false,
standardHeaders: true,
legacyHeaders: false,
}),
],
});
}


@@ -13,6 +13,8 @@ import { OpenApiService } from '../../services/openapi-service';
import { emptyResponse } from '../../openapi/util/standard-responses';
import { createRequestSchema } from '../../openapi/util/create-request-schema';
import { ClientApplicationSchema } from '../../openapi/spec/client-application-schema';
import rateLimit from 'express-rate-limit';
import { minutesToMilliseconds } from 'date-fns';
export default class RegisterController extends Controller {
logger: Logger;
@@ -48,6 +50,13 @@ export default class RegisterController extends Controller {
requestBody: createRequestSchema('clientApplicationSchema'),
responses: { 202: emptyResponse },
}),
rateLimit({
windowMs: minutesToMilliseconds(1),
max: config.metricsRateLimiting.clientRegisterMaxPerMinute,
validate: false,
standardHeaders: true,
legacyHeaders: false,
}),
],
});
}


@@ -23,6 +23,8 @@ import { enrichContextWithIp } from '../../proxy';
import { corsOriginMiddleware } from '../../middleware';
import NotImplementedError from '../../error/not-implemented-error';
import NotFoundError from '../../error/notfound-error';
import rateLimit from 'express-rate-limit';
import { minutesToMilliseconds } from 'date-fns';
interface ApiUserRequest<
PARAM = any,
@@ -112,6 +114,13 @@ export default class ProxyController extends Controller {
...getStandardResponses(400, 401, 404),
},
}),
rateLimit({
windowMs: minutesToMilliseconds(1),
max: config.metricsRateLimiting.frontendMetricsMaxPerMinute,
validate: false,
standardHeaders: true,
legacyHeaders: false,
}),
],
});
@@ -133,6 +142,14 @@
...getStandardResponses(400, 401, 404),
},
}),
rateLimit({
windowMs: minutesToMilliseconds(1),
max: config.metricsRateLimiting
.frontendRegisterMaxPerMinute,
validate: false,
standardHeaders: true,
legacyHeaders: false,
}),
],
});


@@ -29,14 +29,12 @@ export const handleErrors: (
error: Error,
) => void = (res, logger, error) => {
if (createError.isHttpError(error)) {
-        return (
-            res
-                // @ts-expect-error http errors all have statuses, but there are no
-                // types provided
-                .status(error.status ?? 400)
-                .json({ message: error.message })
-                .end()
-        );
+        return res
+            .status(
+                // @ts-expect-error - The error object here is not guaranteed to contain status
+                error.status ?? 400,
+            )
+            .json({ message: error.message });
}
const finalError =


@@ -123,6 +123,7 @@ export interface IUnleashOptions {
prometheusApi?: string;
publicFolder?: string;
disableScheduler?: boolean;
metricsRateLimiting?: Partial<IMetricsRateLimiting>;
}
export interface IEmailOption {
@@ -185,6 +186,13 @@ interface IFrontendApi {
refreshIntervalInMs: number;
}
export interface IMetricsRateLimiting {
clientMetricsMaxPerMinute: number;
clientRegisterMaxPerMinute: number;
frontendMetricsMaxPerMinute: number;
frontendRegisterMaxPerMinute: number;
}
export interface IUnleashConfig {
db: IDBOption;
session: ISessionOption;
@@ -212,6 +220,7 @@ export interface IUnleashConfig {
inlineSegmentConstraints: boolean;
segmentValuesLimit: number;
strategySegmentsLimit: number;
metricsRateLimiting: IMetricsRateLimiting;
clientFeatureCaching: IClientCachingOption;
accessControlMaxAge: number;
prometheusApi?: string;


@@ -42,7 +42,7 @@ test('should await other actions on lock', async () => {
await ms(100); // start fast action after slow action established DB connection
await lockedAnotherAction('second');
-    await expect(results).toStrictEqual(['first', 'second']);
+    expect(results).toStrictEqual(['first', 'second']);
});
test('should handle lock timeout', async () => {


@@ -139,7 +139,11 @@ unleash.start(unleashOptions);
- **responseTimeWithAppNameKillSwitch** - use this to disable metrics with app names. This is enabled by default but may increase the cardinality of metrics causing Unleash memory usage to grow if your app name is randomly generated (which is not recommended). Overridable with the `UNLEASH_RESPONSE_TIME_WITH_APP_NAME_KILL_SWITCH` environment variable.
- **keepAliveTimeout** - Use this to tweak connection keepalive timeout in seconds. Useful for hosted situations where you need to make sure your connections are closed before terminating the instance. Defaults to `15`. Overridable with the `SERVER_KEEPALIVE_TIMEOUT` environment variable.
You can also set the environment variable `ENABLED_ENVIRONMENTS` to a comma delimited string of environment names to override environments.
- **metricsRateLimiting** - Use the following options to tweak the rate limits for the `/api/client/register`, `/api/client/metrics`, `/api/frontend/register`, and `/api/frontend/metrics` POST endpoints (see the sketch after this list):
  - `clientMetricsMaxPerMinute` - How many requests per minute are allowed against POST `/api/client/metrics` before returning 429. Defaults to 6000 (100 rps). Overridable with the `REGISTER_CLIENT_RATE_LIMIT_PER_MINUTE` environment variable.
  - `clientRegisterMaxPerMinute` - How many requests per minute are allowed against POST `/api/client/register` before returning 429. Defaults to 6000 (100 rps). Overridable with the `CLIENT_METRICS_RATE_LIMIT_PER_MINUTE` environment variable.
  - `frontendMetricsMaxPerMinute` - How many requests per minute are allowed against POST `/api/frontend/metrics` before returning 429. Defaults to 6000 (100 rps). Overridable with the `FRONTEND_METRICS_RATE_LIMIT_PER_MINUTE` environment variable.
  - `frontendRegisterMaxPerMinute` - How many requests per minute are allowed against POST `/api/frontend/register` before returning 429. Defaults to 6000 (100 rps). Overridable with the `REGISTER_FRONTEND_RATE_LIMIT_PER_MINUTE` environment variable.
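These limits can also be set programmatically. A minimal sketch, assuming the same `unleash.start(unleashOptions)` call shown above; `metricsRateLimiting` accepts a partial object, so anything omitted keeps its default:

```ts
import unleash from 'unleash-server';

unleash.start({
    // ...your other unleashOptions
    metricsRateLimiting: {
        // Only override what you need; the remaining endpoints keep the
        // 6000 requests/minute (100 rps) default.
        clientMetricsMaxPerMinute: 12000,
        frontendMetricsMaxPerMinute: 12000,
    },
});
```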
### Disabling Auto-Start {#disabling-auto-start}
If you're using Unleash as part of a larger Express app, you can disable the automatic server start by calling `server.create`. It takes the same options as `server.start`, but will not begin listening for connections.
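A minimal sketch of that setup, assuming `create` resolves to an instance that exposes the underlying Express app as `app` (the surrounding app and port here are illustrative):

```ts
import express from 'express';
import unleash from 'unleash-server';

const app = express();

unleash
    .create({
        // same options as server.start, e.g. database settings
    })
    .then((instance) => {
        // Mount Unleash inside the larger app; nothing listens until we say so.
        app.use(instance.app);
        app.listen(4242);
    });
```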