2022-11-21 11:57:07 +01:00
|
|
|
import { IUnleashConfig, IUnleashServices, IUnleashStores } from '../types';
|
2022-08-16 15:33:33 +02:00
|
|
|
import { Logger } from '../logger';
|
2023-03-03 16:36:23 +01:00
|
|
|
import { ClientMetricsSchema, ProxyFeatureSchema } from '../openapi';
|
2022-08-16 15:33:33 +02:00
|
|
|
import ApiUser from '../types/api-user';
|
|
|
|
import {
|
|
|
|
Context,
|
|
|
|
InMemStorageProvider,
|
|
|
|
Unleash,
|
|
|
|
UnleashEvents,
|
|
|
|
} from 'unleash-client';
|
2022-11-21 11:57:07 +01:00
|
|
|
import { ProxyRepository } from '../proxy';
|
2022-08-16 15:33:33 +02:00
|
|
|
import { ApiTokenType } from '../types/models/api-token';
|
2022-12-14 17:35:22 +01:00
|
|
|
import {
|
|
|
|
FrontendSettings,
|
|
|
|
frontendSettingsKey,
|
|
|
|
} from '../types/settings/frontend-settings';
|
|
|
|
import { validateOrigins } from '../util';
|
2023-02-13 08:40:04 +01:00
|
|
|
import { BadDataError, InvalidTokenError } from '../error';
|
2022-12-14 17:35:22 +01:00
|
|
|
import { minutesToMilliseconds } from 'date-fns';
|
2022-08-16 15:33:33 +02:00
|
|
|
|
2022-12-14 17:35:22 +01:00
|
|
|
// The slice of the Unleash config this service reads: logger factory and
// the frontend API settings/origins used for the settings cache.
type Config = Pick<
    IUnleashConfig,
    'getLogger' | 'frontendApi' | 'frontendApiOrigins'
>;

// Stores handed to each per-token ProxyRepository.
type Stores = Pick<IUnleashStores, 'projectStore' | 'eventStore'>;

// Services used for toggle evaluation, metrics registration, and
// frontend-settings persistence.
type Services = Pick<
    IUnleashServices,
    | 'featureToggleServiceV2'
    | 'segmentService'
    | 'clientMetricsServiceV2'
    | 'settingService'
>;
|
|
|
|
|
|
|
|
export class ProxyService {
|
|
|
|
private readonly config: Config;
|
|
|
|
|
|
|
|
private readonly logger: Logger;
|
|
|
|
|
|
|
|
private readonly stores: Stores;
|
|
|
|
|
|
|
|
private readonly services: Services;
|
|
|
|
|
|
|
|
private readonly clients: Map<ApiUser['secret'], Unleash> = new Map();
|
|
|
|
|
2022-12-14 17:35:22 +01:00
|
|
|
private cachedFrontendSettings?: FrontendSettings;
|
|
|
|
|
fix: polling in proxy repository now stops correctly (#3268)
### What
This patches two very subtle bugs in the proxy repository that cause it
to never actually stop polling the db in the background
## Details - Issue 1
We've recently started to get the following output when running `yarn
test`:
` Attempted to log "Error: Unable to acquire a connection
at Object.queryBuilder
(/home/simon/dev/unleash/node_modules/knex/lib/knex-builder/make-knex.js:111:26)`
This seems to occur for every test suite after running the proxy tests
and the full stack trace doesn't point to anything related to the
running tests that produce this output. Running a `git bisect` points to
this commit:
https://github.com/Unleash/unleash/commit/6e44a65c58d8e28668f0d3459b62c0ce0b84849a
being the culprit but I believe that this may have surfaced the bug
rather than causing it.
Layering in a few console logs and running Unleash, seems to point to
the proxy repository setting up data polling but never actually
terminating it when `stop` was called, which is in line with the output
here - effectively the tests were continuing to run the polling in the
background after the suite had exited and jest freaks out that an async
task is running when it shouldn't be. This is easy to reproduce once the
console logs are in place in the `dataPolling` function, by running
Unleash - creating and deleting a front end token never terminates the
poll cycle.
I believe the cause here is some subtlety around using async functions
with timers - stop was being called, which results in the timer being
cleared but a scheduled async call was already on the stack, causing the
recursive call to resolve after stop, resurrecting the timer and
reinitializing the poll cycle.
I've moved the terminating code into the async callback. Which seems to
solve the problem here.
## Details - Issue 2
Related to the first issue, when the proxy service stops the underlying
Unleash Client, it never actually calls destroy on the client, it only
removes it from its internal map. That in turn means that the Client
never calls stop on the injected repository, it only removes it from
memory. However, the scheduled task is `async` and `unref`, meaning it
continues to spin in the background until every other process also
exits. This is patched by simply calling destroy on the client when
cleaning up
## The Ugly
This is really hard to test effectively, mostly because this is an issue
caused by internals within NodeJS and async. I've added a test that
reads the output from the debug log (and also placed a debug log in the
termination code). This also requires the test code to wait until the
async task completes. This is horribly fragile so if someone has a
better idea on how to prove this I would be a very happy human.
The second ugly part is that this is a subtle issue in complex code that
really, really needs to work correctly. I'm nervous about making changes
here without lots of eyes on this
2023-03-10 09:03:32 +01:00
|
|
|
private timer: NodeJS.Timeout | null;
|
2022-12-14 17:35:22 +01:00
|
|
|
|
2022-08-16 15:33:33 +02:00
|
|
|
constructor(config: Config, stores: Stores, services: Services) {
|
|
|
|
this.config = config;
|
|
|
|
this.logger = config.getLogger('services/proxy-service.ts');
|
|
|
|
this.stores = stores;
|
|
|
|
this.services = services;
|
2022-12-14 17:35:22 +01:00
|
|
|
|
|
|
|
this.timer = setInterval(
|
|
|
|
() => this.fetchFrontendSettings(),
|
|
|
|
minutesToMilliseconds(2),
|
|
|
|
).unref();
|
2022-08-16 15:33:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
async getProxyFeatures(
|
|
|
|
token: ApiUser,
|
|
|
|
context: Context,
|
|
|
|
): Promise<ProxyFeatureSchema[]> {
|
|
|
|
const client = await this.clientForProxyToken(token);
|
|
|
|
const definitions = client.getFeatureToggleDefinitions() || [];
|
|
|
|
|
|
|
|
return definitions
|
|
|
|
.filter((feature) => client.isEnabled(feature.name, context))
|
|
|
|
.map((feature) => ({
|
|
|
|
name: feature.name,
|
|
|
|
enabled: Boolean(feature.enabled),
|
|
|
|
variant: client.forceGetVariant(feature.name, context),
|
|
|
|
impressionData: Boolean(feature.impressionData),
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
|
|
|
|
async registerProxyMetrics(
|
|
|
|
token: ApiUser,
|
2023-03-03 16:36:23 +01:00
|
|
|
metrics: ClientMetricsSchema,
|
2022-08-16 15:33:33 +02:00
|
|
|
ip: string,
|
|
|
|
): Promise<void> {
|
|
|
|
ProxyService.assertExpectedTokenType(token);
|
|
|
|
|
|
|
|
const environment =
|
|
|
|
this.services.clientMetricsServiceV2.resolveMetricsEnvironment(
|
|
|
|
token,
|
|
|
|
metrics,
|
|
|
|
);
|
|
|
|
|
|
|
|
await this.services.clientMetricsServiceV2.registerClientMetrics(
|
|
|
|
{ ...metrics, environment },
|
|
|
|
ip,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
private async clientForProxyToken(token: ApiUser): Promise<Unleash> {
|
|
|
|
ProxyService.assertExpectedTokenType(token);
|
|
|
|
|
|
|
|
if (!this.clients.has(token.secret)) {
|
|
|
|
this.clients.set(
|
|
|
|
token.secret,
|
|
|
|
await this.createClientForProxyToken(token),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
return this.clients.get(token.secret);
|
|
|
|
}
|
|
|
|
|
|
|
|
private async createClientForProxyToken(token: ApiUser): Promise<Unleash> {
|
|
|
|
const repository = new ProxyRepository(
|
|
|
|
this.config,
|
|
|
|
this.stores,
|
|
|
|
this.services,
|
|
|
|
token,
|
|
|
|
);
|
|
|
|
|
2023-02-10 10:51:53 +01:00
|
|
|
const client = new Unleash({
|
2022-08-16 15:33:33 +02:00
|
|
|
appName: 'proxy',
|
|
|
|
url: 'unused',
|
|
|
|
storageProvider: new InMemStorageProvider(),
|
|
|
|
disableMetrics: true,
|
|
|
|
repository,
|
|
|
|
});
|
|
|
|
|
|
|
|
client.on(UnleashEvents.Error, (error) => {
|
|
|
|
this.logger.error(error);
|
|
|
|
});
|
|
|
|
|
2023-02-10 10:51:53 +01:00
|
|
|
await client.start();
|
|
|
|
|
2022-08-16 15:33:33 +02:00
|
|
|
return client;
|
|
|
|
}
|
|
|
|
|
2022-08-22 15:02:39 +02:00
|
|
|
deleteClientForProxyToken(secret: string): void {
|
fix: polling in proxy repository now stops correctly (#3268)
### What
This patches two very subtle bugs in the proxy repository that cause it
to never actually stop polling the db in the background
## Details - Issue 1
We've recently started to get the following output when running `yarn
test`:
` Attempted to log "Error: Unable to acquire a connection
at Object.queryBuilder
(/home/simon/dev/unleash/node_modules/knex/lib/knex-builder/make-knex.js:111:26)`
This seems to occur for every test suite after running the proxy tests
and the full stack trace doesn't point to anything related to the
running tests that produce this output. Running a `git bisect` points to
this commit:
https://github.com/Unleash/unleash/commit/6e44a65c58d8e28668f0d3459b62c0ce0b84849a
being the culprit but I believe that this may have surfaced the bug
rather than causing it.
Layering in a few console logs and running Unleash, seems to point to
the proxy repository setting up data polling but never actually
terminating it when `stop` was called, which is in line with the output
here - effectively the tests were continuing to run the polling in the
background after the suite had exited and jest freaks out that an async
task is running when it shouldn't be. This is easy to reproduce once the
console logs are in place in the `dataPolling` function, by running
Unleash - creating and deleting a front end token never terminates the
poll cycle.
I believe the cause here is some subtlety around using async functions
with timers - stop was being called, which results in the timer being
cleared but a scheduled async call was already on the stack, causing the
recursive call to resolve after stop, resurrecting the timer and
reinitializing the poll cycle.
I've moved the terminating code into the async callback. Which seems to
solve the problem here.
## Details - Issue 2
Related to the first issue, when the proxy service stops the underlying
Unleash Client, it never actually calls destroy on the client, it only
removes it from its internal map. That in turn means that the Client
never calls stop on the injected repository, it only removes it from
memory. However, the scheduled task is `async` and `unref`, meaning it
continues to spin in the background until every other process also
exits. This is patched by simply calling destroy on the client when
cleaning up
## The Ugly
This is really hard to test effectively, mostly because this is an issue
caused by internals within NodeJS and async. I've added a test that
reads the output from the debug log (and also placed a debug log in the
termination code). This also requires the test code to wait until the
async task completes. This is horribly fragile so if someone has a
better idea on how to prove this I would be a very happy human.
The second ugly part is that this is a subtle issue in complex code that
really, really needs to work correctly. I'm nervous about making changes
here without lots of eyes on this
2023-03-10 09:03:32 +01:00
|
|
|
this.clients.get(secret)?.destroy();
|
2022-08-22 15:02:39 +02:00
|
|
|
this.clients.delete(secret);
|
|
|
|
}
|
|
|
|
|
2022-09-28 14:23:41 +02:00
|
|
|
stopAll(): void {
|
|
|
|
this.clients.forEach((client) => client.destroy());
|
|
|
|
}
|
|
|
|
|
2022-08-16 15:33:33 +02:00
|
|
|
private static assertExpectedTokenType({ type }: ApiUser) {
|
2023-02-13 08:40:04 +01:00
|
|
|
if (!(type === ApiTokenType.FRONTEND || type === ApiTokenType.ADMIN)) {
|
|
|
|
throw new InvalidTokenError();
|
|
|
|
}
|
2022-08-16 15:33:33 +02:00
|
|
|
}
|
2022-12-14 17:35:22 +01:00
|
|
|
|
|
|
|
async setFrontendSettings(
|
|
|
|
value: FrontendSettings,
|
|
|
|
createdBy: string,
|
|
|
|
): Promise<void> {
|
|
|
|
const error = validateOrigins(value.frontendApiOrigins);
|
|
|
|
if (error) {
|
|
|
|
throw new BadDataError(error);
|
|
|
|
}
|
|
|
|
await this.services.settingService.insert(
|
|
|
|
frontendSettingsKey,
|
|
|
|
value,
|
|
|
|
createdBy,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
private async fetchFrontendSettings(): Promise<FrontendSettings> {
|
2022-12-14 20:24:47 +01:00
|
|
|
try {
|
|
|
|
this.cachedFrontendSettings =
|
|
|
|
await this.services.settingService.get(frontendSettingsKey, {
|
|
|
|
frontendApiOrigins: this.config.frontendApiOrigins,
|
|
|
|
});
|
|
|
|
} catch (error) {
|
|
|
|
this.logger.debug('Unable to fetch frontend settings');
|
|
|
|
}
|
2022-12-14 17:35:22 +01:00
|
|
|
return this.cachedFrontendSettings;
|
|
|
|
}
|
|
|
|
|
|
|
|
async getFrontendSettings(
|
|
|
|
useCache: boolean = true,
|
|
|
|
): Promise<FrontendSettings> {
|
|
|
|
if (useCache && this.cachedFrontendSettings) {
|
|
|
|
return this.cachedFrontendSettings;
|
|
|
|
}
|
|
|
|
return this.fetchFrontendSettings();
|
|
|
|
}
|
|
|
|
|
|
|
|
destroy(): void {
|
fix: polling in proxy repository now stops correctly (#3268)
### What
This patches two very subtle bugs in the proxy repository that cause it
to never actually stop polling the db in the background
## Details - Issue 1
We've recently started to get the following output when running `yarn
test`:
` Attempted to log "Error: Unable to acquire a connection
at Object.queryBuilder
(/home/simon/dev/unleash/node_modules/knex/lib/knex-builder/make-knex.js:111:26)`
This seems to occur for every test suite after running the proxy tests
and the full stack trace doesn't point to anything related to the
running tests that produce this output. Running a `git bisect` points to
this commit:
https://github.com/Unleash/unleash/commit/6e44a65c58d8e28668f0d3459b62c0ce0b84849a
being the culprit but I believe that this may have surfaced the bug
rather than causing it.
Layering in a few console logs and running Unleash, seems to point to
the proxy repository setting up data polling but never actually
terminating it when `stop` was called, which is in line with the output
here - effectively the tests were continuing to run the polling in the
background after the suite had exited and jest freaks out that an async
task is running when it shouldn't be. This is easy to reproduce once the
console logs are in place in the `dataPolling` function, by running
Unleash - creating and deleting a front end token never terminates the
poll cycle.
I believe the cause here is some subtlety around using async functions
with timers - stop was being called, which results in the timer being
cleared but a scheduled async call was already on the stack, causing the
recursive call to resolve after stop, resurrecting the timer and
reinitializing the poll cycle.
I've moved the terminating code into the async callback. Which seems to
solve the problem here.
## Details - Issue 2
Related to the first issue, when the proxy service stops the underlying
Unleash Client, it never actually calls destroy on the client, it only
removes it from its internal map. That in turn means that the Client
never calls stop on the injected repository, it only removes it from
memory. However, the scheduled task is `async` and `unref`, meaning it
continues to spin in the background until every other process also
exits. This is patched by simply calling destroy on the client when
cleaning up
## The Ugly
This is really hard to test effectively, mostly because this is an issue
caused by internals within NodeJS and async. I've added a test that
reads the output from the debug log (and also placed a debug log in the
termination code). This also requires the test code to wait until the
async task completes. This is horribly fragile so if someone has a
better idea on how to prove this I would be a very happy human.
The second ugly part is that this is a subtle issue in complex code that
really, really needs to work correctly. I'm nervous about making changes
here without lots of eyes on this
2023-03-10 09:03:32 +01:00
|
|
|
if (this.timer) {
|
|
|
|
clearInterval(this.timer);
|
|
|
|
this.timer = null;
|
|
|
|
}
|
2022-12-14 17:35:22 +01:00
|
|
|
}
|
2022-08-16 15:33:33 +02:00
|
|
|
}
|