[Profiling] Making plugin Production ready (#159738)

This PR does:

- Checks the plugin's Kibana spec file
- Checks the server `feature.ts`
- Adds the correct route access tags to APIs (see the sketch after this list)
- Removes unnecessary logs
- Removes the collector and symbolizer `secret_token` from the config schema, as it won't be used
- Adds a README file
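The route access change follows the usual Kibana pattern of tagging a route with `access:<featureId>` so that the feature's `api` privilege (registered in `feature.ts`) gates it. A minimal sketch, assuming the feature id is `profiling` (which matches the `access:profiling` tag added throughout this diff); the path and handler below are illustrative only:

```typescript
import { schema } from '@kbn/config-schema';
import type { IRouter } from '@kbn/core/server';

export function registerExampleProfilingRoute(router: IRouter) {
  router.get(
    {
      path: '/internal/profiling/example', // illustrative path, not one added by this PR
      // Only callers granted the feature's "profiling" API privilege can reach this route.
      options: { tags: ['access:profiling'] },
      validate: {
        query: schema.object({
          timeFrom: schema.number(),
          timeTo: schema.number(),
        }),
      },
    },
    async (context, request, response) => {
      return response.ok({ body: { ok: true } });
    }
  );
}
```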

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Cauê Marcondes 2023-06-19 14:35:59 +01:00 committed by GitHub
parent 88c2f02fca
commit c87e4e983d
16 changed files with 168 additions and 111 deletions


@ -674,7 +674,7 @@ Elastic.
|{kib-repo}blob/{branch}/x-pack/plugins/profiling/README.md[profiling]
|undefined
|Universal Profiling provides fleet-wide, whole-system, continuous profiling with zero instrumentation. Get a comprehensive understanding of what lines of code are consuming compute resources throughout your entire fleet by visualizing your data in Kibana using the flamegraph, stacktraces, and top functions views.
|{kib-repo}blob/{branch}/x-pack/plugins/remote_clusters/README.md[remoteClusters]


@ -1 +1,79 @@
### TODO
# Universal Profiling (Beta)
## Overview
Universal Profiling provides fleet-wide, whole-system, continuous profiling with zero instrumentation. Get a comprehensive understanding of what lines of code are consuming compute resources throughout your entire fleet by visualizing your data in Kibana using the flamegraph, stacktraces, and top functions views.
### Universal Profiling setup
Universal Profiling is enabled by default on [Elastic Cloud](https://www.elastic.co/cloud/), and you can find it under **Observability**. To see data in Universal Profiling, you need to initialize it.
##### **Initialize Universal Profiling**
Initialize Universal Profiling by navigating to one of the views and clicking the **Set up** button. Clicking it triggers a set of checks and installs the packages needed to process data.
The following are some of the actions and checks that occur during initialization (a simplified sketch follows the list):
- Check that the APM integration is installed and configured.
- Create Universal Profiling indices.
- Install the Collector integration.
- Install the Symbolizer integration.
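Below is a minimal sketch of what this flow could look like on the server side. The helper names `isApmPackageInstalled` and `installLatestApmPackage` appear in this PR's `routes/setup.ts` imports, but their signatures, the index name, and the overall wiring here are simplified assumptions rather than the actual implementation.

```typescript
// Sketch only: simplified assumptions, not the actual setup implementation.
import type { ElasticsearchClient, Logger } from '@kbn/core/server';

interface InitDeps {
  client: ElasticsearchClient;
  logger: Logger;
  isApmPackageInstalled: () => Promise<boolean>; // assumed signature
  installLatestApmPackage: () => Promise<void>; // assumed signature
}

export async function initializeUniversalProfiling({
  client,
  logger,
  isApmPackageInstalled,
  installLatestApmPackage,
}: InitDeps): Promise<void> {
  // 1. Check that the APM integration is installed and configured.
  if (!(await isApmPackageInstalled())) {
    await installLatestApmPackage();
  }

  // 2. Create Universal Profiling indices (index name is illustrative only).
  const index = 'profiling-events-example';
  if (!(await client.indices.exists({ index }))) {
    await client.indices.create({ index });
  }

  // 3./4. Installing the Collector and Symbolizer integrations goes through
  //       Fleet's package policy service and is omitted from this sketch.
  logger.info('Universal Profiling initialized');
}
```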
### Collector integration
The Collector is the component that receives data from the profiling agents deployed on users' machines.
It runs a gRPC server over HTTPS and exposes an endpoint where the profiling agents can send data.
To send data, agents must use token-based authentication; the token is referred to as `secretToken` in the agent configurations.
The token is generated by Kibana during the setup process and, at the moment, cannot be configured by users.
The "Add Data" page displays instructions for several deployment methodologies.
The instructions contain both the endpoint and the token that allow the profiling agents to connect to the Collector.
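As a reference for the token's shape, the `getVarsFor` tests later in this diff assert a 16-character generated token. A minimal sketch of equivalent generation logic follows; the character set is an assumption and not necessarily what `generateSecretToken` uses:

```typescript
// Sketch only: mirrors the 16-character token length asserted in this PR's tests;
// the character set below is an assumption.
export function generateSecretTokenSketch(length: number = 16): string {
  const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
  let token = '';
  for (let i = 0; i < length; i++) {
    token += alphabet.charAt(Math.floor(Math.random() * alphabet.length));
  }
  return token;
}
```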
### Symbolizer integration
The Symbolizer is the component that processes debug symbols for the received profiling data, enriching the profiling visualizations with source-code metadata.
It processes both publicly available debug symbols and "private" debug symbols.
For public symbols, users don't have to do anything: the Symbolizer asynchronously intercepts unsymbolized frames and populates them automatically.
For private symbols, an HTTPS endpoint is provided to users for uploading the debug symbols of the software they own.
Authentication and authorization for this endpoint are provided as part of the request, in the form of an Elasticsearch API key.
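A hedged sketch of what an upload to such an endpoint could look like; the URL path, payload shape, and function name below are hypothetical placeholders, and only the `ApiKey` authorization scheme is the standard Elasticsearch convention:

```typescript
// Sketch only: endpoint path and payload are hypothetical placeholders,
// not the documented Symbolizer API. Requires Node 18+ for global fetch.
import { readFile } from 'node:fs/promises';

export async function uploadDebugSymbols(
  symbolizerUrl: string,
  apiKey: string,
  symbolFilePath: string
): Promise<void> {
  const body = await readFile(symbolFilePath);
  const res = await fetch(`${symbolizerUrl}/v1/symbols`, {
    method: 'POST',
    headers: {
      // Elasticsearch API key authentication (base64-encoded "id:api_key").
      Authorization: `ApiKey ${apiKey}`,
      'Content-Type': 'application/octet-stream',
    },
    body,
  });
  if (!res.ok) {
    throw new Error(`Symbol upload failed with status ${res.status}`);
  }
}
```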
## Testing (unit, e2e)
### Unit Tests (Jest)
```
node scripts/jest --config x-pack/plugins/profiling/jest.config.js [--watchAll]
```
### E2E Tests (Cypress)
The E2E tests are located in [`x-pack/plugins/profiling/e2e`](./e2e).
Universal Profiling uses [FTR](../../../packages/kbn-test/README.mdx) (functional test runner) and [Cypress](https://www.cypress.io/) to run the e2e tests. The tests are located at `kibana/x-pack/plugins/profiling/e2e/cypress/e2e`.
### Start test server
```
node x-pack/plugins/profiling/scripts/test/e2e --server
```
### Open Cypress dashboard
```
node x-pack/plugins/profiling/scripts/test/e2e --open
```
### Run tests in terminal
```
node x-pack/plugins/profiling/scripts/test/e2e --runner
```
### Run like CI
```
node x-pack/plugins/profiling/scripts/test/e2e
```
## Other resources
- [Official Profiling documentation](https://www.elastic.co/observability/universal-profiling)


@ -7,22 +7,22 @@
"server": true,
"browser": true,
"configPath": ["xpack", "profiling"],
"optionalPlugins": ["spaces"],
"requiredPlugins": [
"navigation",
"charts",
"cloud",
"data",
"kibanaUtils",
"share",
"dataViews",
"features",
"fleet",
"licensing",
"observability",
"observabilityShared",
"features",
"kibanaReact",
"unifiedSearch",
"dataViews",
"charts",
"spaces",
"cloud",
"fleet",
"licensing"
],
"requiredBundles": [
"kibanaReact",
"kibanaUtils",
]
}
}


@ -18,26 +18,25 @@ export const PROFILING_FEATURE = {
order: 1200,
category: DEFAULT_APP_CATEGORIES.observability,
app: [PROFILING_SERVER_FEATURE_ID, 'ux', 'kibana'],
catalogue: [PROFILING_SERVER_FEATURE_ID],
// see x-pack/plugins/features/common/feature_kibana_privileges.ts
privileges: {
all: {
app: [PROFILING_SERVER_FEATURE_ID, 'ux', 'kibana'],
catalogue: [PROFILING_SERVER_FEATURE_ID],
savedObject: {
all: [],
read: [],
},
ui: ['show'],
api: [PROFILING_SERVER_FEATURE_ID],
},
read: {
app: [PROFILING_SERVER_FEATURE_ID, 'ux', 'kibana'],
catalogue: [PROFILING_SERVER_FEATURE_ID],
savedObject: {
all: [],
read: [],
},
ui: ['show'],
api: [PROFILING_SERVER_FEATURE_ID],
},
},
};


@ -9,9 +9,14 @@ import { schema, TypeOf } from '@kbn/config-schema';
import type { PluginConfigDescriptor, PluginInitializerContext } from '@kbn/core/server';
import { ProfilingPlugin } from './plugin';
/**
* These properties are used to create both the Collector and the Symbolizer integrations
* when Universal Profiling is initialized.
* As of now, Universal Profiling is only available on Elastic Cloud, so
* Elastic Cloud is responsible for filling in these properties and passing them to Kibana.
*/
const packageInputSchema = schema.object({
host: schema.maybe(schema.string()),
secret_token: schema.maybe(schema.string()),
tls_enabled: schema.maybe(schema.boolean()),
tls_supported_protocols: schema.maybe(schema.arrayOf(schema.string())),
tls_certificate_path: schema.maybe(schema.string()),
@ -22,12 +27,17 @@ const configSchema = schema.object({
enabled: schema.boolean({ defaultValue: false }),
symbolizer: schema.maybe(packageInputSchema),
collector: schema.maybe(packageInputSchema),
elasticsearch: schema.maybe(
schema.object({
hosts: schema.string(),
username: schema.string(),
password: schema.string(),
})
elasticsearch: schema.conditional(
schema.contextRef('dist'),
schema.literal(true),
schema.never(),
schema.maybe(
schema.object({
hosts: schema.string(),
username: schema.string(),
password: schema.string(),
})
)
),
});
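The `schema.conditional(schema.contextRef('dist'), ...)` pattern above means the `elasticsearch` override block is only accepted in non-distributable (development) builds. A minimal sketch of that behaviour, assuming `@kbn/config-schema` validation with an explicit context; the error message in the comment is indicative only:

```typescript
import { schema } from '@kbn/config-schema';

// Reduced to the relevant field; mirrors the conditional added above.
const exampleSchema = schema.object({
  elasticsearch: schema.conditional(
    schema.contextRef('dist'),
    schema.literal(true),
    schema.never(), // distributable builds: the key must not be set
    schema.maybe(schema.object({ hosts: schema.string() }))
  ),
});

// Development build: the override validates.
exampleSchema.validate({ elasticsearch: { hosts: 'http://localhost:9200' } }, { dist: false });

// Distributable build: the same value fails validation.
try {
  exampleSchema.validate({ elasticsearch: { hosts: 'http://localhost:9200' } }, { dist: true });
} catch (e) {
  // e.message is along the lines of "[elasticsearch]: a value wasn't expected to be present"
}
```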


@ -50,33 +50,6 @@ describe('getVarsFor', () => {
});
});
it('discards secret_token defined and generate a new one', () => {
const config: PackageInputType = {
host: 'example.com',
tls_enabled: true,
tls_supported_protocols: ['foo', 'bar'],
tls_certificate_path: '123',
tls_key_path: '456',
secret_token: 'bar!',
};
const { secret_token: secretToken, ...result } = getVarsFor({
config,
includeSecretToken: true,
});
expect(secretToken?.type).toBe('text');
expect(secretToken?.value).not.toBe('bar!');
expect(secretToken?.value.length).toBe(16);
expect(secretTokenRegex.test(secretToken?.value)).toBeTruthy();
expect(result).toEqual({
host: { type: 'text', value: 'example.com' },
tls_enabled: { type: 'bool', value: true },
tls_supported_protocols: { type: 'text', value: ['foo', 'bar'] },
tls_certificate_path: { type: 'text', value: '123' },
tls_key_path: { type: 'text', value: '456' },
});
});
it('returns vars without secret_token', () => {
const config: PackageInputType = {
host: 'example.com',


@ -161,6 +161,7 @@ export function generateSecretToken() {
return result;
}
type PackagePolicyVars = PackageInputType & { secret_token?: string };
export function getVarsFor({
config,
includeSecretToken,
@ -168,13 +169,13 @@ export function getVarsFor({
config: PackageInputType;
includeSecretToken: boolean;
}) {
const configKeys = Object.keys(config) as Array<keyof PackageInputType>;
const configKeys = Object.keys(config) as Array<keyof PackagePolicyVars>;
if (includeSecretToken) {
configKeys.push('secret_token');
}
return configKeys.reduce<
Partial<Record<keyof PackageInputType, { type: 'text' | 'bool'; value: any }>>
Partial<Record<keyof PackagePolicyVars, { type: 'text' | 'bool'; value: any }>>
>((acc, currKey) => {
const value = currKey === 'secret_token' ? generateSecretToken() : config[currKey];
const type = typeof value === 'boolean' ? 'bool' : 'text';


@ -35,7 +35,6 @@ export class ProfilingPlugin
}
public setup(core: CoreSetup<ProfilingPluginStartDeps>, deps: ProfilingPluginSetupDeps) {
this.logger.debug('profiling: Setup');
const router = core.http.createRouter<ProfilingRequestHandlerContext>();
deps.features.registerKibanaFeature(PROFILING_FEATURE);
@ -80,7 +79,6 @@ export class ProfilingPlugin
}
public start(core: CoreStart) {
this.logger.debug('profiling: Started');
return {};
}


@ -87,9 +87,8 @@ export async function findDownsampledIndex({
});
sampleCountFromInitialExp = resp.hits.total.value;
} catch (e) {
logger.info(e.message);
logger.error(e.message);
}
logger.info('sampleCountFromPow6 ' + sampleCountFromInitialExp);
return getSampledTraceEventsIndex(index, sampleSize, sampleCountFromInitialExp, initialExp);
}


@ -26,6 +26,7 @@ export function registerFlameChartSearchRoute({
router.get(
{
path: paths.Flamechart,
options: { tags: ['access:profiling'] },
validate: {
query: schema.object({
timeFrom: schema.number(),
@ -48,7 +49,6 @@ export function registerFlameChartSearchRoute({
});
const totalSeconds = timeTo - timeFrom;
const t0 = Date.now();
const {
stackTraceEvents,
stackTraces,
@ -61,10 +61,8 @@ export function registerFlameChartSearchRoute({
filter,
sampleSize: targetSampleSize,
});
logger.info(`querying stacktraces took ${Date.now() - t0} ms`);
const flamegraph = await withProfilingSpan('create_flamegraph', async () => {
const t1 = Date.now();
const tree = createCalleeTree(
stackTraceEvents,
stackTraces,
@ -73,20 +71,20 @@ export function registerFlameChartSearchRoute({
totalFrames,
samplingRate
);
logger.info(`creating callee tree took ${Date.now() - t1} ms`);
const t2 = Date.now();
const fg = createBaseFlameGraph(tree, samplingRate, totalSeconds);
logger.info(`creating flamegraph took ${Date.now() - t2} ms`);
return fg;
});
logger.info('returning payload response to client');
return response.ok({ body: flamegraph });
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while fetching flamegraph',
});
}
}
);


@ -34,6 +34,7 @@ export function registerTopNFunctionsSearchRoute({
router.get(
{
path: paths.TopNFunctions,
options: { tags: ['access:profiling'] },
validate: {
query: querySchema,
},
@ -51,16 +52,13 @@ export function registerTopNFunctionsSearchRoute({
kuery,
});
const t0 = Date.now();
const { stackTraceEvents, stackTraces, executables, stackFrames, samplingRate } =
await searchStackTraces({
client: profilingElasticsearchClient,
filter,
sampleSize: targetSampleSize,
});
logger.info(`querying stacktraces took ${Date.now() - t0} ms`);
const t1 = Date.now();
const topNFunctions = await withProfilingSpan('create_topn_functions', async () => {
return createTopNFunctions(
stackTraceEvents,
@ -72,15 +70,17 @@ export function registerTopNFunctionsSearchRoute({
samplingRate
);
});
logger.info(`creating topN functions took ${Date.now() - t1} ms`);
logger.info('returning payload response to client');
return response.ok({
body: topNFunctions,
});
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while fetching TopN functions',
});
}
}
);


@ -1,20 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type { Logger } from '@kbn/core/server';
export async function logExecutionLatency<T>(
logger: Logger,
activity: string,
func: () => Promise<T>
): Promise<T> {
const start = Date.now();
return await func().then((res) => {
logger.info(activity + ' took ' + (Date.now() - start) + 'ms');
return res;
});
}


@ -5,6 +5,7 @@
* 2.0.
*/
import { DEFAULT_SPACE_ID } from '@kbn/spaces-plugin/common';
import { RouteRegisterParameters } from '.';
import { getClient } from './compat';
import { installLatestApmPackage, isApmPackageInstalled } from '../lib/setup/apm_package';
@ -45,6 +46,7 @@ export function registerSetupRoute({
router.get(
{
path: paths.HasSetupESResources,
options: { tags: ['access:profiling'] },
validate: false,
},
async (context, request, response) => {
@ -56,18 +58,18 @@ export function registerSetupRoute({
request,
useDefaultAuth: true,
});
const setupOptions: ProfilingSetupOptions = {
client: clientWithDefaultAuth,
logger,
packagePolicyClient: dependencies.start.fleet.packagePolicyService,
soClient: core.savedObjects.client,
spaceId: dependencies.setup.spaces.spacesService.getSpaceId(request),
spaceId:
dependencies.setup.spaces?.spacesService?.getSpaceId(request) ?? DEFAULT_SPACE_ID,
isCloudEnabled: dependencies.setup.cloud.isCloudEnabled,
config: dependencies.config,
};
logger.info('Checking if Elasticsearch and Fleet are setup for Universal Profiling');
const state = createDefaultSetupState();
state.cloud.available = dependencies.setup.cloud.isCloudEnabled;
@ -102,7 +104,12 @@ export function registerSetupRoute({
},
});
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while checking plugin setup',
});
}
}
);
@ -110,7 +117,8 @@ export function registerSetupRoute({
router.post(
{
path: paths.HasSetupESResources,
validate: {},
options: { tags: ['access:profiling'] },
validate: false,
},
async (context, request, response) => {
try {
@ -126,13 +134,12 @@ export function registerSetupRoute({
logger,
packagePolicyClient: dependencies.start.fleet.packagePolicyService,
soClient: core.savedObjects.client,
spaceId: dependencies.setup.spaces.spacesService.getSpaceId(request),
spaceId:
dependencies.setup.spaces?.spacesService?.getSpaceId(request) ?? DEFAULT_SPACE_ID,
isCloudEnabled: dependencies.setup.cloud.isCloudEnabled,
config: dependencies.config,
};
logger.info('Setting up Elasticsearch and Fleet for Universal Profiling');
const state = createDefaultSetupState();
state.cloud.available = dependencies.setup.cloud.isCloudEnabled;
@ -179,7 +186,12 @@ export function registerSetupRoute({
// and is not guaranteed to complete before Kibana sends a response.
return response.accepted();
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while setting up Universal Profiling',
});
}
}
);
@ -187,6 +199,7 @@ export function registerSetupRoute({
router.get(
{
path: paths.SetupDataCollectionInstructions,
options: { tags: ['access:profiling'] },
validate: false,
},
async (context, request, response) => {
@ -198,7 +211,12 @@ export function registerSetupRoute({
return response.ok({ body: setupInstructions });
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while fetching Universal Profiling instructions',
});
}
}
);


@ -100,7 +100,6 @@ export async function topNElasticSearchQuery({
}
let totalSampledStackTraces = aggregations.total_count.value ?? 0;
logger.info('total sampled stacktraces: ' + totalSampledStackTraces);
totalSampledStackTraces = Math.floor(totalSampledStackTraces / eventsIndex.sampleRate);
if (searchField !== ProfilingESField.StacktraceID) {
@ -139,8 +138,6 @@ export async function topNElasticSearchQuery({
return groupStackFrameMetadataByStackTrace(stackTraces, stackFrames, executables);
});
logger.info('returning payload response to client');
return {
TotalCount: totalSampledStackTraces,
TopN: topN,
@ -164,6 +161,7 @@ export function queryTopNCommon({
router.get(
{
path: pathName,
options: { tags: ['access:profiling'] },
validate: {
query: schema.object({
timeFrom: schema.number(),
@ -189,7 +187,12 @@ export function queryTopNCommon({
}),
});
} catch (error) {
return handleRouteHandlerError({ error, logger, response });
return handleRouteHandlerError({
error,
logger,
response,
message: 'Error while fetching TopN functions',
});
}
}
);


@ -15,17 +15,17 @@ import { FleetSetupContract, FleetStartContract } from '@kbn/fleet-plugin/server
export interface ProfilingPluginSetupDeps {
observability: ObservabilityPluginSetup;
features: FeaturesPluginSetup;
spaces: SpacesPluginSetup;
cloud: CloudSetup;
fleet: FleetSetupContract;
spaces?: SpacesPluginSetup;
}
export interface ProfilingPluginStartDeps {
observability: {};
features: {};
spaces: SpacesPluginStart;
cloud: CloudStart;
fleet: FleetStartContract;
spaces?: SpacesPluginStart;
}
// eslint-disable-next-line @typescript-eslint/no-empty-interface


@ -14,10 +14,12 @@ export function handleRouteHandlerError({
error,
logger,
response,
message,
}: {
error: any;
response: KibanaResponseFactory;
logger: Logger;
message: string;
}) {
if (
error instanceof WrappedElasticsearchClientError &&
@ -34,8 +36,6 @@ export function handleRouteHandlerError({
return response.customError({
statusCode: error.statusCode ?? 500,
body: {
message: error.message,
},
body: { message },
});
}