Mirror of https://github.com/elastic/kibana.git (synced 2025-04-23 17:28:26 -04:00)
[Obs AI Assistant] Minor cleanup api tests (#215181)
- Remove duplicated helpers
- Improve log messages
- Clear intercepted requests after every test

Co-authored-by: Viduni Wickramarachchi <viduni.ushanka@gmail.com>
parent d8f6bd694b
commit b9cd2c8040
11 changed files with 128 additions and 142 deletions
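The key pattern in this commit is clearing the LLM proxy's intercepted requests after every test. Below is a minimal sketch of that pattern, assuming the LlmProxy helper shown in the diff; the import paths, suite name, and the single assertion are illustrative placeholders, not taken from this commit.

import expect from '@kbn/expect';
import {
  createLlmProxy,
  LlmProxy,
} from '../../common/create_llm_proxy'; // hypothetical relative path
import type { DeploymentAgnosticFtrProviderContext } from '../../ftr_provider_context'; // hypothetical relative path

export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderContext) {
  const log = getService('log');

  describe('example suite using LlmProxy', () => {
    let llmProxy: LlmProxy;

    before(async () => {
      llmProxy = await createLlmProxy(log);
    });

    // Clear intercepted requests after every test so one test's LLM traffic
    // does not leak into the assertions of the next test.
    afterEach(() => {
      llmProxy.clear();
    });

    after(() => {
      llmProxy.close();
    });

    it('starts every test with an empty list of intercepted requests', () => {
      expect(llmProxy.interceptedRequests.length).to.be(0);
    });
  });
}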
@@ -31,7 +31,7 @@ export function createTokenLimitReachedError(
 export function createToolNotFoundError(name: string): ChatCompletionToolNotFoundError {
   return new InferenceTaskError(
     ChatCompletionErrorCode.ToolNotFoundError,
-    `Tool ${name} called but was not available`,
+    `Tool "${name}" called but was not available`,
     {
       name,
     }
@@ -53,7 +53,9 @@ describe('validateToolCalls', () => {
           },
         },
       })
-    ).toThrowErrorMatchingInlineSnapshot(`"Tool my_unknown_function called but was not available"`);
+    ).toThrowErrorMatchingInlineSnapshot(
+      `"Tool \\"my_unknown_function\\" called but was not available"`
+    );
   });
 
   it('throws an error if invalid JSON was generated', () => {
@@ -167,7 +167,7 @@ export function createInternalServerError(
 export function createFunctionNotFoundError(name: string) {
   return new ChatCompletionError(
     ChatCompletionErrorCode.FunctionNotFoundError,
-    `Function ${name} called but was not available`
+    `Function "${name}" called but was not available`
   );
 }
 
@@ -20,7 +20,6 @@ import { ObservabilityAIAssistantScreenContextRequest } from '@kbn/observability
 import {
   createLlmProxy,
   LlmProxy,
-  ToolMessage,
 } from '../../../../../../observability_ai_assistant_api_integration/common/create_llm_proxy';
 import type { DeploymentAgnosticFtrProviderContext } from '../../../../ftr_provider_context';
 import { SupertestWithRoleScope } from '../../../../services/role_scoped_supertest';
@@ -55,14 +54,9 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
   let proxy: LlmProxy;
   let connectorId: string;
 
-  async function getEvents(
-    params: { screenContexts?: ObservabilityAIAssistantScreenContextRequest[] },
-    title: string,
-    conversationResponse: string | ToolMessage
-  ) {
-    void proxy.interceptTitle(title);
-    void proxy.interceptConversation(conversationResponse);
-
+  async function getEvents(params: {
+    screenContexts?: ObservabilityAIAssistantScreenContextRequest[];
+  }) {
     const supertestEditorWithCookieCredentials: SupertestWithRoleScope =
       await roleScopedSupertest.getSupertestWithRoleScope('editor', {
         useCookieHeader: true,
@@ -82,12 +76,7 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
 
     await proxy.waitForAllInterceptorsToHaveBeenCalled();
 
-    return String(response.body)
-      .split('\n')
-      .map((line) => line.trim())
-      .filter(Boolean)
-      .map((line) => JSON.parse(line) as StreamingChatResponseEvent)
-      .slice(2); // ignore context request/response, we're testing this elsewhere
+    return decodeEvents(response.body).slice(2); // ignore context request/response, we're testing this elsewhere
   }
 
   before(async () => {
@@ -261,12 +250,12 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       let events: StreamingChatResponseEvent[];
 
       before(async () => {
-        events = await getEvents({}, 'Title for at new conversation', 'Hello again').then(
-          (_events) => {
-            return _events.filter(
-              (event) => event.type !== StreamingChatResponseEventType.BufferFlush
-            );
-          }
+        void proxy.interceptTitle('Title for a new conversation');
+        void proxy.interceptConversation('Hello again');
+
+        const allEvents = await getEvents({});
+        events = allEvents.filter(
+          (event) => event.type !== StreamingChatResponseEventType.BufferFlush
         );
       });
 
@@ -309,7 +298,7 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
         expect(omit(events[4], 'conversation.id', 'conversation.last_updated')).to.eql({
           type: StreamingChatResponseEventType.ConversationCreate,
           conversation: {
-            title: 'Title for at new conversation',
+            title: 'Title for a new conversation',
           },
         });
       });
@@ -323,41 +312,32 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       let events: StreamingChatResponseEvent[];
 
       before(async () => {
-        events = await getEvents(
-          {
-            screenContexts: [
-              {
-                actions: [
-                  {
-                    name: 'my_action',
-                    description: 'My action',
-                    parameters: {
-                      type: 'object',
-                      properties: {
-                        foo: {
-                          type: 'string',
-                        },
-                      },
-                    },
-                  },
-                ],
-              },
-            ],
-          },
-          'Title for conversation with screen context action',
-          {
-            tool_calls: [
-              {
-                toolCallId: 'fake-id',
-                index: 1,
-                function: {
-                  name: 'my_action',
-                  arguments: JSON.stringify({ foo: 'bar' }),
-                },
-              },
-            ],
-          }
-        );
+        void proxy.interceptTitle('Title for conversation with screen context action');
+        void proxy.interceptWithFunctionRequest({
+          name: 'my_action',
+          arguments: () => JSON.stringify({ foo: 'bar' }),
+        });
+
+        events = await getEvents({
+          screenContexts: [
+            {
+              actions: [
+                {
+                  name: 'my_action',
+                  description: 'My action',
+                  parameters: {
+                    type: 'object',
+                    properties: {
+                      foo: {
+                        type: 'string',
+                      },
+                    },
+                  },
+                },
+              ],
+            },
+          ],
+        });
       });
 
       it('closes the stream without persisting the conversation', () => {
@@ -477,9 +457,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       });
     });
 
-    // todo
-    it.skip('executes a function', async () => {});
-
     describe('security roles and access privileges', () => {
       it('should deny access for users without the ai_assistant privilege', async () => {
         const { status } = await observabilityAIAssistantAPIClient.viewer({
@@ -113,6 +113,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       await clearKnowledgeBase(es);
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     describe('calling the context function via /chat/complete', () => {
       let firstRequestBody: ChatCompletionStreamParams;
       let secondRequestBody: ChatCompletionStreamParams;
@@ -43,6 +43,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       });
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     // Calling `execute_query` via the chat/complete endpoint
     describe('POST /internal/observability_ai_assistant/chat/complete', function () {
       let messageAddedEvents: MessageAddEvent[];
@@ -62,7 +66,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       void llmProxy.interceptWithFunctionRequest({
         name: 'query',
         arguments: () => JSON.stringify({}),
-        when: () => true,
       });
 
       void llmProxy.interceptWithFunctionRequest({
@@ -81,10 +84,9 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
           | SORT @timestamp DESC
           | LIMIT 10`,
         }),
-        when: () => true,
       });
 
-      void llmProxy.interceptConversation({ content: 'Hello from user' });
+      void llmProxy.interceptConversation('Hello from user');
 
       ({ messageAddedEvents } = await chatComplete({
         userPrompt: 'Please retrieve the most recent Apache log messages',
@@ -103,6 +105,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       await logsSynthtraceEsClient.clean();
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     it('makes 4 requests to the LLM', () => {
       expect(llmProxy.interceptedRequests.length).to.be(4);
     });
@@ -54,7 +54,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       void llmProxy.interceptWithFunctionRequest({
         name: 'get_alerts_dataset_info',
         arguments: () => JSON.stringify({ start: 'now-10d', end: 'now' }),
-        when: () => true,
       });
 
       ({ getRelevantFields } = llmProxy.interceptSelectRelevantFieldsToolChoice());
@@ -62,7 +61,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       void llmProxy.interceptWithFunctionRequest({
         name: 'alerts',
         arguments: () => JSON.stringify({ start: 'now-10d', end: 'now' }),
-        when: () => true,
       });
 
       void llmProxy.interceptConversation(
@@ -95,6 +93,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       await samlAuth.invalidateM2mApiKeyWithRoleScope(roleAuthc);
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     describe('POST /internal/observability_ai_assistant/chat/complete', () => {
       let firstRequestBody: ChatCompletionStreamParams;
       let secondRequestBody: ChatCompletionStreamParams;
@@ -44,6 +44,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       });
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     // Calling `get_dataset_info` via the chat/complete endpoint
     describe('POST /internal/observability_ai_assistant/chat/complete', function () {
       let messageAddedEvents: MessageAddEvent[];
@@ -62,7 +66,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       void llmProxy.interceptWithFunctionRequest({
         name: 'get_dataset_info',
         arguments: () => JSON.stringify({ index: 'logs*' }),
-        when: () => true,
       });
 
       ({ getRelevantFields } = llmProxy.interceptSelectRelevantFieldsToolChoice());
@@ -86,6 +89,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       await logsSynthtraceEsClient.clean();
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     it('makes 3 requests to the LLM', () => {
       expect(llmProxy.interceptedRequests.length).to.be(3);
     });
@@ -51,6 +51,10 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       });
     });
 
+    afterEach(async () => {
+      llmProxy.clear();
+    });
+
     it('makes 1 requests to the LLM', () => {
       expect(llmProxy.interceptedRequests.length).to.be(1);
     });
@@ -87,7 +91,6 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       void llmProxy.interceptWithFunctionRequest({
         name: 'retrieve_elastic_doc',
         arguments: () => JSON.stringify({}),
-        when: () => true,
       });
 
       void llmProxy.interceptConversation('Hello from LLM Proxy');
@@ -15,14 +15,12 @@ import {
   type StreamingChatResponseEvent,
 } from '@kbn/observability-ai-assistant-plugin/common/conversation_complete';
 import { type Instruction } from '@kbn/observability-ai-assistant-plugin/common/types';
-import type { ChatCompletionChunkToolCall } from '@kbn/inference-common';
-import { ChatCompletionStreamParams } from 'openai/lib/ChatCompletionStream';
 import {
   createLlmProxy,
   LlmProxy,
-  ToolMessage,
 } from '../../../../../../observability_ai_assistant_api_integration/common/create_llm_proxy';
 import type { DeploymentAgnosticFtrProviderContext } from '../../../../ftr_provider_context';
+import { decodeEvents } from '../utils/conversation';
 
 export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderContext) {
   const log = getService('log');
@@ -41,23 +39,20 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
   describe('/api/observability_ai_assistant/chat/complete', function () {
     // Fails on MKI: https://github.com/elastic/kibana/issues/205581
     this.tags(['failsOnMKI']);
-    let proxy: LlmProxy;
+    let llmProxy: LlmProxy;
     let connectorId: string;
 
-    async function addInterceptorsAndCallComplete({
+    async function callPublicChatComplete({
       actions,
       instructions,
       format = 'default',
-      conversationResponse,
+      persist = true,
     }: {
       actions?: Array<Pick<FunctionDefinition, 'name' | 'description' | 'parameters'>>;
       instructions?: Array<string | Instruction>;
       format?: 'openai' | 'default';
-      conversationResponse: string | ToolMessage;
+      persist?: boolean;
     }) {
-      const titleSimulatorPromise = proxy.interceptTitle('My Title');
-      const conversationSimulatorPromise = proxy.interceptConversation(conversationResponse);
-
       const response = await observabilityAIAssistantAPIClient.admin({
         endpoint: 'POST /api/observability_ai_assistant/chat/complete 2023-10-31',
         params: {
@@ -65,38 +60,20 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
           body: {
             messages,
             connectorId,
-            persist: true,
+            persist,
             actions,
             instructions,
           },
         },
       });
 
-      await proxy.waitForAllInterceptorsToHaveBeenCalled();
-
-      const titleSimulator = await titleSimulatorPromise;
-      const conversationSimulator = await conversationSimulatorPromise;
-
-      return {
-        titleSimulator,
-        conversationSimulator,
-        responseBody: String(response.body),
-      };
-    }
-
-    function getEventsFromBody(body: string) {
-      return body
-        .split('\n')
-        .map((line) => line.trim())
-        .filter(Boolean)
-        .map((line) => JSON.parse(line) as StreamingChatResponseEvent)
-        .slice(2); // ignore context request/response, we're testing this elsewhere
+      return String(response.body);
     }
 
     before(async () => {
-      proxy = await createLlmProxy(log);
+      llmProxy = await createLlmProxy(log);
       connectorId = await observabilityAIAssistantAPIClient.createProxyActionConnector({
-        port: proxy.getPort(),
+        port: llmProxy.getPort(),
       });
     });
 
@@ -104,7 +81,7 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       await observabilityAIAssistantAPIClient.deleteActionConnector({
         actionId: connectorId,
       });
-      proxy.close();
+      llmProxy.close();
     });
 
     const action = {
@@ -120,27 +97,27 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       },
     } as const;
 
-    const toolCallMock: ChatCompletionChunkToolCall = {
-      toolCallId: 'fake-index',
-      index: 0,
-      function: {
-        name: 'my_action',
-        arguments: JSON.stringify({ foo: 'bar' }),
-      },
-    };
+    afterEach(async () => {
+      llmProxy.clear();
+    });
 
     describe('after executing an action and closing the stream', () => {
       let events: StreamingChatResponseEvent[];
 
       before(async () => {
-        const { responseBody } = await addInterceptorsAndCallComplete({
-          actions: [action],
-          conversationResponse: {
-            tool_calls: [toolCallMock],
-          },
+        void llmProxy.interceptTitle('My Title');
+        void llmProxy.interceptWithFunctionRequest({
+          name: 'my_action',
+          arguments: () => JSON.stringify({ foo: 'bar' }),
         });
 
-        events = getEventsFromBody(responseBody);
+        const responseBody = await callPublicChatComplete({
+          actions: [action],
+        });
+
+        await llmProxy.waitForAllInterceptorsToHaveBeenCalled();
+
+        events = decodeEvents(responseBody);
       });
 
       it('does not persist the conversation (the last event is not a conversationUpdated event)', () => {
@@ -149,29 +126,31 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
        expect(lastEvent.type).to.be('messageAdd');
        expect(lastEvent.message.message.function_call).to.eql({
          name: 'my_action',
-          arguments: toolCallMock.function.arguments,
+          arguments: JSON.stringify({ foo: 'bar' }),
          trigger: MessageRole.Assistant,
        });
       });
     });
 
     describe('after adding an instruction', () => {
-      let body: ChatCompletionStreamParams;
-
       before(async () => {
-        const { conversationSimulator } = await addInterceptorsAndCallComplete({
-          instructions: ['This is a random instruction'],
-          actions: [action],
-          conversationResponse: {
-            tool_calls: [toolCallMock],
-          },
+        void llmProxy.interceptWithFunctionRequest({
+          name: 'my_action',
+          arguments: () => JSON.stringify({ foo: 'bar' }),
         });
 
-        body = conversationSimulator.requestBody;
+        await callPublicChatComplete({
+          instructions: ['This is a random instruction'],
+          actions: [action],
+          persist: false,
+        });
+
+        await llmProxy.waitForAllInterceptorsToHaveBeenCalled();
       });
 
       it('includes the instruction in the system message', async () => {
-        expect(body.messages[0].content).to.contain('This is a random instruction');
+        const { requestBody } = llmProxy.interceptedRequests[0];
+        expect(requestBody.messages[0].content).to.contain('This is a random instruction');
       });
     });
 
@@ -179,10 +158,12 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       let responseBody: string;
 
       before(async () => {
-        ({ responseBody } = await addInterceptorsAndCallComplete({
-          format: 'openai',
-          conversationResponse: 'Hello',
-        }));
+        void llmProxy.interceptTitle('My Title');
+        void llmProxy.interceptConversation('Hello');
+
+        responseBody = await callPublicChatComplete({ format: 'openai' });
+
+        await llmProxy.waitForAllInterceptorsToHaveBeenCalled();
       });
 
       function extractDataParts(lines: string[]) {
@@ -194,12 +175,12 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
         });
       }
 
-      function getLines() {
-        return responseBody.split('\n\n').filter(Boolean);
+      function getLines(str: string) {
+        return str.split('\n\n').filter(Boolean);
       }
 
       it('outputs each line an SSE-compatible format (data: ...)', () => {
-        const lines = getLines();
+        const lines = getLines(responseBody);
 
         lines.forEach((line) => {
           expect(line.match(/^data: /));
@@ -207,14 +188,14 @@ export default function ApiTest({ getService }: DeploymentAgnosticFtrProviderCon
       });
 
       it('ouputs one chunk, and one [DONE] event', () => {
-        const dataParts = extractDataParts(getLines());
+        const dataParts = extractDataParts(getLines(responseBody));
 
         expect(dataParts[0]).not.to.be.empty();
         expect(dataParts[1]).to.be('[DONE]');
       });
 
       it('outuputs an OpenAI-compatible chunk', () => {
-        const [dataLine] = extractDataParts(getLines());
+        const [dataLine] = extractDataParts(getLines(responseBody));
 
         expect(() => {
           JSON.parse(dataLine);
|
@ -90,11 +90,11 @@ export class LlmProxy {
|
|||
|
||||
const errorMessage = `No interceptors found to handle request: ${request.method} ${request.url}`;
|
||||
const availableInterceptorNames = this.interceptors.map(({ name }) => name);
|
||||
this.log.error(
|
||||
this.log.warning(
|
||||
`Available interceptors: ${JSON.stringify(availableInterceptorNames, null, 2)}`
|
||||
);
|
||||
|
||||
this.log.error(
|
||||
this.log.warning(
|
||||
`${errorMessage}. Messages: ${JSON.stringify(requestBody.messages, null, 2)}`
|
||||
);
|
||||
response.writeHead(500, {
|
||||
|
@@ -122,6 +122,7 @@ export class LlmProxy {
     this.log.debug(`Closing LLM Proxy on port ${this.port}`);
     clearInterval(this.interval);
     this.server.close();
+    this.clear();
   }
 
   waitForAllInterceptorsToHaveBeenCalled() {
@@ -149,7 +150,7 @@ export class LlmProxy {
   }
 
   interceptConversation(
-    msg: LLMMessage,
+    msg: string | string[],
     {
       name,
     }: {
@@ -157,7 +158,7 @@ export class LlmProxy {
     } = {}
   ) {
     return this.intercept(
-      `Conversation interceptor: "${name ?? 'Unnamed'}"`,
+      `Conversation: "${name ?? isString(msg) ? msg.slice(0, 80) : `${msg.length} chunks`}"`,
       // @ts-expect-error
       (body) => body.tool_choice?.function?.name === undefined,
       msg
@@ -165,16 +166,18 @@ export class LlmProxy {
   }
 
   interceptWithFunctionRequest({
-    name: name,
+    name,
     arguments: argumentsCallback,
-    when,
+    when = () => true,
+    interceptorName,
   }: {
     name: string;
     arguments: (body: ChatCompletionStreamParams) => string;
-    when: RequestInterceptor['when'];
+    when?: RequestInterceptor['when'];
+    interceptorName?: string;
   }) {
     // @ts-expect-error
-    return this.intercept(`Function request interceptor: "${name}"`, when, (body) => {
+    return this.intercept(interceptorName ?? `Function request: "${name}"`, when, (body) => {
       return {
         content: '',
         tool_calls: [
@@ -247,6 +250,7 @@ export class LlmProxy {
   interceptTitle(title: string) {
     return this.interceptWithFunctionRequest({
       name: TITLE_CONVERSATION_FUNCTION_NAME,
+      interceptorName: `Title: "${title}"`,
       arguments: () => JSON.stringify({ title }),
       // @ts-expect-error
       when: (body) => body.tool_choice?.function?.name === TITLE_CONVERSATION_FUNCTION_NAME,
@@ -278,7 +282,7 @@ export class LlmProxy {
           requestBody,
           status: once((status: number) => {
             response.writeHead(status, {
-              'Elastic-Interceptor': name,
+              'Elastic-Interceptor': name.replace(/[^\x20-\x7E]/g, ' '), // Keeps only alphanumeric characters and spaces
              'Content-Type': 'text/event-stream',
              'Cache-Control': 'no-cache',
              Connection: 'keep-alive',