[Bedrock chat] Fixes for BedrockChat model + tools (#189227)

## Summary

BedrockChat was not working as expected when tools were triggered. For
example, trying to invoke the alerts tools caused this error:
<img width="1059" alt="Screenshot 2024-07-25 at 11 21 52 AM"
src="https://github.com/user-attachments/assets/fe302d41-482c-4a65-abc1-8d669abf928d">
 

This was neither a helpful nor an accurate error message to display. I fixed
the error handling to bubble up the actual error:
<img width="857" alt="Screenshot 2024-07-25 at 11 26 08 AM"
src="https://github.com/user-attachments/assets/a4135269-6bcb-436f-bd17-f817c06f3778">

The underlying error points to the BedrockChat model itself not behaving
properly. @jacoblee93 is looking into a fix on the LangChain (LC) side.
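
For reference, the shape of the error-handling change (the full diff is below): the connector's action result is typed with its optional `message` and `serviceMessage` fields, and we throw as soon as `status` is `'error'`, so the real connector error surfaces instead of a downstream parsing failure. A minimal sketch, where `executeAction` is just a stand-in for the actual connector call:

```ts
// Minimal sketch of the pattern added to ActionsClientBedrockChatModel;
// `executeAction` is a placeholder for the real connector/action call.
const data = (await executeAction(params)) as {
  status: string;
  data: { message: string };
  message?: string;
  serviceMessage?: string;
};

// Bubble up the real connector error instead of a confusing downstream failure.
if (data.status === 'error') {
  throw new Error(
    `ActionsClientBedrockChat: action result status is error: ${data?.message} - ${data?.serviceMessage}`
  );
}
```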

In the meantime, I changed the prompt around and was able to coerce the
correct responses out of Sonnet 3.5 and Opus for the ESQL and RAG alerts
questions James was testing with:

<img width="1708" alt="alerts-opus"
src="https://github.com/user-attachments/assets/47df0500-6e2f-477b-a3b9-da792f1d9b03">
<img width="1510" alt="alerts-sonnet"
src="https://github.com/user-attachments/assets/0efd5519-196b-4309-9300-194196e5dde3">
<img width="1573" alt="esql-opus"
src="https://github.com/user-attachments/assets/d7f519be-262c-4767-9d84-3fcb4fde089c">
<img width="1580" alt="Screenshot 2024-07-25 at 3 40 05 PM"
src="https://github.com/user-attachments/assets/9d408ab8-2fd5-4bbb-bb2e-bb54e0277717">
Commit dd9e94d189 (parent 0a1db3040d), authored by Steph Milovic on 2024-07-26 00:26:54 -05:00 and committed by GitHub.
3 changed files with 48 additions and 14 deletions


@@ -51,7 +51,13 @@ export class ActionsClientBedrockChatModel extends _BedrockChat {
             anthropicVersion: inputBody.anthropic_version,
           },
         },
-      })) as { data: Readable };
+      })) as { data: Readable; status: string; message?: string; serviceMessage?: string };
+      if (data.status === 'error') {
+        throw new Error(
+          `ActionsClientBedrockChat: action result status is error: ${data?.message} - ${data?.serviceMessage}`
+        );
+      }
       return {
         body: Readable.toWeb(data.data),
@@ -72,7 +78,18 @@ export class ActionsClientBedrockChatModel extends _BedrockChat {
             anthropicVersion: inputBody.anthropic_version,
           },
         },
-      })) as { status: string; data: { message: string } };
+      })) as {
+        status: string;
+        data: { message: string };
+        message?: string;
+        serviceMessage?: string;
+      };
+      if (data.status === 'error') {
+        throw new Error(
+          `ActionsClientBedrockChat: action result status is error: ${data?.message} - ${data?.serviceMessage}`
+        );
+      }
       return {
         ok: data.status === 'ok',


@@ -14,12 +14,16 @@ import {
   createToolCallingAgent,
 } from 'langchain/agents';
 import { APMTracer } from '@kbn/langchain/server/tracers/apm';
-import { ChatPromptTemplate } from '@langchain/core/prompts';
 import { getLlmClass } from '../../../../routes/utils';
 import { EsAnonymizationFieldsSchema } from '../../../../ai_assistant_data_clients/anonymization_fields/types';
 import { AssistantToolParams } from '../../../../types';
 import { AgentExecutor } from '../../executors/types';
-import { openAIFunctionAgentPrompt, structuredChatAgentPrompt } from './prompts';
+import {
+  bedrockToolCallingAgentPrompt,
+  geminiToolCallingAgentPrompt,
+  openAIFunctionAgentPrompt,
+  structuredChatAgentPrompt,
+} from './prompts';
 import { getDefaultAssistantGraph } from './graph';
 import { invokeGraph, streamGraph } from './helpers';
 import { transformESSearchToAnonymizationFields } from '../../../../ai_assistant_data_clients/anonymization_fields/helpers';
@@ -126,16 +130,8 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
     ? createToolCallingAgent({
         llm,
         tools,
-        prompt: ChatPromptTemplate.fromMessages([
-          [
-            'system',
-            'You are a helpful assistant. ALWAYS use the provided tools. Use tools as often as possible, as they have access to the latest data and syntax.\n\n' +
-              `The final response will be the only output the user sees and should be a complete answer to the user's question, as if you were responding to the user's initial question, which is "{input}". The final response should never be empty.`,
-          ],
-          ['placeholder', '{chat_history}'],
-          ['human', '{input}'],
-          ['placeholder', '{agent_scratchpad}'],
-        ]),
+        prompt:
+          llmType === 'bedrock' ? bedrockToolCallingAgentPrompt : geminiToolCallingAgentPrompt,
         streamRunnable: isStream,
       })
     : await createStructuredChatAgent({


@@ -14,6 +14,27 @@ export const openAIFunctionAgentPrompt = ChatPromptTemplate.fromMessages([
   ['placeholder', '{agent_scratchpad}'],
 ]);
 
+export const bedrockToolCallingAgentPrompt = ChatPromptTemplate.fromMessages([
+  [
+    'system',
+    'You are a helpful assistant. ALWAYS use the provided tools. Use tools as often as possible, as they have access to the latest data and syntax.',
+  ],
+  ['placeholder', '{chat_history}'],
+  ['human', '{input}'],
+  ['placeholder', '{agent_scratchpad}'],
+]);
+
+export const geminiToolCallingAgentPrompt = ChatPromptTemplate.fromMessages([
+  [
+    'system',
+    'You are a helpful assistant. ALWAYS use the provided tools. Use tools as often as possible, as they have access to the latest data and syntax.\n\n' +
+      `The final response will be the only output the user sees and should be a complete answer to the user's question, as if you were responding to the user's initial question, which is "{input}". The final response should never be empty.`,
+  ],
+  ['placeholder', '{chat_history}'],
+  ['human', '{input}'],
+  ['placeholder', '{agent_scratchpad}'],
+]);
+
export const structuredChatAgentPrompt = ChatPromptTemplate.fromMessages([
[
'system',