[Security solution] Remove isEnabledLangChain from UI and telemetry (#188990)

Steph Milovic authored 2024-07-23 16:16:41 -05:00 (committed by GitHub)
parent c17d335b48
commit 369eef0106
No known key found for this signature in database (GPG key ID: B5690EEEBB952194)
26 changed files with 53 additions and 775 deletions

View file

@@ -20,8 +20,6 @@ describe('AlertsSettings', () => {
it('updates the knowledgeBase settings when the alerts range slider is changed', () => {
const setUpdatedKnowledgeBaseSettings = jest.fn();
const knowledgeBase: KnowledgeBaseConfig = {
isEnabledRAGAlerts: true,
isEnabledKnowledgeBase: false,
latestAlerts: DEFAULT_LATEST_ALERTS,
};
@@ -36,8 +34,6 @@ describe('AlertsSettings', () => {
fireEvent.change(rangeSlider, { target: { value: '10' } });
expect(setUpdatedKnowledgeBaseSettings).toHaveBeenCalledWith({
isEnabledRAGAlerts: true,
isEnabledKnowledgeBase: false,
latestAlerts: 10,
});
});
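(Throughout this commit, every isEnabled* line shown in a hunk is a removal.) The mock and the assertion in this test therefore reduce to:

const knowledgeBase: KnowledgeBaseConfig = {
  latestAlerts: DEFAULT_LATEST_ALERTS,
};
// ...
expect(setUpdatedKnowledgeBaseSettings).toHaveBeenCalledWith({
  latestAlerts: 10,
});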

View file

@@ -63,7 +63,6 @@ describe('use chat send', () => {
assistantTelemetry: {
reportAssistantMessageSent,
},
knowledgeBase: { isEnabledKnowledgeBase: false, isEnabledRAGAlerts: false },
});
});
it('handleOnChatCleared clears the conversation', async () => {
@@ -150,8 +149,6 @@ describe('use chat send', () => {
expect(reportAssistantMessageSent).toHaveBeenNthCalledWith(1, {
conversationId: testProps.currentConversation?.title,
role: 'user',
isEnabledKnowledgeBase: false,
isEnabledRAGAlerts: false,
actionTypeId: '.gen-ai',
model: undefined,
provider: 'OpenAI',
@@ -159,8 +156,6 @@ describe('use chat send', () => {
expect(reportAssistantMessageSent).toHaveBeenNthCalledWith(2, {
conversationId: testProps.currentConversation?.title,
role: 'assistant',
isEnabledKnowledgeBase: false,
isEnabledRAGAlerts: false,
actionTypeId: '.gen-ai',
model: undefined,
provider: 'OpenAI',

View file

@@ -57,11 +57,7 @@ export const useChatSend = ({
setUserPrompt,
setCurrentConversation,
}: UseChatSendProps): UseChatSend => {
const {
assistantTelemetry,
knowledgeBase: { isEnabledKnowledgeBase, isEnabledRAGAlerts },
toasts,
} = useAssistantContext();
const { assistantTelemetry, toasts } = useAssistantContext();
const { isLoading, sendMessage, abortStream } = useSendMessage();
const { clearConversation, removeLastMessage } = useConversation();
@@ -129,8 +125,6 @@ export const useChatSend = ({
assistantTelemetry?.reportAssistantMessageSent({
conversationId: currentConversation.title,
role: userMessage.role,
isEnabledKnowledgeBase,
isEnabledRAGAlerts,
actionTypeId: currentConversation.apiConfig.actionTypeId,
model: currentConversation.apiConfig.model,
provider: currentConversation.apiConfig.provider,
@@ -149,8 +143,6 @@ export const useChatSend = ({
actionTypeId: currentConversation.apiConfig.actionTypeId,
model: currentConversation.apiConfig.model,
provider: currentConversation.apiConfig.provider,
isEnabledKnowledgeBase,
isEnabledRAGAlerts,
});
},
[
@@ -159,8 +151,6 @@ export const useChatSend = ({
currentConversation,
editingSystemPromptId,
http,
isEnabledKnowledgeBase,
isEnabledRAGAlerts,
selectedPromptContexts,
sendMessage,
setCurrentConversation,
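With the knowledgeBase destructuring removed from useChatSend, the message-sent event carries only conversation and connector metadata; the call, reconstructed from the hunks above:

assistantTelemetry?.reportAssistantMessageSent({
  conversationId: currentConversation.title,
  role: userMessage.role,
  actionTypeId: currentConversation.apiConfig.actionTypeId,
  model: currentConversation.apiConfig.model,
  provider: currentConversation.apiConfig.provider,
});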

View file

@@ -118,7 +118,6 @@ const AssistantComponent: React.FC<Props> = ({
assistantAvailability: { isAssistantEnabled },
getComments,
http,
knowledgeBase: { isEnabledKnowledgeBase, isEnabledRAGAlerts },
promptContexts,
setLastConversationId,
getLastConversationId,
@@ -585,7 +584,6 @@ const AssistantComponent: React.FC<Props> = ({
showAnonymizedValues,
refetchCurrentConversation,
regenerateMessage: handleRegenerateResponse,
isEnabledLangChain: isEnabledKnowledgeBase || isEnabledRAGAlerts,
isFetchingResponse: isLoadingChatSend,
setIsStreaming,
currentUserAvatar,
@@ -612,8 +610,6 @@ const AssistantComponent: React.FC<Props> = ({
showAnonymizedValues,
refetchCurrentConversation,
handleRegenerateResponse,
isEnabledKnowledgeBase,
isEnabledRAGAlerts,
isLoadingChatSend,
currentUserAvatar,
selectedPromptContextsCount,

View file

@@ -74,10 +74,6 @@ const mockUseAssistantContext = {
},
],
setAllSystemPrompts: jest.fn(),
knowledgeBase: {
isEnabledRAGAlerts: false,
isEnabledKnowledgeBase: false,
},
};
jest.mock('../../../../assistant_context', () => {
const original = jest.requireActual('../../../../assistant_context');

View file

@@ -26,9 +26,6 @@ const mockUseAssistantContext = {
setSelectedSettingsTab,
promptContexts: {},
allQuickPrompts: MOCK_QUICK_PROMPTS,
knowledgeBase: {
isEnabledKnowledgeBase: true,
},
};
const testTitle = 'SPL_QUERY_CONVERSION_TITLE';

View file

@@ -43,14 +43,13 @@ export const QuickPrompts: React.FC<QuickPromptsProps> = React.memo(
({ setInput, setIsSettingsModalVisible, trackPrompt, allPrompts }) => {
const [quickPromptsContainerRef, { width }] = useMeasure();
const { knowledgeBase, promptContexts, setSelectedSettingsTab } = useAssistantContext();
const { promptContexts, setSelectedSettingsTab } = useAssistantContext();
const contextFilteredQuickPrompts = useMemo(() => {
const registeredPromptContextTitles = Object.values(promptContexts).map((pc) => pc.category);
// If KB is enabled, include KNOWLEDGE_BASE_CATEGORY so KB dependent quick prompts are shown
if (knowledgeBase.isEnabledKnowledgeBase) {
registeredPromptContextTitles.push(KNOWLEDGE_BASE_CATEGORY);
}
// include KNOWLEDGE_BASE_CATEGORY so KB dependent quick prompts are shown
registeredPromptContextTitles.push(KNOWLEDGE_BASE_CATEGORY);
return allPrompts.filter((prompt) => {
// only quick prompts
if (prompt.promptType !== PromptTypeEnum.quick) {
@@ -65,7 +64,7 @@ export const QuickPrompts: React.FC<QuickPromptsProps> = React.memo(
});
}
});
}, [allPrompts, knowledgeBase.isEnabledKnowledgeBase, promptContexts]);
}, [allPrompts, promptContexts]);
// Overflow state
const [isOverflowPopoverOpen, setIsOverflowPopoverOpen] = useState(false);
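KB-dependent quick prompts are now always eligible, so the memo no longer depends on the setting; the resulting filter, condensed from the hunk above:

const contextFilteredQuickPrompts = useMemo(() => {
  const registeredPromptContextTitles = Object.values(promptContexts).map((pc) => pc.category);
  // include KNOWLEDGE_BASE_CATEGORY so KB dependent quick prompts are shown
  registeredPromptContextTitles.push(KNOWLEDGE_BASE_CATEGORY);
  // only quick prompts whose categories match a registered context, as above
  return allPrompts.filter((prompt) => prompt.promptType === PromptTypeEnum.quick /* ...category check elided */);
}, [allPrompts, promptContexts]);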

View file

@@ -48,8 +48,6 @@ const mockValues = {
allSystemPrompts: mockSystemPrompts,
allQuickPrompts: mockQuickPrompts,
knowledgeBase: {
isEnabledRAGAlerts: true,
isEnabledKnowledgeBase: true,
latestAlerts: DEFAULT_LATEST_ALERTS,
},
baseConversations: {},
@@ -82,9 +80,7 @@ const updatedValues = {
],
},
knowledgeBase: {
isEnabledRAGAlerts: false,
isEnabledKnowledgeBase: false,
latestAlerts: DEFAULT_LATEST_ALERTS,
latestAlerts: DEFAULT_LATEST_ALERTS + 10,
},
assistantStreamingEnabled: false,
};
@@ -206,35 +202,7 @@ describe('useSettingsUpdater', () => {
expect(setKnowledgeBaseMock).toHaveBeenCalledWith(updatedValues.knowledgeBase);
});
});
it('should track which toggles have been updated when saveSettings is called', async () => {
await act(async () => {
const { result, waitForNextUpdate } = renderHook(() =>
useSettingsUpdater(
mockConversations,
{
data: mockSystemPrompts,
page: 1,
perPage: 100,
total: 10,
},
conversationsLoaded,
promptsLoaded,
anonymizationFields
)
);
await waitForNextUpdate();
const { setUpdatedKnowledgeBaseSettings } = result.current;
setUpdatedKnowledgeBaseSettings(updatedValues.knowledgeBase);
await result.current.saveSettings();
expect(reportAssistantSettingToggled).toHaveBeenCalledWith({
isEnabledKnowledgeBase: false,
isEnabledRAGAlerts: false,
});
});
});
it('should track only toggles that updated', async () => {
it('should track when alerts count is updated', async () => {
await act(async () => {
const { result, waitForNextUpdate } = renderHook(() =>
useSettingsUpdater(
@@ -255,15 +223,38 @@ describe('useSettingsUpdater', () => {
setUpdatedKnowledgeBaseSettings({
...updatedValues.knowledgeBase,
isEnabledKnowledgeBase: true,
});
await result.current.saveSettings();
expect(reportAssistantSettingToggled).toHaveBeenCalledWith({ alertsCountUpdated: true });
});
});
it('should track when streaming is updated', async () => {
await act(async () => {
const { result, waitForNextUpdate } = renderHook(() =>
useSettingsUpdater(
mockConversations,
{
data: mockSystemPrompts,
page: 1,
perPage: 100,
total: 10,
},
conversationsLoaded,
promptsLoaded,
anonymizationFields
)
);
await waitForNextUpdate();
const { setUpdatedAssistantStreamingEnabled } = result.current;
setUpdatedAssistantStreamingEnabled(false);
await result.current.saveSettings();
expect(reportAssistantSettingToggled).toHaveBeenCalledWith({
isEnabledRAGAlerts: false,
assistantStreamingEnabled: false,
});
});
});
it('if no toggles update, do not track anything', async () => {
it('if no settings update, do not track anything', async () => {
await act(async () => {
const { result, waitForNextUpdate } = renderHook(() =>
useSettingsUpdater(

View file

@@ -172,21 +172,13 @@ export const useSettingsUpdater = (
const bulkResult = hasBulkConversations
? await bulkUpdateConversations(http, conversationsSettingsBulkActions, toasts)
: undefined;
const didUpdateKnowledgeBase =
knowledgeBase.isEnabledKnowledgeBase !== updatedKnowledgeBaseSettings.isEnabledKnowledgeBase;
const didUpdateRAGAlerts =
knowledgeBase.isEnabledRAGAlerts !== updatedKnowledgeBaseSettings.isEnabledRAGAlerts;
const didUpdateAssistantStreamingEnabled =
assistantStreamingEnabled !== updatedAssistantStreamingEnabled;
if (didUpdateKnowledgeBase || didUpdateRAGAlerts || didUpdateAssistantStreamingEnabled) {
const didUpdateAlertsCount =
knowledgeBase.latestAlerts !== updatedKnowledgeBaseSettings.latestAlerts;
if (didUpdateAssistantStreamingEnabled || didUpdateAlertsCount) {
assistantTelemetry?.reportAssistantSettingToggled({
...(didUpdateKnowledgeBase
? { isEnabledKnowledgeBase: updatedKnowledgeBaseSettings.isEnabledKnowledgeBase }
: {}),
...(didUpdateRAGAlerts
? { isEnabledRAGAlerts: updatedKnowledgeBaseSettings.isEnabledRAGAlerts }
: {}),
...(didUpdateAlertsCount ? { alertsCountUpdated: didUpdateAlertsCount } : {}),
...(didUpdateAssistantStreamingEnabled
? { assistantStreamingEnabled: updatedAssistantStreamingEnabled }
: {}),
@@ -207,21 +199,20 @@
(bulkPromptsResult?.success ?? true)
);
}, [
hasBulkConversations,
hasBulkPrompts,
http,
conversationsSettingsBulkActions,
promptsBulkActions,
toasts,
knowledgeBase.isEnabledKnowledgeBase,
knowledgeBase.isEnabledRAGAlerts,
updatedKnowledgeBaseSettings,
hasBulkConversations,
conversationsSettingsBulkActions,
assistantStreamingEnabled,
updatedAssistantStreamingEnabled,
knowledgeBase.latestAlerts,
updatedKnowledgeBaseSettings,
setAssistantStreamingEnabled,
setKnowledgeBase,
hasBulkAnonymizationFields,
anonymizationFieldsBulkActions,
hasBulkPrompts,
promptsBulkActions,
assistantTelemetry,
]);
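Only two settings are now diffed before reporting, and the conditional spread keeps unchanged keys out of the event payload entirely; the tracking block, reconstructed from the hunks above:

const didUpdateAssistantStreamingEnabled =
  assistantStreamingEnabled !== updatedAssistantStreamingEnabled;
const didUpdateAlertsCount =
  knowledgeBase.latestAlerts !== updatedKnowledgeBaseSettings.latestAlerts;
if (didUpdateAssistantStreamingEnabled || didUpdateAlertsCount) {
  assistantTelemetry?.reportAssistantSettingToggled({
    // spread only the keys that actually changed
    ...(didUpdateAlertsCount ? { alertsCountUpdated: didUpdateAlertsCount } : {}),
    ...(didUpdateAssistantStreamingEnabled
      ? { assistantStreamingEnabled: updatedAssistantStreamingEnabled }
      : {}),
  });
}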

View file

@@ -6,8 +6,6 @@
*/
export interface KnowledgeBaseConfig {
isEnabledRAGAlerts: boolean;
isEnabledKnowledgeBase: boolean;
latestAlerts: number;
}
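Side by side, the shared config type loses both toggles:

// before
export interface KnowledgeBaseConfig {
  isEnabledRAGAlerts: boolean;
  isEnabledKnowledgeBase: boolean;
  latestAlerts: number;
}

// after
export interface KnowledgeBaseConfig {
  latestAlerts: number;
}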

View file

@@ -24,7 +24,5 @@ export const ANONYMIZATION_TABLE_SESSION_STORAGE_KEY = 'anonymizationTable';
export const DEFAULT_LATEST_ALERTS = 20;
export const DEFAULT_KNOWLEDGE_BASE_SETTINGS: KnowledgeBaseConfig = {
isEnabledRAGAlerts: false,
isEnabledKnowledgeBase: false,
latestAlerts: DEFAULT_LATEST_ALERTS,
};
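The matching default settings likewise shrink to a single field:

export const DEFAULT_KNOWLEDGE_BASE_SETTINGS: KnowledgeBaseConfig = {
  latestAlerts: DEFAULT_LATEST_ALERTS,
};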

View file

@@ -66,7 +66,6 @@ export interface AssistantProviderProps {
getComments: (commentArgs: {
abortStream: () => void;
currentConversation?: Conversation;
isEnabledLangChain: boolean;
isFetchingResponse: boolean;
refetchCurrentConversation: ({ isStreamRefetch }: { isStreamRefetch?: boolean }) => void;
regenerateMessage: (conversationId: string) => void;
@@ -106,7 +105,6 @@ export interface UseAssistantContext {
getComments: (commentArgs: {
abortStream: () => void;
currentConversation?: Conversation;
isEnabledLangChain: boolean;
isFetchingResponse: boolean;
refetchCurrentConversation: ({ isStreamRefetch }: { isStreamRefetch?: boolean }) => void;
regenerateMessage: () => void;

View file

@@ -64,18 +64,12 @@ export interface AssistantTelemetry {
reportAssistantMessageSent: (params: {
conversationId: string;
role: string;
isEnabledKnowledgeBase: boolean;
isEnabledRAGAlerts: boolean;
actionTypeId: string;
model?: string;
provider?: string;
}) => void;
reportAssistantQuickPrompt: (params: { conversationId: string; promptTitle: string }) => void;
reportAssistantSettingToggled: (params: {
isEnabledKnowledgeBase?: boolean;
isEnabledRAGAlerts?: boolean;
assistantStreamingEnabled?: boolean;
}) => void;
reportAssistantSettingToggled: (params: { assistantStreamingEnabled?: boolean }) => void;
}
export interface AssistantAvailability {
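After these removals, the package's telemetry surface is:

reportAssistantMessageSent: (params: {
  conversationId: string;
  role: string;
  actionTypeId: string;
  model?: string;
  provider?: string;
}) => void;
reportAssistantQuickPrompt: (params: { conversationId: string; promptTitle: string }) => void;
reportAssistantSettingToggled: (params: { assistantStreamingEnabled?: boolean }) => void;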

View file

@@ -40,8 +40,6 @@ jest.mock('../assistant_context', () => {
const setUpdatedKnowledgeBaseSettings = jest.fn();
const defaultProps = {
knowledgeBase: {
isEnabledKnowledgeBase: true,
isEnabledRAGAlerts: false,
latestAlerts: DEFAULT_LATEST_ALERTS,
},
setUpdatedKnowledgeBaseSettings,

View file

@@ -35,7 +35,6 @@ const testProps = {
setIsStreaming: jest.fn(),
refetchCurrentConversation: jest.fn(),
regenerateMessage: jest.fn(),
isEnabledLangChain: false,
isFetchingResponse: false,
currentConversation,
showAnonymizedValues,

View file

@@ -55,7 +55,6 @@ const transformMessageWithReplacements = ({
export const getComments = ({
abortStream,
currentConversation,
isEnabledLangChain,
isFetchingResponse,
refetchCurrentConversation,
regenerateMessage,
@@ -65,7 +64,6 @@ export const getComments = ({
}: {
abortStream: () => void;
currentConversation?: Conversation;
isEnabledLangChain: boolean;
isFetchingResponse: boolean;
refetchCurrentConversation: ({ isStreamRefetch }: { isStreamRefetch?: boolean }) => void;
regenerateMessage: (conversationId: string) => void;
@@ -78,8 +76,6 @@ export const getComments = ({
const regenerateMessageOfConversation = () => {
regenerateMessage(currentConversation.id);
};
// should only happen when no apiConfig is present
const actionTypeId = currentConversation.apiConfig?.actionTypeId ?? '';
const extraLoadingComment = isFetchingResponse
? [
@@ -94,11 +90,9 @@ export const getComments = ({
children: (
<StreamComment
abortStream={abortStream}
actionTypeId={actionTypeId}
content=""
refetchCurrentConversation={refetchCurrentConversation}
regenerateMessage={regenerateMessageOfConversation}
isEnabledLangChain={isEnabledLangChain}
setIsStreaming={setIsStreaming}
transformMessage={() => ({ content: '' } as unknown as ContentMessage)}
isFetching
@@ -165,10 +159,8 @@ export const getComments = ({
children: (
<StreamComment
abortStream={abortStream}
actionTypeId={actionTypeId}
index={index}
isControlsEnabled={isControlsEnabled}
isEnabledLangChain={isEnabledLangChain}
isError={message.isError}
reader={message.reader}
refetchCurrentConversation={refetchCurrentConversation}
@@ -188,12 +180,10 @@ export const getComments = ({
actions: <CommentActions message={transformedMessage} />,
children: (
<StreamComment
actionTypeId={actionTypeId}
abortStream={abortStream}
content={transformedMessage.content}
index={index}
isControlsEnabled={isControlsEnabled}
isEnabledLangChain={isEnabledLangChain}
// reader is used to determine if streaming controls are shown
reader={transformedMessage.reader}
regenerateMessage={regenerateMessageOfConversation}

View file

@@ -22,12 +22,10 @@ const content = 'Test Content';
const mockAbortStream = jest.fn();
const testProps = {
abortStream: mockAbortStream,
actionTypeId: '.gen-ai',
connectorId: 'test',
content,
index: 1,
isControlsEnabled: true,
isEnabledLangChain: true,
refetchCurrentConversation: jest.fn(),
regenerateMessage: jest.fn(),
setIsStreaming: jest.fn(),

View file

@@ -17,12 +17,10 @@ import { MessageText } from './message_text';
interface Props {
abortStream: () => void;
content?: string;
isEnabledLangChain: boolean;
isError?: boolean;
isFetching?: boolean;
isControlsEnabled?: boolean;
index: number;
actionTypeId: string;
reader?: ReadableStreamDefaultReader<Uint8Array>;
refetchCurrentConversation: ({ isStreamRefetch }: { isStreamRefetch?: boolean }) => void;
regenerateMessage: () => void;
@@ -33,10 +31,8 @@ interface Props {
export const StreamComment = ({
abortStream,
content,
actionTypeId,
index,
isControlsEnabled = false,
isEnabledLangChain,
isError = false,
isFetching = false,
reader,
@@ -48,9 +44,7 @@ export const StreamComment = ({
const { error, isLoading, isStreaming, pendingMessage, setComplete } = useStream({
refetchCurrentConversation,
content,
actionTypeId,
reader,
isEnabledLangChain,
isError,
});
useEffect(() => {

View file

@@ -9,8 +9,6 @@ import { API_ERROR } from '../translations';
import type { PromptObservableState } from './types';
import { Subject } from 'rxjs';
import { EventStreamCodec } from '@smithy/eventstream-codec';
import { fromUtf8, toUtf8 } from '@smithy/util-utf8';
describe('getStreamObservable', () => {
const mockReader = {
read: jest.fn(),
@@ -21,8 +19,6 @@ describe('getStreamObservable', () => {
const setLoading = jest.fn();
const defaultProps = {
actionTypeId: '.gen-ai',
isEnabledLangChain: false,
isError: false,
reader: typedReader,
setLoading,
@@ -30,211 +26,6 @@
beforeEach(() => {
jest.clearAllMocks();
});
it('should emit loading state and chunks for Bedrock', (done) => {
const completeSubject = new Subject<void>();
const expectedStates: PromptObservableState[] = [
{ chunks: [], loading: true },
{
// when i log the actual emit, chunks equal to message.split(''); test is wrong
chunks: ['My', ' new', ' message'],
message: 'My',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: false,
},
];
mockReader.read
.mockResolvedValueOnce({
done: false,
value: encodeBedrockResponse('My'),
})
.mockResolvedValueOnce({
done: false,
value: encodeBedrockResponse(' new'),
})
.mockResolvedValueOnce({
done: false,
value: encodeBedrockResponse(' message'),
})
.mockResolvedValue({
done: true,
});
const source = getStreamObservable({
...defaultProps,
actionTypeId: '.bedrock',
});
const emittedStates: PromptObservableState[] = [];
source.subscribe({
next: (state) => {
return emittedStates.push(state);
},
complete: () => {
expect(emittedStates).toEqual(expectedStates);
done();
completeSubject.subscribe({
next: () => {
expect(setLoading).toHaveBeenCalledWith(false);
expect(typedReader.cancel).toHaveBeenCalled();
done();
},
});
},
error: (err) => done(err),
});
});
it('should emit loading state and chunks for OpenAI', (done) => {
const chunk1 = `data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"My"}}]}\ndata: {"object":"chat.completion.chunk","choices":[{"delta":{"content":" new"}}]}`;
const chunk2 = `\ndata: {"object":"chat.completion.chunk","choices":[{"delta":{"content":" message"}}]}\ndata: [DONE]`;
const completeSubject = new Subject<void>();
const expectedStates: PromptObservableState[] = [
{ chunks: [], loading: true },
{
// when i log the actual emit, chunks equal to message.split(''); test is wrong
chunks: ['My', ' new', ' message'],
message: 'My',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: false,
},
];
mockReader.read
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk1)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk2)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode('')),
})
.mockResolvedValue({
done: true,
});
const source = getStreamObservable(defaultProps);
const emittedStates: PromptObservableState[] = [];
source.subscribe({
next: (state) => {
return emittedStates.push(state);
},
complete: () => {
expect(emittedStates).toEqual(expectedStates);
done();
completeSubject.subscribe({
next: () => {
expect(setLoading).toHaveBeenCalledWith(false);
expect(typedReader.cancel).toHaveBeenCalled();
done();
},
});
},
error: (err) => done(err),
});
});
it('should emit loading state and chunks for partial response OpenAI', (done) => {
const chunk1 = `data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"My"}}]}\ndata: {"object":"chat.completion.chunk","choices":[{"delta":{"content":" new"`;
const chunk2 = `}}]}\ndata: {"object":"chat.completion.chunk","choices":[{"delta":{"content":" message"}}]}\ndata: [DONE]`;
const completeSubject = new Subject<void>();
const expectedStates: PromptObservableState[] = [
{ chunks: [], loading: true },
{
// when i log the actual emit, chunks equal to message.split(''); test is wrong
chunks: ['My', ' new', ' message'],
message: 'My',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: true,
},
{
chunks: ['My', ' new', ' message'],
message: 'My new message',
loading: false,
},
];
mockReader.read
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk1)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk2)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode('')),
})
.mockResolvedValue({
done: true,
});
const source = getStreamObservable(defaultProps);
const emittedStates: PromptObservableState[] = [];
source.subscribe({
next: (state) => {
return emittedStates.push(state);
},
complete: () => {
expect(emittedStates).toEqual(expectedStates);
done();
completeSubject.subscribe({
next: () => {
expect(setLoading).toHaveBeenCalledWith(false);
expect(typedReader.cancel).toHaveBeenCalled();
done();
},
});
},
error: (err) => done(err),
});
});
it('should emit loading state and chunks for LangChain', (done) => {
const chunk1 = `{"payload":"","type":"content"}
{"payload":"My","type":"content"}
@@ -299,7 +90,7 @@ describe('getStreamObservable', () => {
done: true,
});
const source = getStreamObservable({ ...defaultProps, isEnabledLangChain: true });
const source = getStreamObservable(defaultProps);
const emittedStates: PromptObservableState[] = [];
source.subscribe({
@@ -376,148 +167,7 @@ describe('getStreamObservable', () => {
done: true,
});
const source = getStreamObservable({ ...defaultProps, isEnabledLangChain: true });
const emittedStates: PromptObservableState[] = [];
source.subscribe({
next: (state) => {
return emittedStates.push(state);
},
complete: () => {
expect(emittedStates).toEqual(expectedStates);
done();
completeSubject.subscribe({
next: () => {
expect(setLoading).toHaveBeenCalledWith(false);
expect(typedReader.cancel).toHaveBeenCalled();
done();
},
});
},
error: (err) => done(err),
});
});
it('should emit loading state and chunks for Gemini', (done) => {
const chunk1 = `data: {"candidates": [{"content":{"role":"model","parts":[{"text":"My"}]}}]}\rdata: {"candidates": [{"content":{"role":"model","parts":[{"text":" new"}]}}]}`;
const chunk2 = `\rdata: {"candidates": [{"content": {"role": "model","parts": [{"text": " message"}]},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 23,"candidatesTokenCount": 50,"totalTokenCount": 73}}`;
const completeSubject = new Subject<void>();
const expectedStates: PromptObservableState[] = [
{ chunks: [], loading: true },
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My new ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My new message',
loading: false,
},
];
mockReader.read
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk1)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk2)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode('')),
})
.mockResolvedValue({
done: true,
});
const source = getStreamObservable({
...defaultProps,
actionTypeId: '.gemini',
});
const emittedStates: PromptObservableState[] = [];
source.subscribe({
next: (state) => {
return emittedStates.push(state);
},
complete: () => {
expect(emittedStates).toEqual(expectedStates);
done();
completeSubject.subscribe({
next: () => {
expect(setLoading).toHaveBeenCalledWith(false);
expect(typedReader.cancel).toHaveBeenCalled();
done();
},
});
},
error: (err) => done(err),
});
});
it('should emit loading state and chunks for partial response Gemini', (done) => {
const chunk1 = `data: {"candidates": [{"content":{"role":"model","parts":[{"text":"My"}]}}]}\rdata: {"candidates": [{"content":{"role":"model","parts":[{"text":" new"}]}}]}`;
const chunk2 = `\rdata: {"candidates": [{"content": {"role": "model","parts": [{"text": " message"}]},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 23,"candidatesTokenCount": 50,"totalTokenCount": 73}}`;
const completeSubject = new Subject<void>();
const expectedStates: PromptObservableState[] = [
{ chunks: [], loading: true },
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My new ',
loading: true,
},
{
chunks: ['My ', ' ', 'new ', ' message'],
message: 'My new message',
loading: false,
},
];
mockReader.read
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk1)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode(chunk2)),
})
.mockResolvedValueOnce({
done: false,
value: new Uint8Array(new TextEncoder().encode('')),
})
.mockResolvedValue({
done: true,
});
const source = getStreamObservable({
...defaultProps,
actionTypeId: '.gemini',
});
const source = getStreamObservable(defaultProps);
const emittedStates: PromptObservableState[] = [];
source.subscribe({
@@ -613,22 +263,3 @@ });
});
});
});
function encodeBedrockResponse(completion: string) {
return new EventStreamCodec(toUtf8, fromUtf8).encode({
headers: {},
body: Uint8Array.from(
Buffer.from(
JSON.stringify({
bytes: Buffer.from(
JSON.stringify({
type: 'content_block_delta',
index: 0,
delta: { type: 'text_delta', text: completion },
})
).toString('base64'),
})
)
),
});
}
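With the Bedrock, OpenAI, and Gemini cases deleted, the suite keeps only the LangChain-format and error tests, built against the slimmed props; a minimal setup under that assumption:

const source = getStreamObservable({
  reader: typedReader,
  setLoading,
  isError: false,
});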

View file

@@ -7,55 +7,25 @@
import { concatMap, delay, finalize, Observable, of, scan, timestamp } from 'rxjs';
import type { Dispatch, SetStateAction } from 'react';
import { handleBedrockChunk } from '@kbn/elastic-assistant-common';
import type { PromptObservableState } from './types';
import { API_ERROR } from '../translations';
const MIN_DELAY = 35;
interface StreamObservable {
actionTypeId: string;
isEnabledLangChain: boolean;
isError: boolean;
reader: ReadableStreamDefaultReader<Uint8Array>;
setLoading: Dispatch<SetStateAction<boolean>>;
}
interface ResponseSchema {
candidates: Candidate[];
usageMetadata: {
promptTokenCount: number;
candidatesTokenCount: number;
totalTokenCount: number;
};
}
interface Part {
text: string;
}
interface Candidate {
content: Content;
finishReason: string;
}
interface Content {
role: string;
parts: Part[];
}
/**
* Returns an Observable that reads data from a ReadableStream and emits values representing the state of the data processing.
*
* @param connectorTypeTitle - The title of the connector type.
* @param isEnabledLangChain - indicates whether langchain is enabled or not
* @param isError - indicates whether the reader response is an error message or not
* @param reader - The ReadableStreamDefaultReader used to read data from the stream.
* @param setLoading - A function to update the loading state.
* @returns {Observable<PromptObservableState>} An Observable that emits PromptObservableState
*/
export const getStreamObservable = ({
actionTypeId,
isEnabledLangChain,
isError,
reader,
setLoading,
@@ -64,14 +34,8 @@ export const getStreamObservable = ({
observer.next({ chunks: [], loading: true });
const decoder = new TextDecoder();
const chunks: string[] = [];
// Initialize an empty string to store the OpenAI buffer.
let openAIBuffer: string = '';
// Initialize an empty string to store the LangChain buffer.
let langChainBuffer: string = '';
// Initialize an empty Uint8Array to store the Bedrock concatenated buffer.
let bedrockBuffer: Uint8Array = new Uint8Array(0);
// Initialize an empty string to store the Gemini buffer.
let geminiBuffer: string = '';
// read data from LangChain stream
function readLangChain() {
@@ -130,166 +94,7 @@ export const getStreamObservable = ({
observer.error(err);
});
}
// read data from OpenAI stream
function readOpenAI() {
reader
.read()
.then(({ done, value }: { done: boolean; value?: Uint8Array }) => {
try {
if (done) {
if (openAIBuffer) {
chunks.push(getOpenAIChunks([openAIBuffer])[0]);
}
observer.next({
chunks,
message: chunks.join(''),
loading: false,
});
observer.complete();
return;
}
const decoded = decoder.decode(value);
let nextChunks;
if (isError) {
nextChunks = [`${API_ERROR}\n\n${JSON.parse(decoded).message}`];
} else {
const lines = decoded.split('\n');
lines[0] = openAIBuffer + lines[0];
openAIBuffer = lines.pop() || '';
nextChunks = getOpenAIChunks(lines);
}
nextChunks.forEach((chunk: string) => {
chunks.push(chunk);
observer.next({
chunks,
message: chunks.join(''),
loading: true,
});
});
} catch (err) {
observer.error(err);
return;
}
readOpenAI();
})
.catch((err) => {
observer.error(err);
});
}
// read data from Bedrock stream
function readBedrock() {
reader
.read()
.then(({ done, value }: { done: boolean; value?: Uint8Array }) => {
try {
if (done) {
observer.next({
chunks,
message: chunks.join(''),
loading: false,
});
observer.complete();
return;
}
let content;
if (isError) {
content = `${API_ERROR}\n\n${JSON.parse(decoder.decode(value)).message}`;
chunks.push(content);
observer.next({
chunks,
message: chunks.join(''),
loading: true,
});
} else if (value != null) {
const chunk: Uint8Array = value;
const chunkHandler = (decodedChunk: string) => {
chunks.push(decodedChunk);
observer.next({
chunks,
message: chunks.join(''),
loading: true,
});
};
const processedChunk = handleBedrockChunk({ chunk, bedrockBuffer, chunkHandler });
bedrockBuffer = processedChunk.bedrockBuffer;
}
} catch (err) {
observer.error(err);
return;
}
readBedrock();
})
.catch((err) => {
observer.error(err);
});
}
// read data from Gemini stream
function readGemini() {
reader
.read()
.then(({ done, value }: { done: boolean; value?: Uint8Array }) => {
try {
if (done) {
if (geminiBuffer) {
chunks.push(getGeminiChunks([geminiBuffer])[0]);
}
observer.next({
chunks,
message: chunks.join(''),
loading: false,
});
observer.complete();
return;
}
const decoded = decoder.decode(value, { stream: true });
const lines = decoded.split('\r');
lines[0] = geminiBuffer + lines[0];
geminiBuffer = lines.pop() || '';
const nextChunks = getGeminiChunks(lines);
nextChunks.forEach((chunk: string) => {
const splitBySpace = chunk.split(' ');
for (const word of splitBySpace) {
chunks.push(`${word} `);
observer.next({
chunks,
message: chunks.join(''),
loading: true,
});
}
});
} catch (err) {
observer.error(err);
return;
}
readGemini();
})
.catch((err) => {
observer.error(err);
});
}
// this should never actually happen
function badConnector() {
observer.next({
chunks: [`Invalid connector type - ${actionTypeId} is not a supported GenAI connector.`],
message: `Invalid connector type - ${actionTypeId} is not a supported GenAI connector.`,
loading: false,
});
observer.complete();
}
if (isEnabledLangChain) readLangChain();
else if (actionTypeId === '.bedrock') readBedrock();
else if (actionTypeId === '.gen-ai') readOpenAI();
else if (actionTypeId === '.gemini') readGemini();
else badConnector();
readLangChain();
return () => {
reader.cancel();
@@ -325,26 +130,6 @@ export const getStreamObservable = ({
finalize(() => setLoading(false))
);
/**
* Parses an OpenAI response from a string.
* @param lines
* @returns {string[]} - Parsed string array from the OpenAI response.
*/
const getOpenAIChunks = (lines: string[]): string[] => {
const nextChunk = lines
.map((str) => str.substring(6))
.filter((str) => !!str && str !== '[DONE]')
.map((line) => {
try {
const openaiResponse = JSON.parse(line);
return openaiResponse.choices[0]?.delta.content ?? '';
} catch (err) {
return '';
}
});
return nextChunk;
};
/**
* Parses a LangChain response from a string.
* @param lines
@@ -366,23 +151,4 @@ const getLangChainChunks = (lines: string[]): string[] =>
return acc;
}, []);
/**
* Parses an Gemini response from a string.
* @param lines
* @returns {string[]} - Parsed string array from the Gemini response.
*/
const getGeminiChunks = (lines: string[]): string[] => {
return lines
.filter((str) => !!str && str !== '[DONE]')
.map((line) => {
try {
const newLine = line.replaceAll('data: ', '');
const geminiResponse: ResponseSchema = JSON.parse(newLine);
return geminiResponse.candidates[0]?.content.parts.map((part) => part.text).join('') ?? '';
} catch (err) {
return '';
}
});
};
export const getPlaceholderObservable = () => new Observable<PromptObservableState>();
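Every response now arrives as LangChain-formatted, newline-delimited JSON, so getStreamObservable needs neither an actionTypeId nor a flag: readLangChain() and getLangChainChunks() handle all connectors. A usage sketch:

// each chunk line looks like {"payload":"My","type":"content"}
const source = getStreamObservable({ reader, setLoading, isError });
source.subscribe(({ message, loading }) => {
  // message is the concatenation of chunks so far; loading flips to false when the stream completes
});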

View file

@@ -39,9 +39,7 @@ const readerComplete = {
const defaultProps = {
refetchCurrentConversation,
reader: readerComplete,
isEnabledLangChain: false,
isError: false,
actionTypeId: '.gen-ai',
};
// FLAKY: https://github.com/elastic/kibana/issues/180091

View file

@@ -11,10 +11,8 @@ import { getPlaceholderObservable, getStreamObservable } from './stream_observab
interface UseStreamProps {
refetchCurrentConversation: ({ isStreamRefetch }: { isStreamRefetch?: boolean }) => void;
isEnabledLangChain: boolean;
isError: boolean;
content?: string;
actionTypeId: string;
reader?: ReadableStreamDefaultReader<Uint8Array>;
}
interface UseStream {
@@ -33,17 +31,12 @@ interface UseStream {
* A hook that takes a ReadableStreamDefaultReader and returns an object with properties and functions
* that can be used to handle streaming data from a readable stream
* @param content - the content of the message. If provided, the function will not use the reader to stream data.
* @param actionTypeId - the actionTypeId of the connector type
* @param refetchCurrentConversation - refetch the current conversation
* @param reader - The readable stream reader used to stream data. If provided, the function will use this reader to stream data.
* @param isEnabledLangChain - indicates whether langchain is enabled or not
* @param isError - indicates whether the reader response is an error message or not
* @param reader - The readable stream reader used to stream data. If provided, the function will use this reader to stream data.
*/
export const useStream = ({
content,
actionTypeId,
isEnabledLangChain,
isError,
reader,
refetchCurrentConversation,
@@ -55,9 +48,9 @@ export const useStream = ({
const observer$ = useMemo(
() =>
content == null && reader != null
? getStreamObservable({ actionTypeId, reader, setLoading, isEnabledLangChain, isError })
? getStreamObservable({ reader, setLoading, isError })
: getPlaceholderObservable(),
[content, isEnabledLangChain, isError, reader, actionTypeId]
[content, isError, reader]
);
const onCompleteStream = useCallback(
(didAbort: boolean) => {

View file

@@ -133,8 +133,6 @@ describe('getRequestBody', () => {
],
};
const knowledgeBase = {
isEnabledKnowledgeBase: true,
isEnabledRAGAlerts: true,
latestAlerts: 20,
};
const traceOptions = {

View file

@@ -37,8 +37,6 @@ jest.mock('@kbn/elastic-assistant', () => ({
alertsIndexPattern: 'alerts-index-pattern',
assistantAvailability: mockAssistantAvailability(),
knowledgeBase: {
isEnabledRAGAlerts: true,
isEnabledKnowledgeBase: true,
latestAlerts: 20,
},
}),

View file

@@ -45,20 +45,6 @@ export const assistantMessageSentEvent: TelemetryEvent = {
optional: false,
},
},
isEnabledKnowledgeBase: {
type: 'boolean',
_meta: {
description: 'Is knowledge base enabled',
optional: false,
},
},
isEnabledRAGAlerts: {
type: 'boolean',
_meta: {
description: 'Is RAG on Alerts enabled',
optional: false,
},
},
actionTypeId: {
type: 'keyword',
_meta: {
@@ -106,17 +92,10 @@ export const assistantQuickPrompt: TelemetryEvent = {
export const assistantSettingToggledEvent: TelemetryEvent = {
eventType: TelemetryEventTypes.AssistantSettingToggled,
schema: {
isEnabledKnowledgeBase: {
alertsCountUpdated: {
type: 'boolean',
_meta: {
description: 'Is knowledge base enabled',
optional: true,
},
},
isEnabledRAGAlerts: {
type: 'boolean',
_meta: {
description: 'Is RAG on Alerts enabled',
description: 'Did alerts count update',
optional: true,
},
},

View file

@@ -16,8 +16,6 @@ export interface ReportAssistantInvokedParams {
export interface ReportAssistantMessageSentParams {
conversationId: string;
role: string;
isEnabledKnowledgeBase: boolean;
isEnabledRAGAlerts: boolean;
actionTypeId: string;
provider?: string;
model?: string;
@@ -29,8 +27,7 @@ }
}
export interface ReportAssistantSettingToggledParams {
isEnabledKnowledgeBase?: boolean;
isEnabledRAGAlerts?: boolean;
alertsCountUpdated?: boolean;
assistantStreamingEnabled?: boolean;
}
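The x-pack params type and the event schema now agree on the two remaining optional fields; illustrative calls (hypothetical values):

reportAssistantSettingToggled({ alertsCountUpdated: true });
reportAssistantSettingToggled({ assistantStreamingEnabled: false });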