[Automatic Import] Enable inference connector for Auto Import (#206111)
## Summary

Enables the new inference connector in Automatic Import. This PR also fixes the use of `inferenceEnabled` from `useAssistantContext`, since that context is not available in Automatic Import.

## To test

1. Set `inferenceConnectorOn` to `true` in `x-pack/platform/plugins/shared/stack_connectors/common/experimental_features.ts`.
2. Create an inference connector using [OpenAI creds](https://p.elstc.co/paste/36VivuC+#TnP7-Z7wBKDUg8fQ/lTycSCdwUxEEbHcyQ/Q0i3oEmO). Configure the inference endpoint for completion and name the endpoint `openai-completion-preconfig` (see the sketch after this description).
3. Now that the inference endpoint is created, add a [preconfigured connector](https://p.elstc.co/paste/tFWF3LSA#0thBRW05e6KSSkLCDjQiH8GkECQySBiHm6zRMCUThlf) with the same credentials (also sketched below).
4. Select the preconfigured connector in Automatic Import.
5. Verify that the Auto Import flow works end to end.

---------

Co-authored-by: Steph Milovic <stephanie.milovic@elastic.co>
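Two hedged sketches for steps 2 and 3 above. First, the inference endpoint from step 2 can be created through the Elasticsearch `_inference` API (for example from Dev Tools); the `model_id` and the exact `service_settings` keys below are illustrative assumptions, not taken from this PR:

```
PUT _inference/completion/openai-completion-preconfig
{
  "service": "openai",
  "service_settings": {
    "api_key": "<your OpenAI API key>",
    "model_id": "gpt-4o"
  }
}
```

Second, preconfigured connectors (step 3) are defined in `kibana.yml` under `xpack.actions.preconfigured`. A minimal sketch, assuming the `.inference` connector accepts provider, task, and endpoint fields along these lines; the connector id and the config field names are hypothetical:

```yaml
xpack.actions.preconfigured:
  openai-inference-preconfig:                # hypothetical connector id
    name: OpenAI Inference (preconfigured)
    actionTypeId: .inference
    config:
      provider: openai                       # assumed field names for the .inference type
      taskType: completion
      inferenceId: openai-completion-preconfig
    secrets:
      providerSecrets:
        api_key: <your OpenAI API key>       # same credentials as the inference endpoint
```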
Parent: a9f5f432ad
Commit: 668d88e19e

8 changed files with 36 additions and 21 deletions
`useAssistantOverlay`: read `inferenceEnabled` from the assistant context and forward it to `useLoadConnectors`.

```diff
@@ -82,9 +82,10 @@ export const useAssistantOverlay = (
   */
   replacements?: Replacements | null
 ): UseAssistantOverlay => {
-  const { http } = useAssistantContext();
+  const { http, inferenceEnabled } = useAssistantContext();
   const { data: connectors } = useLoadConnectors({
     http,
+    inferenceEnabled,
   });
 
   const defaultConnector = useMemo(() => getDefaultConnector(connectors), [connectors]);
```
`ConnectorSelector`: same pattern, pulling the flag from context and passing it through.

```diff
@@ -57,14 +57,18 @@ export const ConnectorSelector: React.FC<Props> = React.memo(
     setIsOpen,
     stats = null,
   }) => {
-    const { actionTypeRegistry, http, assistantAvailability } = useAssistantContext();
+    const { actionTypeRegistry, http, assistantAvailability, inferenceEnabled } =
+      useAssistantContext();
     // Connector Modal State
     const [isConnectorModalVisible, setIsConnectorModalVisible] = useState<boolean>(false);
     const { data: actionTypes } = useLoadActionTypes({ http });
 
     const [selectedActionType, setSelectedActionType] = useState<ActionType | null>(null);
 
-    const { data: aiConnectors, refetch: refetchConnectors } = useLoadConnectors({ http });
+    const { data: aiConnectors, refetch: refetchConnectors } = useLoadConnectors({
+      http,
+      inferenceEnabled,
+    });
 
     const localIsDisabled = isDisabled || !assistantAvailability.hasConnectorsReadPrivilege;
 
```
`ConnectorSetup`: likewise.

```diff
@@ -35,9 +35,9 @@ export const ConnectorSetup = ({
   );
   const { setApiConfig } = useConversation();
   // Access all conversations so we can add connector to all on initial setup
-  const { actionTypeRegistry, http } = useAssistantContext();
+  const { actionTypeRegistry, http, inferenceEnabled } = useAssistantContext();
 
-  const { refetch: refetchConnectors } = useLoadConnectors({ http });
+  const { refetch: refetchConnectors } = useLoadConnectors({ http, inferenceEnabled });
 
   const { data: actionTypes } = useLoadActionTypes({ http });
 
```
`useLoadConnectors` tests, imports: with the context-based wrapper gone (see the hunks below), the React import is no longer needed.

```diff
@@ -9,7 +9,6 @@ import { waitFor, renderHook } from '@testing-library/react';
 import { useLoadConnectors, Props } from '.';
 import { mockConnectors } from '../../mock/connectors';
 import { TestProviders } from '../../mock/test_providers/test_providers';
-import React, { ReactNode } from 'react';
 
 const mockConnectorsAndExtras = [
   ...mockConnectors,
```
`useLoadConnectors` tests: the `createWrapper` helper that injected `inferenceEnabled` through provider context is removed.

```diff
@@ -55,13 +54,6 @@ const toasts = {
 };
 const defaultProps = { http, toasts } as unknown as Props;
 
-const createWrapper = (inferenceEnabled = false) => {
-  // eslint-disable-next-line react/display-name
-  return ({ children }: { children: ReactNode }) => (
-    <TestProviders providerContext={{ inferenceEnabled }}>{children}</TestProviders>
-  );
-};
-
 describe('useLoadConnectors', () => {
   beforeEach(() => {
     jest.clearAllMocks();
```
`useLoadConnectors` tests: the `.inference` case now passes the flag as a prop instead of via the wrapper.

```diff
@@ -91,9 +83,12 @@ describe('useLoadConnectors', () => {
   });
 
   it('includes preconfigured .inference results when inferenceEnabled is true', async () => {
-    const { result } = renderHook(() => useLoadConnectors(defaultProps), {
-      wrapper: createWrapper(true),
-    });
+    const { result } = renderHook(
+      () => useLoadConnectors({ ...defaultProps, inferenceEnabled: true }),
+      {
+        wrapper: TestProviders,
+      }
+    );
     await waitFor(() => {
       expect(result.current.data).toStrictEqual(
         mockConnectors
```
`useLoadConnectors`, imports: the context import goes away.

```diff
@@ -13,7 +13,6 @@ import type { IHttpFetchError } from '@kbn/core-http-browser';
 import { HttpSetup } from '@kbn/core-http-browser';
 import { IToasts } from '@kbn/core-notifications-browser';
 import { OpenAiProviderType } from '@kbn/stack-connectors-plugin/common/openai/constants';
-import { useAssistantContext } from '../../assistant_context';
 import { AIConnector } from '../connector_selector';
 import * as i18n from '../translations';
 
```
`useLoadConnectors`, `Props`: the flag becomes an optional prop.

```diff
@@ -26,6 +25,7 @@ const QUERY_KEY = ['elastic-assistant, load-connectors'];
 export interface Props {
   http: HttpSetup;
   toasts?: IToasts;
+  inferenceEnabled?: boolean;
 }
 
 const actionTypes = ['.bedrock', '.gen-ai', '.gemini'];
```
`useLoadConnectors`: the hook reads the prop (defaulting to `false`) instead of calling `useAssistantContext`.

```diff
@@ -33,8 +33,8 @@ const actionTypes = ['.bedrock', '.gen-ai', '.gemini'];
 export const useLoadConnectors = ({
   http,
   toasts,
+  inferenceEnabled = false,
 }: Props): UseQueryResult<AIConnector[], IHttpFetchError> => {
-  const { inferenceEnabled } = useAssistantContext();
   if (inferenceEnabled) {
     actionTypes.push('.inference');
   }
```
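Because `inferenceEnabled` is now a prop with a default rather than a `useAssistantContext()` read, the hook no longer needs an `AssistantProvider` ancestor, which is what unblocks Automatic Import. A hypothetical consumer (the component name is illustrative, and the import path assumes the hook is re-exported from the package root):

```tsx
import React from 'react';
import type { HttpSetup } from '@kbn/core-http-browser';
// Assumed export location; the hook lives in the elastic-assistant package's connectorland.
import { useLoadConnectors } from '@kbn/elastic-assistant';

// A consumer rendered outside AssistantProvider (e.g. Automatic Import) can now
// opt into .inference connectors by passing the flag explicitly.
const ConnectorCount: React.FC<{ http: HttpSetup; inferenceEnabled: boolean }> = ({
  http,
  inferenceEnabled,
}) => {
  const { data: connectors } = useLoadConnectors({ http, inferenceEnabled });
  return <span>{connectors?.length ?? 0} AI connectors</span>;
};
```

Note that `.inference` is pushed onto the module-level `actionTypes` array, so once any caller passes `inferenceEnabled: true`, the id remains in the list for later queries in the same page load.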
Test mocks: register a mock `.inference` action type alongside the Bedrock one.

```diff
@@ -34,6 +34,12 @@ const actionType = { id: '.bedrock', name: 'Bedrock', iconClass: 'logoBedrock' }
 mockServices.triggersActionsUi.actionTypeRegistry.register(
   actionType as unknown as ActionTypeModel
 );
+
+const inferenceActionType = { id: '.inference', name: 'Inference', iconClass: 'logoInference' };
+mockServices.triggersActionsUi.actionTypeRegistry.register(
+  inferenceActionType as unknown as ActionTypeModel
+);
+
 jest.mock('@kbn/elastic-assistant/impl/connectorland/use_load_action_types', () => ({
   useLoadActionTypes: jest.fn(() => ({ data: [actionType] })),
 }));
```
`ConnectorStep` (Automatic Import): detect whether the `.inference` action type is registered and pass the result to `useLoadConnectors`.

```diff
@@ -42,15 +42,23 @@ interface ConnectorStepProps {
 }
 export const ConnectorStep = React.memo<ConnectorStepProps>(({ connector }) => {
   const { euiTheme } = useEuiTheme();
-  const { http, notifications } = useKibana().services;
+  const { http, notifications, triggersActionsUi } = useKibana().services;
   const { setConnector, completeStep } = useActions();
 
   const [connectors, setConnectors] = useState<AIConnector[]>();
+  let inferenceEnabled: boolean = false;
+
+  if (triggersActionsUi.actionTypeRegistry.has('.inference')) {
+    inferenceEnabled = triggersActionsUi.actionTypeRegistry.get('.inference') as unknown as boolean;
+  }
+  if (inferenceEnabled) {
+    AllowedActionTypeIds.push('.inference');
+  }
   const {
     isLoading,
     data: aiConnectors,
     refetch: refetchConnectors,
-  } = useLoadConnectors({ http, toasts: notifications.toasts });
+  } = useLoadConnectors({ http, toasts: notifications.toasts, inferenceEnabled });
 
   useEffect(() => {
     if (aiConnectors != null) {
```
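One observation on the detection logic above: `actionTypeRegistry.get('.inference')` returns a registry entry object, so the `as unknown as boolean` cast only makes `inferenceEnabled` truthy rather than producing a real boolean. Since `has()` already returns a boolean, an equivalent and simpler sketch is:

```ts
import type { TriggersAndActionsUIPublicPluginStart } from '@kbn/triggers-actions-ui-plugin/public';

// has() answers "is .inference registered?" directly, so no get(...) cast is needed.
const isInferenceEnabled = (
  triggersActionsUi: TriggersAndActionsUIPublicPluginStart
): boolean => triggersActionsUi.actionTypeRegistry.has('.inference');
```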
`getLLMType` / `getLLMClass`: map `.inference` to its own LLM type and treat it like OpenAI when choosing the chat model class.

```diff
@@ -16,12 +16,13 @@ export const getLLMType = (actionTypeId: string): string | undefined => {
     [`.gen-ai`]: `openai`,
     [`.bedrock`]: `bedrock`,
     [`.gemini`]: `gemini`,
+    [`.inference`]: `inference`,
   };
   return llmTypeDictionary[actionTypeId];
 };
 
 export const getLLMClass = (llmType?: string) =>
-  llmType === 'openai'
+  llmType === 'openai' || llmType === 'inference'
     ? ActionsClientChatOpenAI
     : llmType === 'bedrock'
     ? ActionsClientBedrockChatModel
```
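Taken together, these two changes route `.inference` connectors through the OpenAI-compatible chat model class. A small usage sketch (the import path is illustrative):

```ts
// Import path is illustrative; the helpers live next to the LLM class selection code.
import { getLLMType, getLLMClass } from './utils';

const llmType = getLLMType('.inference'); // 'inference'
const LlmClass = getLLMClass(llmType); // ActionsClientChatOpenAI, same branch as 'openai'
```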