[8.18] [Security GenAI][Bug] OSS models do not work when streaming is ON (#12827) (#224129) (#224146)

# Backport

This will backport the following commits from `main` to `8.18`:
- [[Security GenAI][Bug] OSS models do not work when streaming is ON
(#12827) (#224129)](https://github.com/elastic/kibana/pull/224129)

<!--- Backport version: 10.0.1 -->

### Questions?
Please refer to the [Backport tool
documentation](https://github.com/sorenlouv/backport)

<!--BACKPORT [{"author":{"name":"Ievgen
Sorokopud","email":"ievgen.sorokopud@elastic.co"},"sourceCommit":{"committedDate":"2025-06-16T20:50:27Z","message":"[Security
GenAI][Bug] OSS models do not work when streaming is ON (#12827)
(#224129)\n\n## Summary\n\nMain ticket:
https://github.com/elastic/security-team/issues/12827\n\nThese changes
fix the issue with the locally running models over the\n`HTTP` server.
We do always use `HTTPS` agent even when URL uses HTTP\nprotocol. This
leads to an error:\n\n```\nTypeError: Protocol \"http:\" not supported.
Expected \"https:\"\n at new ClientRequest (node:_http_client:187:11)\n
at request (node:http:101:10)\n at AsyncLocalStorage.run
(node:async_hooks:346:14)\n at AsyncLocalStorageRunContextManager.with
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/run-context/AsyncLocalStorageRunContextManager.js:57:36)\n
at Instrumentation.withRunContext
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/index.js:1126:30)\n
at wrappedHttpRequest
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/http-shared.js:292:21)\n
at /kibana/node_modules/node-fetch/lib/index.js:1478:15\n at new Promise
(<anonymous>)\n at fetch
(/kibana/node_modules/node-fetch/lib/index.js:1447:9)\n at
OpenAI.fetchWithTimeout
(/kibana/node_modules/openai/src/core.ts:580:18)\n at OpenAI.makeRequest
(/kibana/node_modules/openai/src/core.ts:476:33)\n at
OpenAIConnector.invokeAsyncIterator (openai.ts:418:22)\n at
Object.executor (executor.ts:99:18)\n at action_executor.ts:504:23\n at
ActionExecutor.execute (action_executor.ts:153:12)\n at
chat_openai.ts:162:28\n at RetryOperation._fn
(/kibana/node_modules/p-retry/index.js:50:12)\n```\n\n### To
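The root cause is plain Node behavior, reproducible outside Kibana; a minimal sketch follows (the URL below is an arbitrary `http://` example, not from the PR). An `https.Agent` advertises `protocol: 'https:'`, and `ClientRequest` rejects any agent whose protocol does not match the request URL.

```ts
import http from 'node:http';
import https from 'node:https';

// Handing an HTTPS-only agent to an http: request reproduces the TypeError
// in the stack trace above: ClientRequest compares the request protocol
// against agent.protocol and throws ERR_INVALID_PROTOCOL on mismatch.
http.request('http://localhost:11434/v1/chat/completions', {
  agent: new https.Agent(),
});
// TypeError [ERR_INVALID_PROTOCOL]: Protocol "http:" not supported. Expected "https:"
```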
### To test

Steps to reproduce the behavior:

1. Run a locally hosted model and make sure that the chat completion URL uses the HTTP protocol (a hypothetical smoke test for this follows the list).
2. Add the OSS model via the Other (OpenAI Compatible Service) provider.
3. Turn streaming ON.
4. Try to chat via the AI Assistant.

Thanks @stephmilovic for helping to solve the issue!
SecuritySolution","Team:Security Generative
AI","backport:version","v9.1.0","v8.19.0","v9.0.3","v8.18.3"],"title":"[Security
GenAI][Bug] OSS models do not work when streaming is ON
(#12827)","number":224129,"url":"https://github.com/elastic/kibana/pull/224129","mergeCommit":{"message":"[Security
GenAI][Bug] OSS models do not work when streaming is ON (#12827)
(#224129)\n\n## Summary\n\nMain ticket:
https://github.com/elastic/security-team/issues/12827\n\nThese changes
fix the issue with the locally running models over the\n`HTTP` server.
We do always use `HTTPS` agent even when URL uses HTTP\nprotocol. This
leads to an error:\n\n```\nTypeError: Protocol \"http:\" not supported.
Expected \"https:\"\n at new ClientRequest (node:_http_client:187:11)\n
at request (node:http:101:10)\n at AsyncLocalStorage.run
(node:async_hooks:346:14)\n at AsyncLocalStorageRunContextManager.with
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/run-context/AsyncLocalStorageRunContextManager.js:57:36)\n
at Instrumentation.withRunContext
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/index.js:1126:30)\n
at wrappedHttpRequest
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/http-shared.js:292:21)\n
at /kibana/node_modules/node-fetch/lib/index.js:1478:15\n at new Promise
(<anonymous>)\n at fetch
(/kibana/node_modules/node-fetch/lib/index.js:1447:9)\n at
OpenAI.fetchWithTimeout
(/kibana/node_modules/openai/src/core.ts:580:18)\n at OpenAI.makeRequest
(/kibana/node_modules/openai/src/core.ts:476:33)\n at
OpenAIConnector.invokeAsyncIterator (openai.ts:418:22)\n at
Object.executor (executor.ts:99:18)\n at action_executor.ts:504:23\n at
ActionExecutor.execute (action_executor.ts:153:12)\n at
chat_openai.ts:162:28\n at RetryOperation._fn
(/kibana/node_modules/p-retry/index.js:50:12)\n```\n\n### To
test\n\nSteps to reproduce the behavior:\n\n1. Run locally hosted model
and make sure that the chat completion url\nuses the HTTP protocol\n2.
Add OSS model via Other (OpenAI Compatible Service) provider\n3. Turn
streaming ON\n4. Try to chat via AI Assistant\n\nThanks @stephmilovic
helping to solve the
issue!","sha":"4c62313e1bb5a02beeb33e24ef6d5d9f758ab082"}},"sourceBranch":"main","suggestedTargetBranches":["8.19","8.18"],"targetPullRequestStates":[{"branch":"main","label":"v9.1.0","branchLabelMappingKey":"^v9.1.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/224129","number":224129,"mergeCommit":{"message":"[Security
GenAI][Bug] OSS models do not work when streaming is ON (#12827)
(#224129)\n\n## Summary\n\nMain ticket:
https://github.com/elastic/security-team/issues/12827\n\nThese changes
fix the issue with the locally running models over the\n`HTTP` server.
We do always use `HTTPS` agent even when URL uses HTTP\nprotocol. This
leads to an error:\n\n```\nTypeError: Protocol \"http:\" not supported.
Expected \"https:\"\n at new ClientRequest (node:_http_client:187:11)\n
at request (node:http:101:10)\n at AsyncLocalStorage.run
(node:async_hooks:346:14)\n at AsyncLocalStorageRunContextManager.with
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/run-context/AsyncLocalStorageRunContextManager.js:57:36)\n
at Instrumentation.withRunContext
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/index.js:1126:30)\n
at wrappedHttpRequest
(/kibana/node_modules/elastic-apm-node/lib/instrumentation/http-shared.js:292:21)\n
at /kibana/node_modules/node-fetch/lib/index.js:1478:15\n at new Promise
(<anonymous>)\n at fetch
(/kibana/node_modules/node-fetch/lib/index.js:1447:9)\n at
OpenAI.fetchWithTimeout
(/kibana/node_modules/openai/src/core.ts:580:18)\n at OpenAI.makeRequest
(/kibana/node_modules/openai/src/core.ts:476:33)\n at
OpenAIConnector.invokeAsyncIterator (openai.ts:418:22)\n at
Object.executor (executor.ts:99:18)\n at action_executor.ts:504:23\n at
ActionExecutor.execute (action_executor.ts:153:12)\n at
chat_openai.ts:162:28\n at RetryOperation._fn
(/kibana/node_modules/p-retry/index.js:50:12)\n```\n\n### To
test\n\nSteps to reproduce the behavior:\n\n1. Run locally hosted model
and make sure that the chat completion url\nuses the HTTP protocol\n2.
Add OSS model via Other (OpenAI Compatible Service) provider\n3. Turn
streaming ON\n4. Try to chat via AI Assistant\n\nThanks @stephmilovic
helping to solve the
issue!","sha":"4c62313e1bb5a02beeb33e24ef6d5d9f758ab082"}},{"branch":"8.19","label":"v8.19.0","branchLabelMappingKey":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"state":"NOT_CREATED"},{"branch":"9.0","label":"v9.0.3","branchLabelMappingKey":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"url":"https://github.com/elastic/kibana/pull/224145","number":224145,"state":"OPEN"},{"branch":"8.18","label":"v8.18.3","branchLabelMappingKey":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"state":"NOT_CREATED"}]}]
BACKPORT-->
Backport commit e01dba894d (parent ef473728b6), committed by Ievgen Sorokopud on 2025-06-17.

The change in `OpenAIConnector`:
```diff
@@ -81,6 +81,10 @@ export class OpenAIConnector extends SubActionConnector<Config, Secrets> {
       this.url
     );
+    const isHttps = (this.configurationUtilities.getProxySettings()?.proxyUrl ?? this.url)
+      .toLowerCase()
+      .startsWith('https');
     this.openAI =
       this.config.apiProvider === OpenAiProviderType.AzureAi
         ? new OpenAI({
@@ -91,7 +95,7 @@ export class OpenAIConnector extends SubActionConnector<Config, Secrets> {
             ...this.headers,
             'api-key': this.secrets.apiKey,
           },
-          httpAgent: httpsAgent ?? httpAgent,
+          httpAgent: isHttps ? httpsAgent : httpAgent,
         })
       : new OpenAI({
           baseURL: removeEndpointFromUrl(this.config.apiUrl),
@@ -99,7 +103,7 @@ export class OpenAIConnector extends SubActionConnector<Config, Secrets> {
           defaultHeaders: {
             ...this.headers,
           },
-          httpAgent: httpsAgent ?? httpAgent,
+          httpAgent: isHttps ? httpsAgent : httpAgent,
         });
     this.registerSubActions();
```
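For clarity, here is a standalone sketch of the selection rule the diff introduces; the function name `pickAgent` and the call at the bottom are illustrative, not Kibana code. The protocol of the effective target (the proxy URL when one is configured, otherwise the connector URL) decides which Node agent the OpenAI client receives.

```ts
import http from 'node:http';
import https from 'node:https';

// Illustrative helper (not from the PR) mirroring the isHttps logic above.
// When a proxy is configured its protocol wins, because that is the socket
// Node actually opens; otherwise the connector's own URL decides.
function pickAgent(
  url: string,
  proxyUrl: string | undefined,
  httpAgent: http.Agent,
  httpsAgent: https.Agent
): http.Agent | https.Agent {
  const isHttps = (proxyUrl ?? url).toLowerCase().startsWith('https');
  return isHttps ? httpsAgent : httpAgent;
}

// No proxy, plain-HTTP model server: the http agent is chosen, so the
// ERR_INVALID_PROTOCOL TypeError above can no longer occur.
pickAgent('http://localhost:11434/v1', undefined, new http.Agent(), new https.Agent());
```

Compared with the old `httpsAgent ?? httpAgent`, which picked the HTTPS agent whenever one existed, the protocol check makes the agent follow the URL rather than agent availability.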