|
| 1 | +import collections.abc |
| 2 | +from typing import override |
| 3 | + |
| 4 | +import httpx |
| 5 | +from httpx import Auth, Request, Response |
| 6 | +from pydantic import BaseModel |
| 7 | + |
| 8 | +from splunklib.ai import OpenAIModel |
| 9 | +from splunklib.ai.model import PredefinedModel |
| 10 | + |
| 11 | + |
class InternalAIModel(BaseModel):
    """Connection settings for the internal OAuth2-protected, OpenAI-compatible endpoint."""

    # OAuth2 client-credentials pair, sent as HTTP basic auth to the token endpoint.
    client_id: str
    client_secret: str
    # Application key forwarded to the backend inside the request's "user" field.
    app_key: str

    # Endpoint that issues OAuth2 access tokens (client_credentials grant).
    token_url: str
    # Base URL of the OpenAI-compatible API; the model name is appended to it.
    base_url: str
| 19 | + |
| 20 | + |
class TestLLMSettings(BaseModel):
    """LLM configuration for the test suite; exactly one backend should be set."""

    # TODO: Currently we only support our internal OpenAI-compatible model,
    # once we are close to GA we should also support OpenAI and probably Ollama, such
    # that external developers can also run our test suite locally.
    internal_ai: InternalAIModel | None = None
| 26 | + |
| 27 | + |
async def create_model(s: "TestLLMSettings") -> "PredefinedModel":
    """Build the model client described by the test LLM settings.

    Currently only the internal OpenAI-compatible backend is supported
    (see the TODO on ``TestLLMSettings``).

    Args:
        s: the test-suite LLM settings; ``s.internal_ai`` must be set.

    Raises:
        ValueError: if *s* does not configure any supported backend.
    """
    if s.internal_ai is not None:
        return await _buildInternalAIModel(
            token_url=s.internal_ai.token_url,
            base_url=s.internal_ai.base_url,
            client_id=s.internal_ai.client_id,
            client_secret=s.internal_ai.client_secret,
            app_key=s.internal_ai.app_key,
        )
    # This branch is reachable whenever no backend is configured
    # (internal_ai defaults to None), so raise a descriptive, specific
    # error instead of a bare Exception("unreachable").
    raise ValueError("TestLLMSettings does not configure any supported model backend")
| 38 | + |
| 39 | + |
class _InternalAIAuth(Auth):
    """httpx auth hook that attaches the OAuth2 access token to every request.

    The token is sent as an ``api-key`` header on each outgoing request.
    """

    token: str

    def __init__(self, token: str) -> None:
        self.token = token

    @override
    def auth_flow(
        self, request: Request
    ) -> collections.abc.Generator[Request, Response, None]:
        # Single-step flow: stamp the credential and hand the request back.
        request.headers.update({"api-key": self.token})
        yield request
| 52 | + |
| 53 | + |
class _TokenResponse(BaseModel):
    """Minimal view of the OAuth2 token endpoint's JSON response."""

    # The access token; later attached to model requests as the "api-key" header.
    access_token: str
| 56 | + |
| 57 | + |
async def _buildInternalAIModel(
    token_url: str,
    base_url: str,
    client_id: str,
    client_secret: str,
    app_key: str,
) -> OpenAIModel:
    """Fetch an OAuth2 access token and build the internal OpenAI-compatible model.

    Performs a ``client_credentials`` grant against *token_url* (authenticating
    with HTTP basic auth), then returns an :class:`OpenAIModel` whose HTTP
    client attaches the token as an ``api-key`` header on every request.

    Args:
        token_url: OAuth2 token endpoint.
        base_url: base URL of the OpenAI-compatible API; the model name is appended.
        client_id: OAuth2 client id (basic-auth username).
        client_secret: OAuth2 client secret (basic-auth password).
        app_key: application key forwarded via the request body's "user" field.

    Raises:
        httpx.HTTPStatusError: if the token endpoint responds with an error status.
    """
    headers = {
        "Accept": "*/*",
        "Content-Type": "application/x-www-form-urlencoded",
    }

    # Scope the client to this one request so its connection pool is closed
    # even when the token request fails (the previous version leaked it).
    async with httpx.AsyncClient() as http:
        response = await http.post(
            url=token_url,
            headers=headers,
            data={"grant_type": "client_credentials"},
            auth=(client_id, client_secret),
        )

    # Fail fast with a clear HTTP error instead of a confusing pydantic
    # validation error when the token endpoint rejects the credentials.
    response.raise_for_status()

    token = _TokenResponse.model_validate_json(response.text).access_token

    auth_handler = _InternalAIAuth(token)
    model = "gpt-4.1"  # NOTE(review): hard-coded; the gateway addresses models by URL path

    return OpenAIModel(
        model=model,
        base_url=f"{base_url}/{model}",
        api_key="",  # unused: auth happens via the api-key header below
        extra_body={"user": f'{{"appkey":"{app_key}"}}'},
        # Ownership of this client transfers to the returned model, so it is
        # deliberately not closed here.
        httpx_client=httpx.AsyncClient(auth=auth_handler),
    )
0 commit comments