diff --git a/sample.config.toml b/sample.config.toml
index e283826..681a10f 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -7,6 +7,7 @@ CHAT_MODEL = "gpt-3.5-turbo" # Name of the model to use
 [API_KEYS]
 OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
 GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
+OPENAI_URL = "https://api.openai.com/v1"
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
diff --git a/src/config.ts b/src/config.ts
index 25dcbf4..344079a 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -14,6 +14,7 @@ interface Config {
   API_KEYS: {
     OPENAI: string;
     GROQ: string;
+    OPENAI_URL: string;
   };
   API_ENDPOINTS: {
     SEARXNG: string;
@@ -40,6 +41,8 @@ export const getChatModelProvider = () =>
 
 export const getChatModel = () => loadConfig().GENERAL.CHAT_MODEL;
 
+export const getOpenaiUrl = () => loadConfig().API_KEYS.OPENAI_URL;
+
 export const getOpenaiApiKey = () => loadConfig().API_KEYS.OPENAI;
 
 export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
diff --git a/src/lib/providers.ts b/src/lib/providers.ts
index aea69de..683996b 100644
--- a/src/lib/providers.ts
+++ b/src/lib/providers.ts
@@ -1,3 +1,4 @@
+import { type ClientOptions } from 'openai';
 import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
@@ -5,6 +6,7 @@ import {
   getGroqApiKey,
   getOllamaApiEndpoint,
   getOpenaiApiKey,
+  getOpenaiUrl,
 } from '../config';
 import logger from '../utils/logger';
 
@@ -12,6 +14,10 @@ export const getAvailableProviders = async () => {
   const openAIApiKey = getOpenaiApiKey();
   const groqApiKey = getGroqApiKey();
   const ollamaEndpoint = getOllamaApiEndpoint();
+  const openaiUrl = getOpenaiUrl();
+  const configuration: ClientOptions = {
+    baseURL: openaiUrl,
+  };
 
   const models = {};
 
@@ -20,22 +26,26 @@ export const getAvailableProviders = async () => {
     models['openai'] = {
       'GPT-3.5 turbo': new ChatOpenAI({
         openAIApiKey,
-        modelName: 'gpt-3.5-turbo',
+        modelName: 'gpt-3.5-turbo-1106',
         temperature: 0.7,
+        configuration,
       }),
       'GPT-4': new ChatOpenAI({
         openAIApiKey,
-        modelName: 'gpt-4',
+        modelName: 'gpt-4-1106-preview',
         temperature: 0.7,
+        configuration,
       }),
       'GPT-4 turbo': new ChatOpenAI({
         openAIApiKey,
         modelName: 'gpt-4-turbo',
         temperature: 0.7,
+        configuration,
       }),
       embeddings: new OpenAIEmbeddings({
         openAIApiKey,
         modelName: 'text-embedding-3-large',
+        configuration,
       }),
     };
   } catch (err) {
diff --git a/src/routes/config.ts b/src/routes/config.ts
index 4d22ec5..cd75d89 100644
--- a/src/routes/config.ts
+++ b/src/routes/config.ts
@@ -5,6 +5,7 @@ import {
   getChatModelProvider,
   getGroqApiKey,
   getOllamaApiEndpoint,
+  getOpenaiUrl,
   getOpenaiApiKey,
   updateConfig,
 } from '../config';
@@ -29,6 +30,7 @@ router.get('/', async (_, res) => {
 
   config['selectedProvider'] = getChatModelProvider();
   config['selectedChatModel'] = getChatModel();
+  config['openaiUrl'] = getOpenaiUrl();
   config['openeaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
   config['groqApiKey'] = getGroqApiKey();
@@ -47,6 +49,7 @@ router.post('/', async (req, res) => {
     API_KEYS: {
       OPENAI: config.openeaiApiKey,
       GROQ: config.groqApiKey,
+      OPENAI_URL: config.openaiUrl,
     },
     API_ENDPOINTS: {
       OLLAMA: config.ollamaApiUrl,
diff --git a/ui/components/SettingsDialog.tsx b/ui/components/SettingsDialog.tsx
index f005b8c..ff5052b 100644
--- a/ui/components/SettingsDialog.tsx
+++ b/ui/components/SettingsDialog.tsx
@@ -8,6 +8,7 @@ interface SettingsType {
   };
   selectedProvider: string;
   selectedChatModel: string;
+  openaiUrl: string;
   openeaiApiKey: string;
   groqApiKey: string;
   ollamaApiUrl: string;
@@ -165,6 +166,21 @@ const SettingsDialog = ({
                 </select>
               </div>
             )}
+            <div className="flex flex-col space-y-1">
+              <p className="text-white/70 text-sm">OpenAI Url</p>
+              <Input
+                type="text"
+                placeholder="OpenAI Url"
+                defaultValue={config.openaiUrl}
+                onChange={(e) =>
+                  setConfig({
+                    ...config,
+                    openaiUrl: e.target.value,
+                  })
+                }
+                className="bg-[#111111] px-3 py-2 flex items-center overflow-hidden border border-[#1C1C1C] text-white rounded-lg text-sm"
+              />
+            </div>
             <div className="flex flex-col space-y-1">
               <p className="text-white/70 text-sm">OpenAI API Key</p>
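
A minimal sketch of what this patch enables, assuming ChatOpenAI forwards its
`configuration` field (a ClientOptions object) to the underlying OpenAI SDK
client, as src/lib/providers.ts does above. The endpoint URL and API key below
are placeholders, not values from this patch:

    // Hypothetical: sample.config.toml could carry, for example,
    //   OPENAI_URL = "http://localhost:8080/v1"
    // to point the OpenAI provider at any OpenAI-compatible server.
    import { type ClientOptions } from 'openai';
    import { ChatOpenAI } from '@langchain/openai';

    const configuration: ClientOptions = {
      baseURL: 'http://localhost:8080/v1', // placeholder local endpoint
    };

    // `configuration` is passed through to the OpenAI client, so requests
    // are sent to baseURL instead of the default https://api.openai.com/v1.
    const model = new ChatOpenAI({
      openAIApiKey: 'sk-...', // placeholder key
      modelName: 'gpt-3.5-turbo-1106',
      temperature: 0.7,
      configuration,
    });

    const reply = await model.invoke('ping');
    console.log(reply.content);

Any server that implements the OpenAI chat-completions and embeddings REST API
should work here; the stock https://api.openai.com/v1 stays as the default in
sample.config.toml.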