From 8536caa0962f58af7dfcf62e39cabffeee4580a9 Mon Sep 17 00:00:00 2001
From: kaunghtut24 <163727919+kaunghtut24@users.noreply.github.com>
Date: Mon, 16 Jun 2025 14:13:32 +0530
Subject: [PATCH] Create config.toml

---
 config.toml | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 config.toml

diff --git a/config.toml b/config.toml
new file mode 100644
index 0000000..42d36bb
--- /dev/null
+++ b/config.toml
@@ -0,0 +1,32 @@
+[GENERAL]
+SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
+KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
+
+[MODELS.OPENAI]
+API_KEY = ""
+
+[MODELS.GROQ]
+API_KEY = ""
+
+[MODELS.ANTHROPIC]
+API_KEY = ""
+
+[MODELS.GEMINI]
+API_KEY = "" # SECURITY: a real-looking Google API key was committed here and has been removed. Rotate the exposed key and inject the replacement at deploy time; never commit secrets.
+
+[MODELS.CUSTOM_OPENAI]
+API_KEY = ""
+API_URL = ""
+MODEL_NAME = ""
+
+[MODELS.OLLAMA]
+API_URL = "" # Ollama API URL - http://host.docker.internal:11434
+
+[MODELS.DEEPSEEK]
+API_KEY = ""
+
+[MODELS.LM_STUDIO]
+API_URL = "" # LM Studio API URL - http://host.docker.internal:1234
+
+[API_ENDPOINTS]
+SEARXNG = "" # SearxNG API URL - http://localhost:32768