diff --git a/.env.example b/.env.example index bc67919..1c735cc 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,5 @@ PORT=3001 -OPENAI_API_KEY= +OLLAMA_URL=http://localhost:11434 # url of the ollama server SIMILARITY_MEASURE=cosine # cosine or dot SEARXNG_API_URL= # no need to fill this if using docker -MODEL_NAME=gpt-3.5-turbo \ No newline at end of file +MODEL_NAME=llama2 \ No newline at end of file diff --git a/README.md b/README.md index 9d26fae..562bd65 100644 --- a/README.md +++ b/README.md @@ -51,14 +51,15 @@ There are mainly 2 ways of installing Perplexica - With Docker, Without Docker. 2. Clone the Perplexica repository: ```bash - git clone https://github.com/ItzCrazyKns/Perplexica.git + git clone -b feat/ollama-support https://github.com/ItzCrazyKns/Perplexica.git ``` 3. After cloning, navigate to the directory containing the project files. 4. Rename the `.env.example` file to `.env`. For Docker setups, you need only fill in the following fields: - - `OPENAI_API_KEY` + - `OLLAMA_URL` (This is the URL where your Ollama server is running; a default value is provided, but you need to replace it if your Ollama URL is different.) + - `MODEL_NAME` (This is filled by default; you can change it if you want to use a different model.) - `SIMILARITY_MEASURE` (This is filled by default; you can leave it as is if you are unsure about it.) 5. 
Ensure you are in the directory containing the `docker-compose.yaml` file and execute: diff --git a/src/agents/academicSearchAgent.ts b/src/agents/academicSearchAgent.ts index 7c3d448..0e78581 100644 --- a/src/agents/academicSearchAgent.ts +++ b/src/agents/academicSearchAgent.ts @@ -9,7 +9,9 @@ import { RunnableMap, RunnableLambda, } from '@langchain/core/runnables'; -import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; +import { ChatOllama } from '@langchain/community/chat_models/ollama'; +import { Ollama } from '@langchain/community/llms/ollama'; +import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { Document } from '@langchain/core/documents'; import { searchSearxng } from '../core/searxng'; @@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory'; import eventEmitter from 'events'; import computeSimilarity from '../utils/computeSimilarity'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); -const embeddings = new OpenAIEmbeddings({ - modelName: 'text-embedding-3-large', +const embeddings = new OllamaEmbeddings({ + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const basicAcademicSearchRetrieverPrompt = ` diff --git a/src/agents/imageSearchAgent.ts b/src/agents/imageSearchAgent.ts index 3a2c9db..5e38123 100644 --- a/src/agents/imageSearchAgent.ts +++ b/src/agents/imageSearchAgent.ts @@ -4,15 +4,16 @@ import { RunnableLambda, } from '@langchain/core/runnables'; import { PromptTemplate } from '@langchain/core/prompts'; -import { OpenAI } from '@langchain/openai'; +import { Ollama } 
from '@langchain/community/llms/ollama'; import formatChatHistoryAsString from '../utils/formatHistory'; import { BaseMessage } from '@langchain/core/messages'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { searchSearxng } from '../core/searxng'; -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const imageSearchChainPrompt = ` diff --git a/src/agents/redditSearchAgent.ts b/src/agents/redditSearchAgent.ts index 77f293e..d5ab77c 100644 --- a/src/agents/redditSearchAgent.ts +++ b/src/agents/redditSearchAgent.ts @@ -9,7 +9,9 @@ import { RunnableMap, RunnableLambda, } from '@langchain/core/runnables'; -import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; +import { ChatOllama } from '@langchain/community/chat_models/ollama'; +import { Ollama } from '@langchain/community/llms/ollama'; +import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { Document } from '@langchain/core/documents'; import { searchSearxng } from '../core/searxng'; @@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory'; import eventEmitter from 'events'; import computeSimilarity from '../utils/computeSimilarity'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); -const embeddings = new OpenAIEmbeddings({ - modelName: 'text-embedding-3-large', +const embeddings = new OllamaEmbeddings({ + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const 
basicRedditSearchRetrieverPrompt = ` diff --git a/src/agents/webSearchAgent.ts b/src/agents/webSearchAgent.ts index f5799e3..5d60dda 100644 --- a/src/agents/webSearchAgent.ts +++ b/src/agents/webSearchAgent.ts @@ -9,7 +9,9 @@ import { RunnableMap, RunnableLambda, } from '@langchain/core/runnables'; -import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; +import { ChatOllama } from '@langchain/community/chat_models/ollama'; +import { Ollama } from '@langchain/community/llms/ollama'; +import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { Document } from '@langchain/core/documents'; import { searchSearxng } from '../core/searxng'; @@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory'; import eventEmitter from 'events'; import computeSimilarity from '../utils/computeSimilarity'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); -const embeddings = new OpenAIEmbeddings({ - modelName: 'text-embedding-3-large', +const embeddings = new OllamaEmbeddings({ + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const basicSearchRetrieverPrompt = ` diff --git a/src/agents/wolframAlphaSearchAgent.ts b/src/agents/wolframAlphaSearchAgent.ts index c071ef0..5f42ed7 100644 --- a/src/agents/wolframAlphaSearchAgent.ts +++ b/src/agents/wolframAlphaSearchAgent.ts @@ -9,7 +9,8 @@ import { RunnableMap, RunnableLambda, } from '@langchain/core/runnables'; -import { ChatOpenAI, OpenAI } from '@langchain/openai'; +import { ChatOllama } from '@langchain/community/chat_models/ollama'; +import { Ollama } from 
'@langchain/community/llms/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { Document } from '@langchain/core/documents'; import { searchSearxng } from '../core/searxng'; @@ -17,14 +18,16 @@ import type { StreamEvent } from '@langchain/core/tracers/log_stream'; import formatChatHistoryAsString from '../utils/formatHistory'; import eventEmitter from 'events'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const basicWolframAlphaSearchRetrieverPrompt = ` diff --git a/src/agents/writingAssistant.ts b/src/agents/writingAssistant.ts index 2c8d66e..eba9872 100644 --- a/src/agents/writingAssistant.ts +++ b/src/agents/writingAssistant.ts @@ -4,13 +4,14 @@ import { MessagesPlaceholder, } from '@langchain/core/prompts'; import { RunnableSequence } from '@langchain/core/runnables'; -import { ChatOpenAI } from '@langchain/openai'; +import { ChatOllama } from '@langchain/community/chat_models/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import type { StreamEvent } from '@langchain/core/tracers/log_stream'; import eventEmitter from 'events'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); diff --git a/src/agents/youtubeSearchAgent.ts b/src/agents/youtubeSearchAgent.ts index 9ab5ed8..7fa258b 100644 --- a/src/agents/youtubeSearchAgent.ts +++ b/src/agents/youtubeSearchAgent.ts @@ -9,7 +9,9 @@ import { RunnableMap, RunnableLambda, } from '@langchain/core/runnables'; -import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; +import { 
ChatOllama } from '@langchain/community/chat_models/ollama'; +import { Ollama } from '@langchain/community/llms/ollama'; +import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'; import { StringOutputParser } from '@langchain/core/output_parsers'; import { Document } from '@langchain/core/documents'; import { searchSearxng } from '../core/searxng'; @@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory'; import eventEmitter from 'events'; import computeSimilarity from '../utils/computeSimilarity'; -const chatLLM = new ChatOpenAI({ - modelName: process.env.MODEL_NAME, +const chatLLM = new ChatOllama({ + baseUrl: process.env.OLLAMA_URL, + model: process.env.MODEL_NAME, temperature: 0.7, }); -const llm = new OpenAI({ +const llm = new Ollama({ temperature: 0, - modelName: process.env.MODEL_NAME, + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); -const embeddings = new OpenAIEmbeddings({ - modelName: 'text-embedding-3-large', +const embeddings = new OllamaEmbeddings({ + model: process.env.MODEL_NAME, + baseUrl: process.env.OLLAMA_URL, }); const basicYoutubeSearchRetrieverPrompt = `