diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
new file mode 100644
index 0000000..3cd9044
--- /dev/null
+++ b/.github/workflows/docker-build.yaml
@@ -0,0 +1,70 @@
+name: Build & Push Docker Images
+
+on:
+ push:
+ branches:
+ - master
+ release:
+ types: [published]
+
+jobs:
+ build-and-push:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ service: [backend, app]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ install: true
+
+ - name: Log in to DockerHub
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Extract version from release tag
+ if: github.event_name == 'release'
+ id: version
+ run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
+
+ - name: Build and push Docker image for ${{ matrix.service }}
+ if: github.ref == 'refs/heads/master' && github.event_name == 'push'
+ run: |
+ docker buildx create --use
+ if [[ "${{ matrix.service }}" == "backend" ]]; then \
+ DOCKERFILE=backend.dockerfile; \
+ IMAGE_NAME=perplexica-backend; \
+ else \
+ DOCKERFILE=app.dockerfile; \
+ IMAGE_NAME=perplexica-frontend; \
+ fi
+ docker buildx build --platform linux/amd64,linux/arm64 \
+ --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
+ --cache-to=type=inline \
+ -f $DOCKERFILE \
+ -t itzcrazykns1337/${IMAGE_NAME}:main \
+ --push .
+
+ - name: Build and push release Docker image for ${{ matrix.service }}
+ if: github.event_name == 'release'
+ run: |
+ docker buildx create --use
+ if [[ "${{ matrix.service }}" == "backend" ]]; then \
+ DOCKERFILE=backend.dockerfile; \
+ IMAGE_NAME=perplexica-backend; \
+ else \
+ DOCKERFILE=app.dockerfile; \
+ IMAGE_NAME=perplexica-frontend; \
+ fi
+ docker buildx build --platform linux/amd64,linux/arm64 \
+ --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+ --cache-to=type=inline \
+ -f $DOCKERFILE \
+ -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+ --push .
diff --git a/.gitignore b/.gitignore
index a3dd5cc..8391d19 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,4 +35,5 @@ logs/
Thumbs.db
# Db
-db.sqlite
\ No newline at end of file
+db.sqlite
+/searxng
diff --git a/README.md b/README.md
index 3c87acc..f66e8b1 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,7 @@
- [Non-Docker Installation](#non-docker-installation)
- [Ollama Connection Errors](#ollama-connection-errors)
- [Using as a Search Engine](#using-as-a-search-engine)
+- [Using Perplexica's API](#using-perplexicas-api)
- [One-Click Deployment](#one-click-deployment)
- [Upcoming Features](#upcoming-features)
- [Support Us](#support-us)
@@ -45,6 +46,7 @@ Want to know more about its architecture and how it works? You can read it [here
- **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
- **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
- **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevant source out of it, ensuring you always get the latest information without the overhead of daily data updates.
+- **API:** Integrate Perplexica into your existing applications and make use of its capabilities.
It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).
@@ -125,6 +127,12 @@ If you wish to use Perplexica as an alternative to traditional search engines li
3. Add a new site search with the following URL: `http://localhost:3000/?q=%s`. Replace `localhost` with your IP address or domain name, and `3000` with the port number if Perplexica is not hosted locally.
4. Click the add button. Now, you can use Perplexica directly from your browser's search bar.
+## Using Perplexica's API
+
+Perplexica also provides an API for developers looking to integrate its powerful search engine into their own applications. You can run searches, use multiple models, and get answers to your queries.
+
+For more details, check out the full documentation [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/API/SEARCH.md).
+
## One-Click Deployment
[](https://repocloud.io/details/?app_id=267)
@@ -135,6 +143,7 @@ If you wish to use Perplexica as an alternative to traditional search engines li
- [x] Adding support for local LLMs
- [x] History Saving features
- [x] Introducing various Focus Modes
+- [x] Adding API support
- [ ] Finalizing Copilot Mode
- [ ] Adding Discover
diff --git a/app.dockerfile b/app.dockerfile
index 105cf86..ff1824d 100644
--- a/app.dockerfile
+++ b/app.dockerfile
@@ -1,7 +1,7 @@
FROM node:alpine
-ARG NEXT_PUBLIC_WS_URL
-ARG NEXT_PUBLIC_API_URL
+ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
@@ -9,7 +9,7 @@ WORKDIR /home/perplexica
COPY ui /home/perplexica/
-RUN yarn install
+RUN yarn install --frozen-lockfile
RUN yarn build
CMD ["yarn", "start"]
\ No newline at end of file
diff --git a/backend.dockerfile b/backend.dockerfile
index 4886573..b8d0155 100644
--- a/backend.dockerfile
+++ b/backend.dockerfile
@@ -1,21 +1,16 @@
FROM node:slim
-ARG SEARXNG_API_URL
-
WORKDIR /home/perplexica
COPY src /home/perplexica/src
COPY tsconfig.json /home/perplexica/
-COPY config.toml /home/perplexica/
COPY drizzle.config.ts /home/perplexica/
COPY package.json /home/perplexica/
COPY yarn.lock /home/perplexica/
-RUN sed -i "s|SEARXNG = \".*\"|SEARXNG = \"${SEARXNG_API_URL}\"|g" /home/perplexica/config.toml
-
RUN mkdir /home/perplexica/data
-RUN yarn install
+RUN yarn install --frozen-lockfile
RUN yarn build
CMD ["yarn", "start"]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index d6f9203..46d82c6 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -13,14 +13,16 @@ services:
build:
context: .
dockerfile: backend.dockerfile
- args:
- - SEARXNG_API_URL=http://searxng:8080
+ image: itzcrazykns1337/perplexica-backend:main
+ environment:
+ - SEARXNG_API_URL=http://searxng:8080
depends_on:
- searxng
ports:
- 3001:3001
volumes:
- backend-dbstore:/home/perplexica/data
+ - ./config.toml:/home/perplexica/config.toml
extra_hosts:
- 'host.docker.internal:host-gateway'
networks:
@@ -34,6 +36,7 @@ services:
args:
- NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
- NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ image: itzcrazykns1337/perplexica-frontend:main
depends_on:
- perplexica-backend
ports:
diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
new file mode 100644
index 0000000..a573021
--- /dev/null
+++ b/docs/API/SEARCH.md
@@ -0,0 +1,110 @@
+# Perplexica Search API Documentation
+
+## Overview
+
+Perplexica’s Search API makes it easy to use our AI-powered search engine. You can run different types of searches, pick the models you want to use, and get the most recent info. Read the sections below to learn more about Perplexica's Search API.
+
+## Endpoint
+
+### **POST** `http://localhost:3001/api/search`
+
+**Note**: Replace `3001` with another port number if you've changed the default port.
+
+### Request
+
+The API accepts a JSON object in the request body, where you define the focus mode, chat models, embedding models, and your query.
+
+#### Request Body Structure
+
+```json
+{
+ "chatModel": {
+ "provider": "openai",
+ "model": "gpt-4o-mini"
+ },
+ "embeddingModel": {
+ "provider": "openai",
+ "model": "text-embedding-3-large"
+ },
+ "focusMode": "webSearch",
+ "query": "What is Perplexica",
+ "history": [
+ ["human", "Hi, how are you?"],
+ ["assistant", "I am doing well, how can I help you today?"]
+ ]
+}
+```
+
+### Request Parameters
+
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details, you can send a GET request to `http://localhost:3001/api/models`.
+
+ - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
+ - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
+ - Optional fields for custom OpenAI configuration:
+ - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
+ - `customOpenAIKey`: The API key for a custom OpenAI instance.
+
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details, you can send a GET request to `http://localhost:3001/api/models`.
+
+ - `provider`: The provider for the embedding model (e.g., `openai`).
+ - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
+
+- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
+
+ - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
+
+- **`query`** (string, required): The search query or question.
+
+- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
+ ```json
+ [
+ ["human", "What is Perplexica?"],
+ ["assistant", "Perplexica is an AI-powered search engine..."]
+ ]
+ ```
+
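+Putting the above together, here is a minimal TypeScript sketch of a request to this endpoint. It is an illustration rather than part of the API contract: it assumes Node 18+ (for the built-in `fetch`) and an ESM module (for top-level `await`), and the model names are examples that should be checked against `/api/models` for your setup.
+
+```typescript
+// Illustrative sketch; adjust host, port, and model names to your deployment.
+const res = await fetch('http://localhost:3001/api/search', {
+  method: 'POST',
+  headers: { 'Content-Type': 'application/json' },
+  body: JSON.stringify({
+    chatModel: { provider: 'openai', model: 'gpt-4o-mini' },
+    embeddingModel: { provider: 'openai', model: 'text-embedding-3-large' },
+    focusMode: 'webSearch',
+    query: 'What is Perplexica',
+  }),
+});
+
+// The response carries the generated answer and the sources used to produce it.
+const { message, sources } = await res.json();
+console.log(message, sources);
+```
+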
+### Response
+
+The response from the API includes both the final message and the sources used to generate that message.
+
+#### Example Response
+
+```json
+{
+ "message": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online. Here are some key features and characteristics of Perplexica:\n\n- **AI-Powered Technology**: It utilizes advanced machine learning algorithms to not only retrieve information but also to understand the context and intent behind user queries, providing more relevant results [1][5].\n\n- **Open-Source**: Being open-source, Perplexica offers flexibility and transparency, allowing users to explore its functionalities without the constraints of proprietary software [3][10].",
+ "sources": [
+ {
+ "pageContent": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online.",
+ "metadata": {
+ "title": "What is Perplexica, and how does it function as an AI-powered search ...",
+ "url": "https://askai.glarity.app/search/What-is-Perplexica--and-how-does-it-function-as-an-AI-powered-search-engine"
+ }
+ },
+ {
+ "pageContent": "Perplexica is an open-source AI-powered search tool that dives deep into the internet to find precise answers.",
+ "metadata": {
+ "title": "Sahar Mor's Post",
+ "url": "https://www.linkedin.com/posts/sahar-mor_a-new-open-source-project-called-perplexica-activity-7204489745668694016-ncja"
+ }
+ }
+ ....
+ ]
+}
+```
+
+### Fields in the Response
+
+- **`message`** (string): The search result, generated based on the query and focus mode.
+- **`sources`** (array): A list of sources that were used to generate the search result. Each source includes:
+ - `pageContent`: A snippet of the relevant content from the source.
+ - `metadata`: Metadata about the source, including:
+ - `title`: The title of the webpage.
+ - `url`: The URL of the webpage.
+
+### Error Handling
+
+If an error occurs during the search process, the API will return an appropriate error message with an HTTP status code, as shown in the example below.
+
+- **400**: If the request is malformed or missing required fields (e.g., no focus mode or query).
+- **500**: If an internal server error occurs during the search.
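+
+Based on the route implementation in `src/routes/search.ts`, an invalid request yields a JSON object with a single `message` field. For example, omitting `query` returns a `400` with a body like:
+
+```json
+{
+  "message": "Missing focus mode or query"
+}
+```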
diff --git a/docs/installation/UPDATING.md b/docs/installation/UPDATING.md
index df67775..031a3e8 100644
--- a/docs/installation/UPDATING.md
+++ b/docs/installation/UPDATING.md
@@ -10,15 +10,21 @@ To update Perplexica to the latest version, follow these steps:
git clone https://github.com/ItzCrazyKns/Perplexica.git
```
-2. Navigate to the Project Directory
+2. Navigate to the Project Directory.
-3. Update and Rebuild Docker Containers:
+3. Pull the latest images from the registry.
```bash
-docker compose up -d --build
+docker compose pull
```
-4. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+4. Update and recreate the containers.
+
+```bash
+docker compose up -d
+```
+
+5. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non Docker users
diff --git a/package.json b/package.json
index f10d018..264963c 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "perplexica-backend",
- "version": "1.9.0-rc1",
+ "version": "1.9.0-rc3",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {
diff --git a/src/agents/webSearchAgent.ts b/src/agents/webSearchAgent.ts
index f7a6d7e..77ec181 100644
--- a/src/agents/webSearchAgent.ts
+++ b/src/agents/webSearchAgent.ts
@@ -23,22 +23,37 @@ import LineListOutputParser from '../lib/outputParsers/listLineOutputParser';
import { getDocumentsFromLinks } from '../lib/linkDocument';
import LineOutputParser from '../lib/outputParsers/lineOutputParser';
import { IterableReadableStream } from '@langchain/core/utils/stream';
+import { ChatOpenAI } from '@langchain/openai';
const basicSearchRetrieverPrompt = `
-You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
-If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
-If the question contains some links and asks to answer from those links or even if they don't you need to return the links inside 'links' XML block and the question inside 'question' XML block. If there are no links then you need to return the question without any XML block.
-If the user asks to summarize the content from some links you need to return \`Summarize\` as the question inside the 'question' XML block and the links inside the 'links' XML block.
+You are an AI question rephraser. You will be given a conversation and a follow-up question; rephrase the follow-up question so it is a standalone question that another LLM can use to search the web for information to answer it.
+If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, you need to return \`not_needed\` as the response (this is because the LLM won't need to search the web for information on this topic).
+If the user asks a question about some URL or wants you to summarize a PDF or a webpage (via URL), you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF, you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
+You must always return the rephrased question inside the \`question\` XML block; if there are no links in the follow-up question, don't insert a \`links\` XML block in your response.
-Example:
-1. Follow up question: What is the capital of France?
-Rephrased question: \`Capital of france\`
+There are several examples attached for your reference inside the below \`examples\` XML block.
-2. Follow up question: What is the population of New York City?
-Rephrased question: \`Population of New York City\`
+
+<examples>
+1. Follow up question: What is the capital of France
+Rephrased question:\`
+<question>
+Capital of France
+</question>
+\`
+
+2. Hi, how are you?
+Rephrased question:\`
+<question>
+not_needed
+</question>
+\`
3. Follow up question: What is Docker?
-Rephrased question: \`What is Docker\`
+Rephrased question: \`
+<question>
+What is Docker
+</question>
+\`
4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
@@ -54,16 +69,20 @@ https://example.com
5. Follow up question: Summarize the content from https://example.com
Rephrased question: \`
<question>
-Summarize
+summarize
</question>

<links>
https://example.com
</links>
\`
+</examples>
-Conversation:
+Everything below is part of the actual conversation; use the conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
+
+<conversation>
{chat_history}
+</conversation>
Follow up question: {query}
Rephrased question:
@@ -133,15 +152,13 @@ type BasicChainInput = {
};
const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
+ (llm as unknown as ChatOpenAI).temperature = 0;
+
return RunnableSequence.from([
PromptTemplate.fromTemplate(basicSearchRetrieverPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
- if (input === 'not_needed') {
- return { query: '', docs: [] };
- }
-
const linksOutputParser = new LineListOutputParser({
key: 'links',
});
@@ -153,9 +170,13 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
const links = await linksOutputParser.parse(input);
let question = await questionOutputParser.parse(input);
+ if (question === 'not_needed') {
+ return { query: '', docs: [] };
+ }
+
if (links.length > 0) {
if (question.length === 0) {
- question = 'Summarize';
+ question = 'summarize';
}
let docs = [];
@@ -227,7 +248,7 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
return { query: question, docs: docs };
} else {
- const res = await searchSearxng(input, {
+ const res = await searchSearxng(question, {
language: 'en',
});
@@ -243,7 +264,7 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
}),
);
- return { query: input, docs: documents };
+ return { query: question, docs: documents };
}
}),
]);
@@ -272,7 +293,7 @@ const createBasicWebSearchAnsweringChain = (
return docs;
}
- if (query === 'Summarize') {
+ if (query.toLocaleLowerCase() === 'summarize') {
return docs;
}
@@ -295,7 +316,7 @@ const createBasicWebSearchAnsweringChain = (
});
const sortedDocs = similarity
- .filter((sim) => sim.similarity > 0.5)
+ .filter((sim) => sim.similarity > 0.3)
.sort((a, b) => b.similarity - a.similarity)
.slice(0, 15)
.map((sim) => docsWithContent[sim.index]);
diff --git a/src/config.ts b/src/config.ts
index e608051..339bd02 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -54,7 +54,8 @@ export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
export const getAnthropicApiKey = () => loadConfig().API_KEYS.ANTHROPIC;
-export const getSearxngApiEndpoint = () => loadConfig().API_ENDPOINTS.SEARXNG;
+export const getSearxngApiEndpoint = () =>
+ process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
diff --git a/src/lib/linkDocument.ts b/src/lib/linkDocument.ts
index 9607220..5e90571 100644
--- a/src/lib/linkDocument.ts
+++ b/src/lib/linkDocument.ts
@@ -3,6 +3,7 @@ import { htmlToText } from 'html-to-text';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { Document } from '@langchain/core/documents';
import pdfParse from 'pdf-parse';
+import logger from '../utils/logger';
export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
const splitter = new RecursiveCharacterTextSplitter();
@@ -16,66 +17,81 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => {
? link
: `https://${link}`;
- const res = await axios.get(link, {
- responseType: 'arraybuffer',
- });
+ try {
+ const res = await axios.get(link, {
+ responseType: 'arraybuffer',
+ });
- const isPdf = res.headers['content-type'] === 'application/pdf';
+ const isPdf = res.headers['content-type'] === 'application/pdf';
- if (isPdf) {
- const pdfText = await pdfParse(res.data);
- const parsedText = pdfText.text
+ if (isPdf) {
+ const pdfText = await pdfParse(res.data);
+ const parsedText = pdfText.text
+ .replace(/(\r\n|\n|\r)/gm, ' ')
+ .replace(/\s+/g, ' ')
+ .trim();
+
+ const splittedText = await splitter.splitText(parsedText);
+ const title = 'PDF Document';
+
+ const linkDocs = splittedText.map((text) => {
+ return new Document({
+ pageContent: text,
+ metadata: {
+ title: title,
+ url: link,
+ },
+ });
+ });
+
+ docs.push(...linkDocs);
+ return;
+ }
+
+ const parsedText = htmlToText(res.data.toString('utf8'), {
+ selectors: [
+ {
+ selector: 'a',
+ options: {
+ ignoreHref: true,
+ },
+ },
+ ],
+ })
.replace(/(\r\n|\n|\r)/gm, ' ')
.replace(/\s+/g, ' ')
.trim();
const splittedText = await splitter.splitText(parsedText);
- const title = 'PDF Document';
+ const title = res.data
+ .toString('utf8')
+ .match(/<title>(.*?)<\/title>/)?.[1];
const linkDocs = splittedText.map((text) => {
return new Document({
pageContent: text,
metadata: {
- title: title,
+ title: title || link,
url: link,
},
});
});
docs.push(...linkDocs);
- return;
- }
-
- const parsedText = htmlToText(res.data.toString('utf8'), {
- selectors: [
- {
- selector: 'a',
- options: {
- ignoreHref: true,
+ } catch (err) {
+ logger.error(
+ `Error at generating documents from links: ${err.message}`,
+ );
+ docs.push(
+ new Document({
+ pageContent: `Failed to retrieve content from the link: ${err.message}`,
+ metadata: {
+ title: 'Failed to retrieve content',
+ url: link,
},
- },
- ],
- })
- .replace(/(\r\n|\n|\r)/gm, ' ')
- .replace(/\s+/g, ' ')
- .trim();
-
- const splittedText = await splitter.splitText(parsedText);
- const title = res.data
- .toString('utf8')
- .match(/<title>(.*?)<\/title>/)?.[1];
-
- const linkDocs = splittedText.map((text) => {
- return new Document({
- pageContent: text,
- metadata: {
- title: title || link,
- url: link,
- },
- });
- });
-
- docs.push(...linkDocs);
+ }),
+ );
+ }
}),
);
diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts
index 58cd164..90460c6 100644
--- a/src/lib/providers/anthropic.ts
+++ b/src/lib/providers/anthropic.ts
@@ -9,26 +9,38 @@ export const loadAnthropicChatModels = async () => {
try {
const chatModels = {
- 'Claude 3.5 Sonnet': new ChatAnthropic({
- temperature: 0.7,
- anthropicApiKey: anthropicApiKey,
- model: 'claude-3-5-sonnet-20240620',
- }),
- 'Claude 3 Opus': new ChatAnthropic({
- temperature: 0.7,
- anthropicApiKey: anthropicApiKey,
- model: 'claude-3-opus-20240229',
- }),
- 'Claude 3 Sonnet': new ChatAnthropic({
- temperature: 0.7,
- anthropicApiKey: anthropicApiKey,
- model: 'claude-3-sonnet-20240229',
- }),
- 'Claude 3 Haiku': new ChatAnthropic({
- temperature: 0.7,
- anthropicApiKey: anthropicApiKey,
- model: 'claude-3-haiku-20240307',
- }),
+ 'claude-3-5-sonnet-20240620': {
+ displayName: 'Claude 3.5 Sonnet',
+ model: new ChatAnthropic({
+ temperature: 0.7,
+ anthropicApiKey: anthropicApiKey,
+ model: 'claude-3-5-sonnet-20240620',
+ }),
+ },
+ 'claude-3-opus-20240229': {
+ displayName: 'Claude 3 Opus',
+ model: new ChatAnthropic({
+ temperature: 0.7,
+ anthropicApiKey: anthropicApiKey,
+ model: 'claude-3-opus-20240229',
+ }),
+ },
+ 'claude-3-sonnet-20240229': {
+ displayName: 'Claude 3 Sonnet',
+ model: new ChatAnthropic({
+ temperature: 0.7,
+ anthropicApiKey: anthropicApiKey,
+ model: 'claude-3-sonnet-20240229',
+ }),
+ },
+ 'claude-3-haiku-20240307': {
+ displayName: 'Claude 3 Haiku',
+ model: new ChatAnthropic({
+ temperature: 0.7,
+ anthropicApiKey: anthropicApiKey,
+ model: 'claude-3-haiku-20240307',
+ }),
+ },
};
return chatModels;
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index ffe8f6c..69db4f7 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -9,76 +9,136 @@ export const loadGroqChatModels = async () => {
try {
const chatModels = {
- 'Llama 3.1 70B': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'llama-3.1-70b-versatile',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'Llama 3.1 8B': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'llama-3.1-8b-instant',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'LLaMA3 8b': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'llama3-8b-8192',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'LLaMA3 70b': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'llama3-70b-8192',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'Mixtral 8x7b': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'mixtral-8x7b-32768',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'Gemma 7b': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'gemma-7b-it',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
- 'Gemma2 9b': new ChatOpenAI(
- {
- openAIApiKey: groqApiKey,
- modelName: 'gemma2-9b-it',
- temperature: 0.7,
- },
- {
- baseURL: 'https://api.groq.com/openai/v1',
- },
- ),
+ 'llama-3.2-3b-preview': {
+ displayName: 'Llama 3.2 3B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama-3.2-3b-preview',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama-3.2-11b-text-preview': {
+ displayName: 'Llama 3.2 11B Text',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama-3.2-11b-text-preview',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama-3.2-90b-text-preview': {
+ displayName: 'Llama 3.2 90B Text',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama-3.2-90b-text-preview',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama-3.1-70b-versatile': {
+ displayName: 'Llama 3.1 70B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama-3.1-70b-versatile',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama-3.1-8b-instant': {
+ displayName: 'Llama 3.1 8B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama-3.1-8b-instant',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama3-8b-8192': {
+ displayName: 'LLaMA3 8B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama3-8b-8192',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'llama3-70b-8192': {
+ displayName: 'LLaMA3 70B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'llama3-70b-8192',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'mixtral-8x7b-32768': {
+ displayName: 'Mixtral 8x7B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'mixtral-8x7b-32768',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'gemma-7b-it': {
+ displayName: 'Gemma 7B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'gemma-7b-it',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
+ 'gemma2-9b-it': {
+ displayName: 'Gemma2 9B',
+ model: new ChatOpenAI(
+ {
+ openAIApiKey: groqApiKey,
+ modelName: 'gemma2-9b-it',
+ temperature: 0.7,
+ },
+ {
+ baseURL: 'https://api.groq.com/openai/v1',
+ },
+ ),
+ },
};
return chatModels;
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index b2901ff..ed68bfa 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -18,11 +18,15 @@ export const loadOllamaChatModels = async () => {
const { models: ollamaModels } = (await response.json()) as any;
const chatModels = ollamaModels.reduce((acc, model) => {
- acc[model.model] = new ChatOllama({
- baseUrl: ollamaEndpoint,
- model: model.model,
- temperature: 0.7,
- });
+ acc[model.model] = {
+ displayName: model.name,
+ model: new ChatOllama({
+ baseUrl: ollamaEndpoint,
+ model: model.model,
+ temperature: 0.7,
+ }),
+ };
+
return acc;
}, {});
@@ -48,10 +52,14 @@ export const loadOllamaEmbeddingsModels = async () => {
const { models: ollamaModels } = (await response.json()) as any;
const embeddingsModels = ollamaModels.reduce((acc, model) => {
- acc[model.model] = new OllamaEmbeddings({
- baseUrl: ollamaEndpoint,
- model: model.model,
- });
+ acc[model.model] = {
+ displayName: model.name,
+ model: new OllamaEmbeddings({
+ baseUrl: ollamaEndpoint,
+ model: model.model,
+ }),
+ };
+
return acc;
}, {});
diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts
index 8673954..3747e37 100644
--- a/src/lib/providers/openai.ts
+++ b/src/lib/providers/openai.ts
@@ -9,31 +9,46 @@ export const loadOpenAIChatModels = async () => {
try {
const chatModels = {
- 'GPT-3.5 turbo': new ChatOpenAI({
- openAIApiKey,
- modelName: 'gpt-3.5-turbo',
- temperature: 0.7,
- }),
- 'GPT-4': new ChatOpenAI({
- openAIApiKey,
- modelName: 'gpt-4',
- temperature: 0.7,
- }),
- 'GPT-4 turbo': new ChatOpenAI({
- openAIApiKey,
- modelName: 'gpt-4-turbo',
- temperature: 0.7,
- }),
- 'GPT-4 omni': new ChatOpenAI({
- openAIApiKey,
- modelName: 'gpt-4o',
- temperature: 0.7,
- }),
- 'GPT-4 omni mini': new ChatOpenAI({
- openAIApiKey,
- modelName: 'gpt-4o-mini',
- temperature: 0.7,
- }),
+ 'gpt-3.5-turbo': {
+ displayName: 'GPT-3.5 Turbo',
+ model: new ChatOpenAI({
+ openAIApiKey,
+ modelName: 'gpt-3.5-turbo',
+ temperature: 0.7,
+ }),
+ },
+ 'gpt-4': {
+ displayName: 'GPT-4',
+ model: new ChatOpenAI({
+ openAIApiKey,
+ modelName: 'gpt-4',
+ temperature: 0.7,
+ }),
+ },
+ 'gpt-4-turbo': {
+ displayName: 'GPT-4 turbo',
+ model: new ChatOpenAI({
+ openAIApiKey,
+ modelName: 'gpt-4-turbo',
+ temperature: 0.7,
+ }),
+ },
+ 'gpt-4o': {
+ displayName: 'GPT-4 omni',
+ model: new ChatOpenAI({
+ openAIApiKey,
+ modelName: 'gpt-4o',
+ temperature: 0.7,
+ }),
+ },
+ 'gpt-4o-mini': {
+ displayName: 'GPT-4 omni mini',
+ model: new ChatOpenAI({
+ openAIApiKey,
+ modelName: 'gpt-4o-mini',
+ temperature: 0.7,
+ }),
+ },
};
return chatModels;
@@ -50,14 +65,20 @@ export const loadOpenAIEmbeddingsModels = async () => {
try {
const embeddingModels = {
- 'Text embedding 3 small': new OpenAIEmbeddings({
- openAIApiKey,
- modelName: 'text-embedding-3-small',
- }),
- 'Text embedding 3 large': new OpenAIEmbeddings({
- openAIApiKey,
- modelName: 'text-embedding-3-large',
- }),
+ 'text-embedding-3-small': {
+ displayName: 'Text Embedding 3 Small',
+ model: new OpenAIEmbeddings({
+ openAIApiKey,
+ modelName: 'text-embedding-3-small',
+ }),
+ },
+ 'text-embedding-3-large': {
+ displayName: 'Text Embedding 3 Large',
+ model: new OpenAIEmbeddings({
+ openAIApiKey,
+ modelName: 'text-embedding-3-large',
+ }),
+ },
};
return embeddingModels;
diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts
index 0ec7052..8a3417d 100644
--- a/src/lib/providers/transformers.ts
+++ b/src/lib/providers/transformers.ts
@@ -4,15 +4,24 @@ import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
export const loadTransformersEmbeddingsModels = async () => {
try {
const embeddingModels = {
- 'BGE Small': new HuggingFaceTransformersEmbeddings({
- modelName: 'Xenova/bge-small-en-v1.5',
- }),
- 'GTE Small': new HuggingFaceTransformersEmbeddings({
- modelName: 'Xenova/gte-small',
- }),
- 'Bert Multilingual': new HuggingFaceTransformersEmbeddings({
- modelName: 'Xenova/bert-base-multilingual-uncased',
- }),
+ 'xenova-bge-small-en-v1.5': {
+ displayName: 'BGE Small',
+ model: new HuggingFaceTransformersEmbeddings({
+ modelName: 'Xenova/bge-small-en-v1.5',
+ }),
+ },
+ 'xenova-gte-small': {
+ displayName: 'GTE Small',
+ model: new HuggingFaceTransformersEmbeddings({
+ modelName: 'Xenova/gte-small',
+ }),
+ },
+ 'xenova-bert-base-multilingual-uncased': {
+ displayName: 'Bert Multilingual',
+ model: new HuggingFaceTransformersEmbeddings({
+ modelName: 'Xenova/bert-base-multilingual-uncased',
+ }),
+ },
};
return embeddingModels;
diff --git a/src/routes/config.ts b/src/routes/config.ts
index f255560..f635e4b 100644
--- a/src/routes/config.ts
+++ b/src/routes/config.ts
@@ -10,38 +10,54 @@ import {
getOpenaiApiKey,
updateConfig,
} from '../config';
+import logger from '../utils/logger';
const router = express.Router();
router.get('/', async (_, res) => {
- const config = {};
+ try {
+ const config = {};
- const [chatModelProviders, embeddingModelProviders] = await Promise.all([
- getAvailableChatModelProviders(),
- getAvailableEmbeddingModelProviders(),
- ]);
+ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
+ getAvailableChatModelProviders(),
+ getAvailableEmbeddingModelProviders(),
+ ]);
- config['chatModelProviders'] = {};
- config['embeddingModelProviders'] = {};
+ config['chatModelProviders'] = {};
+ config['embeddingModelProviders'] = {};
- for (const provider in chatModelProviders) {
- config['chatModelProviders'][provider] = Object.keys(
- chatModelProviders[provider],
- );
+ for (const provider in chatModelProviders) {
+ config['chatModelProviders'][provider] = Object.keys(
+ chatModelProviders[provider],
+ ).map((model) => {
+ return {
+ name: model,
+ displayName: chatModelProviders[provider][model].displayName,
+ };
+ });
+ }
+
+ for (const provider in embeddingModelProviders) {
+ config['embeddingModelProviders'][provider] = Object.keys(
+ embeddingModelProviders[provider],
+ ).map((model) => {
+ return {
+ name: model,
+ displayName: embeddingModelProviders[provider][model].displayName,
+ };
+ });
+ }
+
+ config['openaiApiKey'] = getOpenaiApiKey();
+ config['ollamaApiUrl'] = getOllamaApiEndpoint();
+ config['anthropicApiKey'] = getAnthropicApiKey();
+ config['groqApiKey'] = getGroqApiKey();
+
+ res.status(200).json(config);
+ } catch (err: any) {
+ res.status(500).json({ message: 'An error has occurred.' });
+ logger.error(`Error getting config: ${err.message}`);
}
-
- for (const provider in embeddingModelProviders) {
- config['embeddingModelProviders'][provider] = Object.keys(
- embeddingModelProviders[provider],
- );
- }
-
- config['openaiApiKey'] = getOpenaiApiKey();
- config['ollamaApiUrl'] = getOllamaApiEndpoint();
- config['anthropicApiKey'] = getAnthropicApiKey();
- config['groqApiKey'] = getGroqApiKey();
-
- res.status(200).json(config);
});
router.post('/', async (req, res) => {
diff --git a/src/routes/images.ts b/src/routes/images.ts
index 6bd43d3..7806ce7 100644
--- a/src/routes/images.ts
+++ b/src/routes/images.ts
@@ -26,7 +26,7 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
- llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
+ llm = chatModels[provider][chatModel].model as BaseChatModel | undefined;
}
if (!llm) {
diff --git a/src/routes/index.ts b/src/routes/index.ts
index af928ab..6e82e54 100644
--- a/src/routes/index.ts
+++ b/src/routes/index.ts
@@ -5,6 +5,7 @@ import configRouter from './config';
import modelsRouter from './models';
import suggestionsRouter from './suggestions';
import chatsRouter from './chats';
+import searchRouter from './search';
const router = express.Router();
@@ -14,5 +15,6 @@ router.use('/config', configRouter);
router.use('/models', modelsRouter);
router.use('/suggestions', suggestionsRouter);
router.use('/chats', chatsRouter);
+router.use('/search', searchRouter);
export default router;
diff --git a/src/routes/search.ts b/src/routes/search.ts
new file mode 100644
index 0000000..9eec29f
--- /dev/null
+++ b/src/routes/search.ts
@@ -0,0 +1,150 @@
+import express from 'express';
+import logger from '../utils/logger';
+import { BaseChatModel } from 'langchain/chat_models/base';
+import { Embeddings } from 'langchain/embeddings/base';
+import { ChatOpenAI } from '@langchain/openai';
+import {
+ getAvailableChatModelProviders,
+ getAvailableEmbeddingModelProviders,
+} from '../lib/providers';
+import { searchHandlers } from '../websocket/messageHandler';
+import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
+
+const router = express.Router();
+
+interface chatModel {
+ provider: string;
+ model: string;
+ customOpenAIBaseURL?: string;
+ customOpenAIKey?: string;
+}
+
+interface embeddingModel {
+ provider: string;
+ model: string;
+}
+
+interface ChatRequestBody {
+ focusMode: string;
+ chatModel?: chatModel;
+ embeddingModel?: embeddingModel;
+ query: string;
+ history: Array<[string, string]>;
+}
+
+router.post('/', async (req, res) => {
+ try {
+ const body: ChatRequestBody = req.body;
+
+ if (!body.focusMode || !body.query) {
+ return res.status(400).json({ message: 'Missing focus mode or query' });
+ }
+
+ body.history = body.history || [];
+
+ const history: BaseMessage[] = body.history.map((msg) => {
+ if (msg[0] === 'human') {
+ return new HumanMessage({
+ content: msg[1],
+ });
+ } else {
+ return new AIMessage({
+ content: msg[1],
+ });
+ }
+ });
+
+ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
+ getAvailableChatModelProviders(),
+ getAvailableEmbeddingModelProviders(),
+ ]);
+
+ const chatModelProvider =
+ body.chatModel?.provider || Object.keys(chatModelProviders)[0];
+ const chatModel =
+ body.chatModel?.model ||
+ Object.keys(chatModelProviders[chatModelProvider])[0];
+
+ const embeddingModelProvider =
+ body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0];
+ const embeddingModel =
+ body.embeddingModel?.model ||
+ Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+
+ let llm: BaseChatModel | undefined;
+ let embeddings: Embeddings | undefined;
+
+ if (body.chatModel?.provider === 'custom_openai') {
+ if (
+ !body.chatModel?.customOpenAIBaseURL ||
+ !body.chatModel?.customOpenAIKey
+ ) {
+ return res
+ .status(400)
+ .json({ message: 'Missing custom OpenAI base URL or key' });
+ }
+
+ llm = new ChatOpenAI({
+ modelName: body.chatModel.model,
+ openAIApiKey: body.chatModel.customOpenAIKey,
+ temperature: 0.7,
+ configuration: {
+ baseURL: body.chatModel.customOpenAIBaseURL,
+ },
+ }) as unknown as BaseChatModel;
+ } else if (
+ chatModelProviders[chatModelProvider] &&
+ chatModelProviders[chatModelProvider][chatModel]
+ ) {
+ llm = chatModelProviders[chatModelProvider][chatModel]
+ .model as unknown as BaseChatModel | undefined;
+ }
+
+ if (
+ embeddingModelProviders[embeddingModelProvider] &&
+ embeddingModelProviders[embeddingModelProvider][embeddingModel]
+ ) {
+ embeddings = embeddingModelProviders[embeddingModelProvider][
+ embeddingModel
+ ].model as Embeddings | undefined;
+ }
+
+ if (!llm || !embeddings) {
+ return res.status(400).json({ message: 'Invalid model selected' });
+ }
+
+ const searchHandler = searchHandlers[body.focusMode];
+
+ if (!searchHandler) {
+ return res.status(400).json({ message: 'Invalid focus mode' });
+ }
+
+ const emitter = searchHandler(body.query, history, llm, embeddings);
+
+ let message = '';
+ let sources = [];
+
+ emitter.on('data', (data) => {
+ const parsedData = JSON.parse(data);
+ if (parsedData.type === 'response') {
+ message += parsedData.data;
+ } else if (parsedData.type === 'sources') {
+ sources = parsedData.data;
+ }
+ });
+
+ emitter.on('end', () => {
+ res.status(200).json({ message, sources });
+ });
+
+ emitter.on('error', (data) => {
+ const parsedData = JSON.parse(data);
+ res.status(500).json({ message: parsedData.data });
+ });
+ } catch (err: any) {
+ logger.error(`Error in getting search results: ${err.message}`);
+ res.status(500).json({ message: 'An error has occurred.' });
+ }
+});
+
+export default router;
diff --git a/src/routes/suggestions.ts b/src/routes/suggestions.ts
index b15ff5f..a75657e 100644
--- a/src/routes/suggestions.ts
+++ b/src/routes/suggestions.ts
@@ -26,7 +26,7 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
- llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
+ llm = chatModels[provider][chatModel].model as BaseChatModel | undefined;
}
if (!llm) {
diff --git a/src/routes/videos.ts b/src/routes/videos.ts
index 0ffdb2c..9d43fd2 100644
--- a/src/routes/videos.ts
+++ b/src/routes/videos.ts
@@ -26,7 +26,7 @@ router.post('/', async (req, res) => {
let llm: BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
- llm = chatModels[provider][chatModel] as BaseChatModel | undefined;
+ llm = chatModels[provider][chatModel].model as BaseChatModel | undefined;
}
if (!llm) {
diff --git a/src/websocket/connectionManager.ts b/src/websocket/connectionManager.ts
index 70e20d9..04797c5 100644
--- a/src/websocket/connectionManager.ts
+++ b/src/websocket/connectionManager.ts
@@ -45,9 +45,8 @@ export const handleConnection = async (
chatModelProviders[chatModelProvider][chatModel] &&
chatModelProvider != 'custom_openai'
) {
- llm = chatModelProviders[chatModelProvider][chatModel] as unknown as
- | BaseChatModel
- | undefined;
+ llm = chatModelProviders[chatModelProvider][chatModel]
+ .model as unknown as BaseChatModel | undefined;
} else if (chatModelProvider == 'custom_openai') {
llm = new ChatOpenAI({
modelName: chatModel,
@@ -65,7 +64,7 @@ export const handleConnection = async (
) {
embeddings = embeddingModelProviders[embeddingModelProvider][
embeddingModel
- ] as Embeddings | undefined;
+ ].model as Embeddings | undefined;
}
if (!llm || !embeddings) {
diff --git a/src/websocket/messageHandler.ts b/src/websocket/messageHandler.ts
index 0afda9f..332910c 100644
--- a/src/websocket/messageHandler.ts
+++ b/src/websocket/messageHandler.ts
@@ -28,7 +28,7 @@ type WSMessage = {
history: Array<[string, string]>;
};
-const searchHandlers = {
+export const searchHandlers = {
webSearch: handleWebSearch,
academicSearch: handleAcademicSearch,
writingAssistant: handleWritingAssistant,
diff --git a/ui/components/ChatWindow.tsx b/ui/components/ChatWindow.tsx
index 0ace2dd..b3d0089 100644
--- a/ui/components/ChatWindow.tsx
+++ b/ui/components/ChatWindow.tsx
@@ -201,13 +201,6 @@ const useSocket = (
connectWs();
}
-
- return () => {
- if (ws?.readyState === 1) {
- ws?.close();
- console.log('[DEBUG] closed');
- }
- };
}, [ws, url, setIsWSReady, setError]);
return ws;
@@ -313,6 +306,15 @@ const ChatWindow = ({ id }: { id?: string }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
+ useEffect(() => {
+ return () => {
+ if (ws?.readyState === 1) {
+ ws.close();
+ console.log('[DEBUG] closed');
+ }
+ };
+ }, []);
+
const messagesRef = useRef([]);
useEffect(() => {
diff --git a/ui/components/EmptyChatMessageInput.tsx b/ui/components/EmptyChatMessageInput.tsx
index 0ff9b2e..39d3f16 100644
--- a/ui/components/EmptyChatMessageInput.tsx
+++ b/ui/components/EmptyChatMessageInput.tsx
@@ -18,14 +18,21 @@ const EmptyChatMessageInput = ({
const inputRef = useRef(null);
- const handleKeyDown = (e: KeyboardEvent) => {
- if (e.key === '/') {
- e.preventDefault();
- inputRef.current?.focus();
- }
- };
-
useEffect(() => {
+ const handleKeyDown = (e: KeyboardEvent) => {
+ const activeElement = document.activeElement;
+
+ const isInputFocused =
+ activeElement?.tagName === 'INPUT' ||
+ activeElement?.tagName === 'TEXTAREA' ||
+ activeElement?.hasAttribute('contenteditable');
+
+ if (e.key === '/' && !isInputFocused) {
+ e.preventDefault();
+ inputRef.current?.focus();
+ }
+ };
+
document.addEventListener('keydown', handleKeyDown);
return () => {
diff --git a/ui/components/MessageInput.tsx b/ui/components/MessageInput.tsx
index 2229cdf..05d44a6 100644
--- a/ui/components/MessageInput.tsx
+++ b/ui/components/MessageInput.tsx
@@ -27,14 +27,21 @@ const MessageInput = ({
const inputRef = useRef(null);
- const handleKeyDown = (e: KeyboardEvent) => {
- if (e.key === '/') {
- e.preventDefault();
- inputRef.current?.focus();
- }
- };
-
useEffect(() => {
+ const handleKeyDown = (e: KeyboardEvent) => {
+ const activeElement = document.activeElement;
+
+ const isInputFocused =
+ activeElement?.tagName === 'INPUT' ||
+ activeElement?.tagName === 'TEXTAREA' ||
+ activeElement?.hasAttribute('contenteditable');
+
+ if (e.key === '/' && !isInputFocused) {
+ e.preventDefault();
+ inputRef.current?.focus();
+ }
+ };
+
document.addEventListener('keydown', handleKeyDown);
return () => {
diff --git a/ui/components/SearchImages.tsx b/ui/components/SearchImages.tsx
index b53b8b0..6025925 100644
--- a/ui/components/SearchImages.tsx
+++ b/ui/components/SearchImages.tsx
@@ -51,7 +51,7 @@ const SearchImages = ({
const data = await res.json();
- const images = data.images;
+ const images = data.images ?? [];
setImages(images);
setSlides(
images.map((image: Image) => {
diff --git a/ui/components/SearchVideos.tsx b/ui/components/SearchVideos.tsx
index 2646322..fec229c 100644
--- a/ui/components/SearchVideos.tsx
+++ b/ui/components/SearchVideos.tsx
@@ -64,7 +64,7 @@ const Searchvideos = ({
const data = await res.json();
- const videos = data.videos;
+ const videos = data.videos ?? [];
setVideos(videos);
setSlides(
videos.map((video: Video) => {
diff --git a/ui/components/SettingsDialog.tsx b/ui/components/SettingsDialog.tsx
index 171e812..02358c5 100644
--- a/ui/components/SettingsDialog.tsx
+++ b/ui/components/SettingsDialog.tsx
@@ -49,10 +49,10 @@ export const Select = ({ className, options, ...restProps }: SelectProps) => {
interface SettingsType {
chatModelProviders: {
- [key: string]: string[];
+ [key: string]: [Record<string, any>];
};
embeddingModelProviders: {
- [key: string]: string[];
+ [key: string]: [Record<string, any>];
};
openaiApiKey: string;
groqApiKey: string;
@@ -68,6 +68,10 @@ const SettingsDialog = ({
setIsOpen: (isOpen: boolean) => void;
}) => {
const [config, setConfig] = useState(null);
+ const [chatModels, setChatModels] = useState<Record<string, any>>({});
+ const [embeddingModels, setEmbeddingModels] = useState<Record<string, any>>(
+ {},
+ );
const [selectedChatModelProvider, setSelectedChatModelProvider] = useState<
string | null
>(null);
@@ -118,7 +122,7 @@ const SettingsDialog = ({
const chatModel =
localStorage.getItem('chatModel') ||
(data.chatModelProviders &&
- data.chatModelProviders[chatModelProvider]?.[0]) ||
+ data.chatModelProviders[chatModelProvider]?.[0].name) ||
'';
const embeddingModelProvider =
localStorage.getItem('embeddingModelProvider') ||
@@ -127,7 +131,7 @@ const SettingsDialog = ({
const embeddingModel =
localStorage.getItem('embeddingModel') ||
(data.embeddingModelProviders &&
- data.embeddingModelProviders[embeddingModelProvider]?.[0]) ||
+ data.embeddingModelProviders[embeddingModelProvider]?.[0].name) ||
'';
setSelectedChatModelProvider(chatModelProvider);
@@ -136,6 +140,8 @@ const SettingsDialog = ({
setSelectedEmbeddingModel(embeddingModel);
setCustomOpenAIApiKey(localStorage.getItem('openAIApiKey') || '');
setCustomOpenAIBaseURL(localStorage.getItem('openAIBaseURL') || '');
+ setChatModels(data.chatModelProviders || {});
+ setEmbeddingModels(data.embeddingModelProviders || {});
setIsLoading(false);
};
@@ -229,7 +235,8 @@ const SettingsDialog = ({
setSelectedChatModel('');
} else {
setSelectedChatModel(
- config.chatModelProviders[e.target.value][0],
+ config.chatModelProviders[e.target.value][0]
+ .name,
);
}
}}
@@ -264,8 +271,8 @@ const SettingsDialog = ({
return chatModelProvider
? chatModelProvider.length > 0
? chatModelProvider.map((model) => ({
- value: model,
- label: model,
+ value: model.name,
+ label: model.displayName,
}))
: [
{
@@ -341,7 +348,8 @@ const SettingsDialog = ({
onChange={(e) => {
setSelectedEmbeddingModelProvider(e.target.value);
setSelectedEmbeddingModel(
- config.embeddingModelProviders[e.target.value][0],
+ config.embeddingModelProviders[e.target.value][0]
+ .name,
);
}}
options={Object.keys(
@@ -374,8 +382,8 @@ const SettingsDialog = ({
return embeddingModelProvider
? embeddingModelProvider.length > 0
? embeddingModelProvider.map((model) => ({
- label: model,
- value: model,
+ label: model.displayName,
+ value: model.name,
}))
: [
{
diff --git a/ui/package.json b/ui/package.json
index 1d892de..04512b6 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -1,6 +1,6 @@
{
"name": "perplexica-frontend",
- "version": "1.9.0-rc1",
+ "version": "1.9.0-rc3",
"license": "MIT",
"author": "ItzCrazyKns",
"scripts": {