From 806c47e70592356193bacc487f6fe0b148e84337 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:53:06 +0530 Subject: [PATCH 01/30] feat(chatwindow): fix infinite loading --- ui/components/ChatWindow.tsx | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ui/components/ChatWindow.tsx b/ui/components/ChatWindow.tsx index 0ace2dd..cc93da8 100644 --- a/ui/components/ChatWindow.tsx +++ b/ui/components/ChatWindow.tsx @@ -201,13 +201,6 @@ const useSocket = ( connectWs(); } - - return () => { - if (ws?.readyState === 1) { - ws?.close(); - console.log('[DEBUG] closed'); - } - }; }, [ws, url, setIsWSReady, setError]); return ws; From 2873093fee1fbb7b473b4ab2c1f3e1d7116d95ee Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:00:05 +0530 Subject: [PATCH 02/30] feat(package): bump version --- package.json | 2 +- ui/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index db3d773..c09454c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "perplexica-backend", - "version": "1.9.0-rc1", + "version": "1.9.0-rc2", "license": "MIT", "author": "ItzCrazyKns", "scripts": { diff --git a/ui/package.json b/ui/package.json index 1d892de..23afda2 100644 --- a/ui/package.json +++ b/ui/package.json @@ -1,6 +1,6 @@ { "name": "perplexica-frontend", - "version": "1.9.0-rc1", + "version": "1.9.0-rc2", "license": "MIT", "author": "ItzCrazyKns", "scripts": { From e8ed4df31aaac4001192f64a66701c48bc6e9378 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:27:22 +0530 Subject: [PATCH 03/30] feat(chat-window): close socket on unmount --- ui/components/ChatWindow.tsx | 410 ++++++++++++++++++----------------- 1 file changed, 208 insertions(+), 202 deletions(-) diff --git a/ui/components/ChatWindow.tsx b/ui/components/ChatWindow.tsx index cc93da8..9a1fe3c 100644 --- a/ui/components/ChatWindow.tsx +++ b/ui/components/ChatWindow.tsx @@ -1,42 +1,42 @@ -'use client'; +'use client' -import { useEffect, useRef, useState } from 'react'; -import { Document } from '@langchain/core/documents'; -import Navbar from './Navbar'; -import Chat from './Chat'; -import EmptyChat from './EmptyChat'; -import crypto from 'crypto'; -import { toast } from 'sonner'; -import { useSearchParams } from 'next/navigation'; -import { getSuggestions } from '@/lib/actions'; -import Error from 'next/error'; +import { useEffect, useRef, useState } from 'react' +import { Document } from '@langchain/core/documents' +import Navbar from './Navbar' +import Chat from './Chat' +import EmptyChat from './EmptyChat' +import crypto from 'crypto' +import { toast } from 'sonner' +import { useSearchParams } from 'next/navigation' +import { getSuggestions } from '@/lib/actions' +import Error from 'next/error' export type Message = { - messageId: string; - chatId: string; - createdAt: Date; - content: string; - role: 'user' | 'assistant'; - suggestions?: string[]; - sources?: Document[]; -}; + messageId: string + chatId: string + createdAt: Date + content: string + role: 'user' | 'assistant' + suggestions?: string[] + sources?: Document[] +} const useSocket = ( url: string, setIsWSReady: (ready: boolean) => void, setError: (error: boolean) => void, ) => { - const [ws, setWs] = useState(null); + const [ws, setWs] = useState(null) useEffect(() => { if (!ws) { const connectWs = async () => { - let chatModel 
= localStorage.getItem('chatModel'); - let chatModelProvider = localStorage.getItem('chatModelProvider'); - let embeddingModel = localStorage.getItem('embeddingModel'); + let chatModel = localStorage.getItem('chatModel') + let chatModelProvider = localStorage.getItem('chatModelProvider') + let embeddingModel = localStorage.getItem('embeddingModel') let embeddingModelProvider = localStorage.getItem( 'embeddingModelProvider', - ); + ) const providers = await fetch( `${process.env.NEXT_PUBLIC_API_URL}/models`, @@ -45,7 +45,7 @@ const useSocket = ( 'Content-Type': 'application/json', }, }, - ).then(async (res) => await res.json()); + ).then(async res => await res.json()) if ( !chatModel || @@ -54,58 +54,55 @@ const useSocket = ( !embeddingModelProvider ) { if (!chatModel || !chatModelProvider) { - const chatModelProviders = providers.chatModelProviders; + const chatModelProviders = providers.chatModelProviders - chatModelProvider = Object.keys(chatModelProviders)[0]; + chatModelProvider = Object.keys(chatModelProviders)[0] if (chatModelProvider === 'custom_openai') { toast.error( 'Seems like you are using the custom OpenAI provider, please open the settings and configure the API key and base URL', - ); - setError(true); - return; + ) + setError(true) + return } else { - chatModel = Object.keys(chatModelProviders[chatModelProvider])[0]; + chatModel = Object.keys(chatModelProviders[chatModelProvider])[0] if ( !chatModelProviders || Object.keys(chatModelProviders).length === 0 ) - return toast.error('No chat models available'); + return toast.error('No chat models available') } } if (!embeddingModel || !embeddingModelProvider) { - const embeddingModelProviders = providers.embeddingModelProviders; + const embeddingModelProviders = providers.embeddingModelProviders if ( !embeddingModelProviders || Object.keys(embeddingModelProviders).length === 0 ) - return toast.error('No embedding models available'); + return toast.error('No embedding models available') - embeddingModelProvider = Object.keys(embeddingModelProviders)[0]; + embeddingModelProvider = Object.keys(embeddingModelProviders)[0] embeddingModel = Object.keys( embeddingModelProviders[embeddingModelProvider], - )[0]; + )[0] } - localStorage.setItem('chatModel', chatModel!); - localStorage.setItem('chatModelProvider', chatModelProvider); - localStorage.setItem('embeddingModel', embeddingModel!); - localStorage.setItem( - 'embeddingModelProvider', - embeddingModelProvider, - ); + localStorage.setItem('chatModel', chatModel!) + localStorage.setItem('chatModelProvider', chatModelProvider) + localStorage.setItem('embeddingModel', embeddingModel!) 
+ localStorage.setItem('embeddingModelProvider', embeddingModelProvider) } else { - const chatModelProviders = providers.chatModelProviders; - const embeddingModelProviders = providers.embeddingModelProviders; + const chatModelProviders = providers.chatModelProviders + const embeddingModelProviders = providers.embeddingModelProviders if ( Object.keys(chatModelProviders).length > 0 && !chatModelProviders[chatModelProvider] ) { - chatModelProvider = Object.keys(chatModelProviders)[0]; - localStorage.setItem('chatModelProvider', chatModelProvider); + chatModelProvider = Object.keys(chatModelProviders)[0] + localStorage.setItem('chatModelProvider', chatModelProvider) } if ( @@ -113,19 +110,19 @@ const useSocket = ( chatModelProvider != 'custom_openai' && !chatModelProviders[chatModelProvider][chatModel] ) { - chatModel = Object.keys(chatModelProviders[chatModelProvider])[0]; - localStorage.setItem('chatModel', chatModel); + chatModel = Object.keys(chatModelProviders[chatModelProvider])[0] + localStorage.setItem('chatModel', chatModel) } if ( Object.keys(embeddingModelProviders).length > 0 && !embeddingModelProviders[embeddingModelProvider] ) { - embeddingModelProvider = Object.keys(embeddingModelProviders)[0]; + embeddingModelProvider = Object.keys(embeddingModelProviders)[0] localStorage.setItem( 'embeddingModelProvider', embeddingModelProvider, - ); + ) } if ( @@ -134,77 +131,77 @@ const useSocket = ( ) { embeddingModel = Object.keys( embeddingModelProviders[embeddingModelProvider], - )[0]; - localStorage.setItem('embeddingModel', embeddingModel); + )[0] + localStorage.setItem('embeddingModel', embeddingModel) } } - const wsURL = new URL(url); - const searchParams = new URLSearchParams({}); + const wsURL = new URL(url) + const searchParams = new URLSearchParams({}) - searchParams.append('chatModel', chatModel!); - searchParams.append('chatModelProvider', chatModelProvider); + searchParams.append('chatModel', chatModel!) + searchParams.append('chatModelProvider', chatModelProvider) if (chatModelProvider === 'custom_openai') { searchParams.append( 'openAIApiKey', localStorage.getItem('openAIApiKey')!, - ); + ) searchParams.append( 'openAIBaseURL', localStorage.getItem('openAIBaseURL')!, - ); + ) } - searchParams.append('embeddingModel', embeddingModel!); - searchParams.append('embeddingModelProvider', embeddingModelProvider); + searchParams.append('embeddingModel', embeddingModel!) + searchParams.append('embeddingModelProvider', embeddingModelProvider) - wsURL.search = searchParams.toString(); + wsURL.search = searchParams.toString() - const ws = new WebSocket(wsURL.toString()); + const ws = new WebSocket(wsURL.toString()) const timeoutId = setTimeout(() => { if (ws.readyState !== 1) { toast.error( 'Failed to connect to the server. 
Please try again later.', - ); + ) } - }, 10000); + }, 10000) ws.onopen = () => { - console.log('[DEBUG] open'); - clearTimeout(timeoutId); - setIsWSReady(true); - }; + console.log('[DEBUG] open') + clearTimeout(timeoutId) + setIsWSReady(true) + } ws.onerror = () => { - clearTimeout(timeoutId); - setError(true); - toast.error('WebSocket connection error.'); - }; + clearTimeout(timeoutId) + setError(true) + toast.error('WebSocket connection error.') + } ws.onclose = () => { - clearTimeout(timeoutId); - setError(true); - console.log('[DEBUG] closed'); - }; + clearTimeout(timeoutId) + setError(true) + console.log('[DEBUG] closed') + } - ws.addEventListener('message', (e) => { - const data = JSON.parse(e.data); + ws.addEventListener('message', e => { + const data = JSON.parse(e.data) if (data.type === 'error') { - toast.error(data.data); + toast.error(data.data) } - }); + }) - setWs(ws); - }; + setWs(ws) + } - connectWs(); + connectWs() } - }, [ws, url, setIsWSReady, setError]); + }, [ws, url, setIsWSReady, setError]) - return ws; -}; + return ws +} const loadMessages = async ( chatId: string, @@ -222,66 +219,66 @@ const loadMessages = async ( 'Content-Type': 'application/json', }, }, - ); + ) if (res.status === 404) { - setNotFound(true); - setIsMessagesLoaded(true); - return; + setNotFound(true) + setIsMessagesLoaded(true) + return } - const data = await res.json(); + const data = await res.json() const messages = data.messages.map((msg: any) => { return { ...msg, ...JSON.parse(msg.metadata), - }; - }) as Message[]; + } + }) as Message[] - setMessages(messages); + setMessages(messages) - const history = messages.map((msg) => { - return [msg.role, msg.content]; - }) as [string, string][]; + const history = messages.map(msg => { + return [msg.role, msg.content] + }) as [string, string][] - console.log('[DEBUG] messages loaded'); + console.log('[DEBUG] messages loaded') - document.title = messages[0].content; + document.title = messages[0].content - setChatHistory(history); - setFocusMode(data.chat.focusMode); - setIsMessagesLoaded(true); -}; + setChatHistory(history) + setFocusMode(data.chat.focusMode) + setIsMessagesLoaded(true) +} const ChatWindow = ({ id }: { id?: string }) => { - const searchParams = useSearchParams(); - const initialMessage = searchParams.get('q'); + const searchParams = useSearchParams() + const initialMessage = searchParams.get('q') - const [chatId, setChatId] = useState(id); - const [newChatCreated, setNewChatCreated] = useState(false); + const [chatId, setChatId] = useState(id) + const [newChatCreated, setNewChatCreated] = useState(false) - const [hasError, setHasError] = useState(false); - const [isReady, setIsReady] = useState(false); + const [hasError, setHasError] = useState(false) + const [isReady, setIsReady] = useState(false) - const [isWSReady, setIsWSReady] = useState(false); + const [isWSReady, setIsWSReady] = useState(false) const ws = useSocket( process.env.NEXT_PUBLIC_WS_URL!, setIsWSReady, setHasError, - ); + ) - const [loading, setLoading] = useState(false); - const [messageAppeared, setMessageAppeared] = useState(false); + const [loading, setLoading] = useState(false) + const [messageAppeared, setMessageAppeared] = useState(false) - const [chatHistory, setChatHistory] = useState<[string, string][]>([]); - const [messages, setMessages] = useState([]); + const [chatHistory, setChatHistory] = useState<[string, string][]>([]) + const [messages, setMessages] = useState([]) - const [focusMode, setFocusMode] = useState('webSearch'); + const [focusMode, 
setFocusMode] = useState('webSearch') - const [isMessagesLoaded, setIsMessagesLoaded] = useState(false); + const [isMessagesLoaded, setIsMessagesLoaded] = useState(false) - const [notFound, setNotFound] = useState(false); + const [notFound, setNotFound] = useState(false) useEffect(() => { if ( @@ -297,37 +294,46 @@ const ChatWindow = ({ id }: { id?: string }) => { setChatHistory, setFocusMode, setNotFound, - ); + ) } else if (!chatId) { - setNewChatCreated(true); - setIsMessagesLoaded(true); - setChatId(crypto.randomBytes(20).toString('hex')); + setNewChatCreated(true) + setIsMessagesLoaded(true) + setChatId(crypto.randomBytes(20).toString('hex')) } // eslint-disable-next-line react-hooks/exhaustive-deps - }, []); - - const messagesRef = useRef([]); + }, []) useEffect(() => { - messagesRef.current = messages; - }, [messages]); + return () => { + if (ws?.readyState === 1) { + ws.close() + console.log('[DEBUG] closed') + } + } + }, []) + + const messagesRef = useRef([]) + + useEffect(() => { + messagesRef.current = messages + }, [messages]) useEffect(() => { if (isMessagesLoaded && isWSReady) { - setIsReady(true); + setIsReady(true) } - }, [isMessagesLoaded, isWSReady]); + }, [isMessagesLoaded, isWSReady]) const sendMessage = async (message: string) => { - if (loading) return; - setLoading(true); - setMessageAppeared(false); + if (loading) return + setLoading(true) + setMessageAppeared(false) - let sources: Document[] | undefined = undefined; - let recievedMessage = ''; - let added = false; + let sources: Document[] | undefined = undefined + let recievedMessage = '' + let added = false - const messageId = crypto.randomBytes(7).toString('hex'); + const messageId = crypto.randomBytes(7).toString('hex') ws?.send( JSON.stringify({ @@ -339,9 +345,9 @@ const ChatWindow = ({ id }: { id?: string }) => { focusMode: focusMode, history: [...chatHistory, ['human', message]], }), - ); + ) - setMessages((prevMessages) => [ + setMessages(prevMessages => [ ...prevMessages, { content: message, @@ -350,21 +356,21 @@ const ChatWindow = ({ id }: { id?: string }) => { role: 'user', createdAt: new Date(), }, - ]); + ]) const messageHandler = async (e: MessageEvent) => { - const data = JSON.parse(e.data); + const data = JSON.parse(e.data) if (data.type === 'error') { - toast.error(data.data); - setLoading(false); - return; + toast.error(data.data) + setLoading(false) + return } if (data.type === 'sources') { - sources = data.data; + sources = data.data if (!added) { - setMessages((prevMessages) => [ + setMessages(prevMessages => [ ...prevMessages, { content: '', @@ -374,15 +380,15 @@ const ChatWindow = ({ id }: { id?: string }) => { sources: sources, createdAt: new Date(), }, - ]); - added = true; + ]) + added = true } - setMessageAppeared(true); + setMessageAppeared(true) } if (data.type === 'message') { if (!added) { - setMessages((prevMessages) => [ + setMessages(prevMessages => [ ...prevMessages, { content: data.data, @@ -392,35 +398,35 @@ const ChatWindow = ({ id }: { id?: string }) => { sources: sources, createdAt: new Date(), }, - ]); - added = true; + ]) + added = true } - setMessages((prev) => - prev.map((message) => { + setMessages(prev => + prev.map(message => { if (message.messageId === data.messageId) { - return { ...message, content: message.content + data.data }; + return { ...message, content: message.content + data.data } } - return message; + return message }), - ); + ) - recievedMessage += data.data; - setMessageAppeared(true); + recievedMessage += data.data + setMessageAppeared(true) } if 
(data.type === 'messageEnd') { - setChatHistory((prevHistory) => [ + setChatHistory(prevHistory => [ ...prevHistory, ['human', message], ['assistant', recievedMessage], - ]); + ]) - ws?.removeEventListener('message', messageHandler); - setLoading(false); + ws?.removeEventListener('message', messageHandler) + setLoading(false) - const lastMsg = messagesRef.current[messagesRef.current.length - 1]; + const lastMsg = messagesRef.current[messagesRef.current.length - 1] if ( lastMsg.role === 'assistant' && @@ -428,54 +434,54 @@ const ChatWindow = ({ id }: { id?: string }) => { lastMsg.sources.length > 0 && !lastMsg.suggestions ) { - const suggestions = await getSuggestions(messagesRef.current); - setMessages((prev) => - prev.map((msg) => { + const suggestions = await getSuggestions(messagesRef.current) + setMessages(prev => + prev.map(msg => { if (msg.messageId === lastMsg.messageId) { - return { ...msg, suggestions: suggestions }; + return { ...msg, suggestions: suggestions } } - return msg; + return msg }), - ); + ) } } - }; + } - ws?.addEventListener('message', messageHandler); - }; + ws?.addEventListener('message', messageHandler) + } const rewrite = (messageId: string) => { - const index = messages.findIndex((msg) => msg.messageId === messageId); + const index = messages.findIndex(msg => msg.messageId === messageId) - if (index === -1) return; + if (index === -1) return - const message = messages[index - 1]; + const message = messages[index - 1] - setMessages((prev) => { - return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)]; - }); - setChatHistory((prev) => { - return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)]; - }); + setMessages(prev => { + return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)] + }) + setChatHistory(prev => { + return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)] + }) - sendMessage(message.content); - }; + sendMessage(message.content) + } useEffect(() => { if (isReady && initialMessage) { - sendMessage(initialMessage); + sendMessage(initialMessage) } // eslint-disable-next-line react-hooks/exhaustive-deps - }, [isReady, initialMessage]); + }, [isReady, initialMessage]) if (hasError) { return ( -
-

+

+

Failed to connect to the server. Please try again later.

- ); + ) } return isReady ? ( @@ -504,25 +510,25 @@ const ChatWindow = ({ id }: { id?: string }) => {
) ) : ( -
+
- ); -}; + ) +} -export default ChatWindow; +export default ChatWindow From f620252406f828c67555d9393fe3c6a49440e42f Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:51:12 +0530 Subject: [PATCH 04/30] feat(linkDocument): add error handling --- src/lib/linkDocument.ts | 98 ++++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 41 deletions(-) diff --git a/src/lib/linkDocument.ts b/src/lib/linkDocument.ts index 9607220..5e90571 100644 --- a/src/lib/linkDocument.ts +++ b/src/lib/linkDocument.ts @@ -3,6 +3,7 @@ import { htmlToText } from 'html-to-text'; import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter'; import { Document } from '@langchain/core/documents'; import pdfParse from 'pdf-parse'; +import logger from '../utils/logger'; export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => { const splitter = new RecursiveCharacterTextSplitter(); @@ -16,66 +17,81 @@ export const getDocumentsFromLinks = async ({ links }: { links: string[] }) => { ? link : `https://${link}`; - const res = await axios.get(link, { - responseType: 'arraybuffer', - }); + try { + const res = await axios.get(link, { + responseType: 'arraybuffer', + }); - const isPdf = res.headers['content-type'] === 'application/pdf'; + const isPdf = res.headers['content-type'] === 'application/pdf'; - if (isPdf) { - const pdfText = await pdfParse(res.data); - const parsedText = pdfText.text + if (isPdf) { + const pdfText = await pdfParse(res.data); + const parsedText = pdfText.text + .replace(/(\r\n|\n|\r)/gm, ' ') + .replace(/\s+/g, ' ') + .trim(); + + const splittedText = await splitter.splitText(parsedText); + const title = 'PDF Document'; + + const linkDocs = splittedText.map((text) => { + return new Document({ + pageContent: text, + metadata: { + title: title, + url: link, + }, + }); + }); + + docs.push(...linkDocs); + return; + } + + const parsedText = htmlToText(res.data.toString('utf8'), { + selectors: [ + { + selector: 'a', + options: { + ignoreHref: true, + }, + }, + ], + }) .replace(/(\r\n|\n|\r)/gm, ' ') .replace(/\s+/g, ' ') .trim(); const splittedText = await splitter.splitText(parsedText); - const title = 'PDF Document'; + const title = res.data + .toString('utf8') + .match(/(.*?)<\/title>/)?.[1]; const linkDocs = splittedText.map((text) => { return new Document({ pageContent: text, metadata: { - title: title, + title: title || link, url: link, }, }); }); docs.push(...linkDocs); - return; - } - - const parsedText = htmlToText(res.data.toString('utf8'), { - selectors: [ - { - selector: 'a', - options: { - ignoreHref: true, + } catch (err) { + logger.error( + `Error at generating documents from links: ${err.message}`, + ); + docs.push( + new Document({ + pageContent: `Failed to retrieve content from the link: ${err.message}`, + metadata: { + title: 'Failed to retrieve content', + url: link, }, - }, - ], - }) - .replace(/(\r\n|\n|\r)/gm, ' ') - .replace(/\s+/g, ' ') - .trim(); - - const splittedText = await splitter.splitText(parsedText); - const title = res.data - .toString('utf8') - .match(/<title>(.*?)<\/title>/)?.[1]; - - const linkDocs = splittedText.map((text) => { - return new Document({ - pageContent: text, - metadata: { - title: title || link, - url: link, - }, - }); - }); - - docs.push(...linkDocs); + }), + ); + } }), ); From 449684c4192105ef39dcfe74dc0bc053780c4e15 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:51:42 
+0530 Subject: [PATCH 05/30] feat(webSearchAgent): update retriever prompt & change temp --- src/agents/webSearchAgent.ts | 59 ++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/src/agents/webSearchAgent.ts b/src/agents/webSearchAgent.ts index f7a6d7e..159023e 100644 --- a/src/agents/webSearchAgent.ts +++ b/src/agents/webSearchAgent.ts @@ -23,22 +23,37 @@ import LineListOutputParser from '../lib/outputParsers/listLineOutputParser'; import { getDocumentsFromLinks } from '../lib/linkDocument'; import LineOutputParser from '../lib/outputParsers/lineOutputParser'; import { IterableReadableStream } from '@langchain/core/utils/stream'; +import { ChatOpenAI } from '@langchain/openai';

 const basicSearchRetrieverPrompt = `
-You will be given a conversation below and a follow up question. You need to rephrase the follow-up question if needed so it is a standalone question that can be used by the LLM to search the web for information.
-If it is a writing task or a simple hi, hello rather than a question, you need to return \`not_needed\` as the response.
-If the question contains some links and asks to answer from those links or even if they don't you need to return the links inside 'links' XML block and the question inside 'question' XML block. If there are no links then you need to return the question without any XML block.
-If the user asks to summarize the content from some links you need to return \`Summarize\` as the question inside the 'question' XML block and the links inside the 'links' XML block.
+You are an AI question rephraser. You will be given a conversation and a follow-up question; you will have to rephrase the follow-up question so it is a standalone question that can be used by another LLM to search the web for information to answer it.
+If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. rather than a question, then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
+If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
+You must always return the rephrased question inside the \`question\` XML block; if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.

-Example:
-1. Follow up question: What is the capital of France?
-Rephrased question: \`Capital of france\`
+There are several examples attached for your reference inside the below \`examples\` XML block

-2. Follow up question: What is the population of New York City?
-Rephrased question: \`Population of New York City\`
+<examples>
+1. Follow up question: What is the capital of France
+Rephrased question: \`
+<question>
+Capital of france
+</question>
+\`
+
+2. Hi, how are you?
+Rephrased question: \`
+<question>
+not_needed
+</question>
+\`

 3. Follow up question: What is Docker?
-Rephrased question: \`What is Docker\`
+Rephrased question: \`
+<question>
+What is Docker
+</question>
+\`

 4. 
Follow up question: Can you tell me what is X from https://example.com Rephrased question: \` @@ -54,16 +69,20 @@ https://example.com 5. Follow up question: Summarize the content from https://example.com Rephrased question: \` <question> -Summarize +summarize </question> <links> https://example.com </links> \` +</examples> -Conversation: +Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above. + +<conversation> {chat_history} +</conversation> Follow up question: {query} Rephrased question: @@ -133,15 +152,13 @@ type BasicChainInput = { }; const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => { + (llm as unknown as ChatOpenAI).temperature = 0; + return RunnableSequence.from([ PromptTemplate.fromTemplate(basicSearchRetrieverPrompt), llm, strParser, RunnableLambda.from(async (input: string) => { - if (input === 'not_needed') { - return { query: '', docs: [] }; - } - const linksOutputParser = new LineListOutputParser({ key: 'links', }); @@ -153,9 +170,13 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => { const links = await linksOutputParser.parse(input); let question = await questionOutputParser.parse(input); + if (question === 'not_needed') { + return { query: '', docs: [] }; + } + if (links.length > 0) { if (question.length === 0) { - question = 'Summarize'; + question = 'summarize'; } let docs = []; @@ -272,7 +293,7 @@ const createBasicWebSearchAnsweringChain = ( return docs; } - if (query === 'Summarize') { + if (query.toLocaleLowerCase() === 'summarize') { return docs; } @@ -295,7 +316,7 @@ const createBasicWebSearchAnsweringChain = ( }); const sortedDocs = similarity - .filter((sim) => sim.similarity > 0.5) + .filter((sim) => sim.similarity > 0.3) .sort((a, b) => b.similarity - a.similarity) .slice(0, 15) .map((sim) => docsWithContent[sim.index]); From c952469f087c9d0577d9421308eaaf2d1d9217a7 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:51:59 +0530 Subject: [PATCH 06/30] feat(chaWindow): lint & beautify --- ui/components/ChatWindow.tsx | 407 ++++++++++++++++++----------------- 1 file changed, 205 insertions(+), 202 deletions(-) diff --git a/ui/components/ChatWindow.tsx b/ui/components/ChatWindow.tsx index 9a1fe3c..b3d0089 100644 --- a/ui/components/ChatWindow.tsx +++ b/ui/components/ChatWindow.tsx @@ -1,42 +1,42 @@ -'use client' +'use client'; -import { useEffect, useRef, useState } from 'react' -import { Document } from '@langchain/core/documents' -import Navbar from './Navbar' -import Chat from './Chat' -import EmptyChat from './EmptyChat' -import crypto from 'crypto' -import { toast } from 'sonner' -import { useSearchParams } from 'next/navigation' -import { getSuggestions } from '@/lib/actions' -import Error from 'next/error' +import { useEffect, useRef, useState } from 'react'; +import { Document } from '@langchain/core/documents'; +import Navbar from './Navbar'; +import Chat from './Chat'; +import EmptyChat from './EmptyChat'; +import crypto from 'crypto'; +import { toast } from 'sonner'; +import { useSearchParams } from 'next/navigation'; +import { getSuggestions } from '@/lib/actions'; +import Error from 'next/error'; export type Message = { - messageId: string - chatId: string - createdAt: Date - content: string - role: 'user' | 'assistant' - suggestions?: string[] - sources?: Document[] -} + messageId: string; + chatId: 
string; + createdAt: Date; + content: string; + role: 'user' | 'assistant'; + suggestions?: string[]; + sources?: Document[]; +}; const useSocket = ( url: string, setIsWSReady: (ready: boolean) => void, setError: (error: boolean) => void, ) => { - const [ws, setWs] = useState<WebSocket | null>(null) + const [ws, setWs] = useState<WebSocket | null>(null); useEffect(() => { if (!ws) { const connectWs = async () => { - let chatModel = localStorage.getItem('chatModel') - let chatModelProvider = localStorage.getItem('chatModelProvider') - let embeddingModel = localStorage.getItem('embeddingModel') + let chatModel = localStorage.getItem('chatModel'); + let chatModelProvider = localStorage.getItem('chatModelProvider'); + let embeddingModel = localStorage.getItem('embeddingModel'); let embeddingModelProvider = localStorage.getItem( 'embeddingModelProvider', - ) + ); const providers = await fetch( `${process.env.NEXT_PUBLIC_API_URL}/models`, @@ -45,7 +45,7 @@ const useSocket = ( 'Content-Type': 'application/json', }, }, - ).then(async res => await res.json()) + ).then(async (res) => await res.json()); if ( !chatModel || @@ -54,55 +54,58 @@ const useSocket = ( !embeddingModelProvider ) { if (!chatModel || !chatModelProvider) { - const chatModelProviders = providers.chatModelProviders + const chatModelProviders = providers.chatModelProviders; - chatModelProvider = Object.keys(chatModelProviders)[0] + chatModelProvider = Object.keys(chatModelProviders)[0]; if (chatModelProvider === 'custom_openai') { toast.error( 'Seems like you are using the custom OpenAI provider, please open the settings and configure the API key and base URL', - ) - setError(true) - return + ); + setError(true); + return; } else { - chatModel = Object.keys(chatModelProviders[chatModelProvider])[0] + chatModel = Object.keys(chatModelProviders[chatModelProvider])[0]; if ( !chatModelProviders || Object.keys(chatModelProviders).length === 0 ) - return toast.error('No chat models available') + return toast.error('No chat models available'); } } if (!embeddingModel || !embeddingModelProvider) { - const embeddingModelProviders = providers.embeddingModelProviders + const embeddingModelProviders = providers.embeddingModelProviders; if ( !embeddingModelProviders || Object.keys(embeddingModelProviders).length === 0 ) - return toast.error('No embedding models available') + return toast.error('No embedding models available'); - embeddingModelProvider = Object.keys(embeddingModelProviders)[0] + embeddingModelProvider = Object.keys(embeddingModelProviders)[0]; embeddingModel = Object.keys( embeddingModelProviders[embeddingModelProvider], - )[0] + )[0]; } - localStorage.setItem('chatModel', chatModel!) - localStorage.setItem('chatModelProvider', chatModelProvider) - localStorage.setItem('embeddingModel', embeddingModel!) 
- localStorage.setItem('embeddingModelProvider', embeddingModelProvider) + localStorage.setItem('chatModel', chatModel!); + localStorage.setItem('chatModelProvider', chatModelProvider); + localStorage.setItem('embeddingModel', embeddingModel!); + localStorage.setItem( + 'embeddingModelProvider', + embeddingModelProvider, + ); } else { - const chatModelProviders = providers.chatModelProviders - const embeddingModelProviders = providers.embeddingModelProviders + const chatModelProviders = providers.chatModelProviders; + const embeddingModelProviders = providers.embeddingModelProviders; if ( Object.keys(chatModelProviders).length > 0 && !chatModelProviders[chatModelProvider] ) { - chatModelProvider = Object.keys(chatModelProviders)[0] - localStorage.setItem('chatModelProvider', chatModelProvider) + chatModelProvider = Object.keys(chatModelProviders)[0]; + localStorage.setItem('chatModelProvider', chatModelProvider); } if ( @@ -110,19 +113,19 @@ const useSocket = ( chatModelProvider != 'custom_openai' && !chatModelProviders[chatModelProvider][chatModel] ) { - chatModel = Object.keys(chatModelProviders[chatModelProvider])[0] - localStorage.setItem('chatModel', chatModel) + chatModel = Object.keys(chatModelProviders[chatModelProvider])[0]; + localStorage.setItem('chatModel', chatModel); } if ( Object.keys(embeddingModelProviders).length > 0 && !embeddingModelProviders[embeddingModelProvider] ) { - embeddingModelProvider = Object.keys(embeddingModelProviders)[0] + embeddingModelProvider = Object.keys(embeddingModelProviders)[0]; localStorage.setItem( 'embeddingModelProvider', embeddingModelProvider, - ) + ); } if ( @@ -131,77 +134,77 @@ const useSocket = ( ) { embeddingModel = Object.keys( embeddingModelProviders[embeddingModelProvider], - )[0] - localStorage.setItem('embeddingModel', embeddingModel) + )[0]; + localStorage.setItem('embeddingModel', embeddingModel); } } - const wsURL = new URL(url) - const searchParams = new URLSearchParams({}) + const wsURL = new URL(url); + const searchParams = new URLSearchParams({}); - searchParams.append('chatModel', chatModel!) - searchParams.append('chatModelProvider', chatModelProvider) + searchParams.append('chatModel', chatModel!); + searchParams.append('chatModelProvider', chatModelProvider); if (chatModelProvider === 'custom_openai') { searchParams.append( 'openAIApiKey', localStorage.getItem('openAIApiKey')!, - ) + ); searchParams.append( 'openAIBaseURL', localStorage.getItem('openAIBaseURL')!, - ) + ); } - searchParams.append('embeddingModel', embeddingModel!) - searchParams.append('embeddingModelProvider', embeddingModelProvider) + searchParams.append('embeddingModel', embeddingModel!); + searchParams.append('embeddingModelProvider', embeddingModelProvider); - wsURL.search = searchParams.toString() + wsURL.search = searchParams.toString(); - const ws = new WebSocket(wsURL.toString()) + const ws = new WebSocket(wsURL.toString()); const timeoutId = setTimeout(() => { if (ws.readyState !== 1) { toast.error( 'Failed to connect to the server. 
Please try again later.', - ) + ); } - }, 10000) + }, 10000); ws.onopen = () => { - console.log('[DEBUG] open') - clearTimeout(timeoutId) - setIsWSReady(true) - } + console.log('[DEBUG] open'); + clearTimeout(timeoutId); + setIsWSReady(true); + }; ws.onerror = () => { - clearTimeout(timeoutId) - setError(true) - toast.error('WebSocket connection error.') - } + clearTimeout(timeoutId); + setError(true); + toast.error('WebSocket connection error.'); + }; ws.onclose = () => { - clearTimeout(timeoutId) - setError(true) - console.log('[DEBUG] closed') - } + clearTimeout(timeoutId); + setError(true); + console.log('[DEBUG] closed'); + }; - ws.addEventListener('message', e => { - const data = JSON.parse(e.data) + ws.addEventListener('message', (e) => { + const data = JSON.parse(e.data); if (data.type === 'error') { - toast.error(data.data) + toast.error(data.data); } - }) + }); - setWs(ws) - } + setWs(ws); + }; - connectWs() + connectWs(); } - }, [ws, url, setIsWSReady, setError]) + }, [ws, url, setIsWSReady, setError]); - return ws -} + return ws; +}; const loadMessages = async ( chatId: string, @@ -219,66 +222,66 @@ const loadMessages = async ( 'Content-Type': 'application/json', }, }, - ) + ); if (res.status === 404) { - setNotFound(true) - setIsMessagesLoaded(true) - return + setNotFound(true); + setIsMessagesLoaded(true); + return; } - const data = await res.json() + const data = await res.json(); const messages = data.messages.map((msg: any) => { return { ...msg, ...JSON.parse(msg.metadata), - } - }) as Message[] + }; + }) as Message[]; - setMessages(messages) + setMessages(messages); - const history = messages.map(msg => { - return [msg.role, msg.content] - }) as [string, string][] + const history = messages.map((msg) => { + return [msg.role, msg.content]; + }) as [string, string][]; - console.log('[DEBUG] messages loaded') + console.log('[DEBUG] messages loaded'); - document.title = messages[0].content + document.title = messages[0].content; - setChatHistory(history) - setFocusMode(data.chat.focusMode) - setIsMessagesLoaded(true) -} + setChatHistory(history); + setFocusMode(data.chat.focusMode); + setIsMessagesLoaded(true); +}; const ChatWindow = ({ id }: { id?: string }) => { - const searchParams = useSearchParams() - const initialMessage = searchParams.get('q') + const searchParams = useSearchParams(); + const initialMessage = searchParams.get('q'); - const [chatId, setChatId] = useState<string | undefined>(id) - const [newChatCreated, setNewChatCreated] = useState(false) + const [chatId, setChatId] = useState<string | undefined>(id); + const [newChatCreated, setNewChatCreated] = useState(false); - const [hasError, setHasError] = useState(false) - const [isReady, setIsReady] = useState(false) + const [hasError, setHasError] = useState(false); + const [isReady, setIsReady] = useState(false); - const [isWSReady, setIsWSReady] = useState(false) + const [isWSReady, setIsWSReady] = useState(false); const ws = useSocket( process.env.NEXT_PUBLIC_WS_URL!, setIsWSReady, setHasError, - ) + ); - const [loading, setLoading] = useState(false) - const [messageAppeared, setMessageAppeared] = useState(false) + const [loading, setLoading] = useState(false); + const [messageAppeared, setMessageAppeared] = useState(false); - const [chatHistory, setChatHistory] = useState<[string, string][]>([]) - const [messages, setMessages] = useState<Message[]>([]) + const [chatHistory, setChatHistory] = useState<[string, string][]>([]); + const [messages, setMessages] = useState<Message[]>([]); - const [focusMode, 
setFocusMode] = useState('webSearch') + const [focusMode, setFocusMode] = useState('webSearch'); - const [isMessagesLoaded, setIsMessagesLoaded] = useState(false) + const [isMessagesLoaded, setIsMessagesLoaded] = useState(false); - const [notFound, setNotFound] = useState(false) + const [notFound, setNotFound] = useState(false); useEffect(() => { if ( @@ -294,46 +297,46 @@ const ChatWindow = ({ id }: { id?: string }) => { setChatHistory, setFocusMode, setNotFound, - ) + ); } else if (!chatId) { - setNewChatCreated(true) - setIsMessagesLoaded(true) - setChatId(crypto.randomBytes(20).toString('hex')) + setNewChatCreated(true); + setIsMessagesLoaded(true); + setChatId(crypto.randomBytes(20).toString('hex')); } // eslint-disable-next-line react-hooks/exhaustive-deps - }, []) + }, []); useEffect(() => { return () => { if (ws?.readyState === 1) { - ws.close() - console.log('[DEBUG] closed') + ws.close(); + console.log('[DEBUG] closed'); } - } - }, []) + }; + }, []); - const messagesRef = useRef<Message[]>([]) + const messagesRef = useRef<Message[]>([]); useEffect(() => { - messagesRef.current = messages - }, [messages]) + messagesRef.current = messages; + }, [messages]); useEffect(() => { if (isMessagesLoaded && isWSReady) { - setIsReady(true) + setIsReady(true); } - }, [isMessagesLoaded, isWSReady]) + }, [isMessagesLoaded, isWSReady]); const sendMessage = async (message: string) => { - if (loading) return - setLoading(true) - setMessageAppeared(false) + if (loading) return; + setLoading(true); + setMessageAppeared(false); - let sources: Document[] | undefined = undefined - let recievedMessage = '' - let added = false + let sources: Document[] | undefined = undefined; + let recievedMessage = ''; + let added = false; - const messageId = crypto.randomBytes(7).toString('hex') + const messageId = crypto.randomBytes(7).toString('hex'); ws?.send( JSON.stringify({ @@ -345,9 +348,9 @@ const ChatWindow = ({ id }: { id?: string }) => { focusMode: focusMode, history: [...chatHistory, ['human', message]], }), - ) + ); - setMessages(prevMessages => [ + setMessages((prevMessages) => [ ...prevMessages, { content: message, @@ -356,21 +359,21 @@ const ChatWindow = ({ id }: { id?: string }) => { role: 'user', createdAt: new Date(), }, - ]) + ]); const messageHandler = async (e: MessageEvent) => { - const data = JSON.parse(e.data) + const data = JSON.parse(e.data); if (data.type === 'error') { - toast.error(data.data) - setLoading(false) - return + toast.error(data.data); + setLoading(false); + return; } if (data.type === 'sources') { - sources = data.data + sources = data.data; if (!added) { - setMessages(prevMessages => [ + setMessages((prevMessages) => [ ...prevMessages, { content: '', @@ -380,15 +383,15 @@ const ChatWindow = ({ id }: { id?: string }) => { sources: sources, createdAt: new Date(), }, - ]) - added = true + ]); + added = true; } - setMessageAppeared(true) + setMessageAppeared(true); } if (data.type === 'message') { if (!added) { - setMessages(prevMessages => [ + setMessages((prevMessages) => [ ...prevMessages, { content: data.data, @@ -398,35 +401,35 @@ const ChatWindow = ({ id }: { id?: string }) => { sources: sources, createdAt: new Date(), }, - ]) - added = true + ]); + added = true; } - setMessages(prev => - prev.map(message => { + setMessages((prev) => + prev.map((message) => { if (message.messageId === data.messageId) { - return { ...message, content: message.content + data.data } + return { ...message, content: message.content + data.data }; } - return message + return message; }), - ) + ); 
- recievedMessage += data.data - setMessageAppeared(true) + recievedMessage += data.data; + setMessageAppeared(true); } if (data.type === 'messageEnd') { - setChatHistory(prevHistory => [ + setChatHistory((prevHistory) => [ ...prevHistory, ['human', message], ['assistant', recievedMessage], - ]) + ]); - ws?.removeEventListener('message', messageHandler) - setLoading(false) + ws?.removeEventListener('message', messageHandler); + setLoading(false); - const lastMsg = messagesRef.current[messagesRef.current.length - 1] + const lastMsg = messagesRef.current[messagesRef.current.length - 1]; if ( lastMsg.role === 'assistant' && @@ -434,54 +437,54 @@ const ChatWindow = ({ id }: { id?: string }) => { lastMsg.sources.length > 0 && !lastMsg.suggestions ) { - const suggestions = await getSuggestions(messagesRef.current) - setMessages(prev => - prev.map(msg => { + const suggestions = await getSuggestions(messagesRef.current); + setMessages((prev) => + prev.map((msg) => { if (msg.messageId === lastMsg.messageId) { - return { ...msg, suggestions: suggestions } + return { ...msg, suggestions: suggestions }; } - return msg + return msg; }), - ) + ); } } - } + }; - ws?.addEventListener('message', messageHandler) - } + ws?.addEventListener('message', messageHandler); + }; const rewrite = (messageId: string) => { - const index = messages.findIndex(msg => msg.messageId === messageId) + const index = messages.findIndex((msg) => msg.messageId === messageId); - if (index === -1) return + if (index === -1) return; - const message = messages[index - 1] + const message = messages[index - 1]; - setMessages(prev => { - return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)] - }) - setChatHistory(prev => { - return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)] - }) + setMessages((prev) => { + return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)]; + }); + setChatHistory((prev) => { + return [...prev.slice(0, messages.length > 2 ? index - 1 : 0)]; + }); - sendMessage(message.content) - } + sendMessage(message.content); + }; useEffect(() => { if (isReady && initialMessage) { - sendMessage(initialMessage) + sendMessage(initialMessage); } // eslint-disable-next-line react-hooks/exhaustive-deps - }, [isReady, initialMessage]) + }, [isReady, initialMessage]); if (hasError) { return ( - <div className='flex flex-col items-center justify-center min-h-screen'> - <p className='dark:text-white/70 text-black/70 text-sm'> + <div className="flex flex-col items-center justify-center min-h-screen"> + <p className="dark:text-white/70 text-black/70 text-sm"> Failed to connect to the server. Please try again later. </p> </div> - ) + ); } return isReady ? 
( @@ -510,25 +513,25 @@ const ChatWindow = ({ id }: { id?: string }) => { </div> ) ) : ( - <div className='flex flex-row items-center justify-center min-h-screen'> + <div className="flex flex-row items-center justify-center min-h-screen"> <svg - aria-hidden='true' - className='w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]' - viewBox='0 0 100 101' - fill='none' - xmlns='http://www.w3.org/2000/svg' + aria-hidden="true" + className="w-8 h-8 text-light-200 fill-light-secondary dark:text-[#202020] animate-spin dark:fill-[#ffffff3b]" + viewBox="0 0 100 101" + fill="none" + xmlns="http://www.w3.org/2000/svg" > <path - d='M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z' - fill='currentColor' + d="M100 50.5908C100.003 78.2051 78.1951 100.003 50.5908 100C22.9765 99.9972 0.997224 78.018 1 50.4037C1.00281 22.7993 22.8108 0.997224 50.4251 1C78.0395 1.00281 100.018 22.8108 100 50.4251ZM9.08164 50.594C9.06312 73.3997 27.7909 92.1272 50.5966 92.1457C73.4023 92.1642 92.1298 73.4365 92.1483 50.6308C92.1669 27.8251 73.4392 9.0973 50.6335 9.07878C27.8278 9.06026 9.10003 27.787 9.08164 50.594Z" + fill="currentColor" /> <path - d='M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z' - fill='currentFill' + d="M93.9676 39.0409C96.393 38.4037 97.8624 35.9116 96.9801 33.5533C95.1945 28.8227 92.871 24.3692 90.0681 20.348C85.6237 14.1775 79.4473 9.36872 72.0454 6.45794C64.6435 3.54717 56.3134 2.65431 48.3133 3.89319C45.869 4.27179 44.3768 6.77534 45.014 9.20079C45.6512 11.6262 48.1343 13.0956 50.5786 12.717C56.5073 11.8281 62.5542 12.5399 68.0406 14.7911C73.527 17.0422 78.2187 20.7487 81.5841 25.4923C83.7976 28.5886 85.4467 32.059 86.4416 35.7474C87.1273 38.1189 89.5423 39.6781 91.9676 39.0409Z" + fill="currentFill" /> </svg> </div> - ) -} + ); +}; -export default ChatWindow +export default ChatWindow; From 92abbc5b981b9809466c00363c7f90ecbcd1d857 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:54:37 +0530 Subject: [PATCH 07/30] feat(webSearchRetriever): use `question` instead of `input` --- src/agents/webSearchAgent.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/agents/webSearchAgent.ts b/src/agents/webSearchAgent.ts index 159023e..77ec181 100644 --- a/src/agents/webSearchAgent.ts +++ b/src/agents/webSearchAgent.ts @@ -248,7 +248,7 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => { return { query: question, docs: docs }; } else { - const res = await searchSearxng(input, { + const res = await searchSearxng(question, { language: 'en', }); @@ -264,7 +264,7 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => { }), ); - return { query: input, docs: documents }; + 
return { query: question, docs: documents }; } }), ]); From c4f52adb456d16ddc957edeafa82de27c3a9f953 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Mon, 2 Sep 2024 11:44:40 +0530 Subject: [PATCH 08/30] feat(textarea): handle "/" keys --- ui/components/EmptyChatMessageInput.tsx | 21 ++++++++++++++------- ui/components/MessageInput.tsx | 21 ++++++++++++++------- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/ui/components/EmptyChatMessageInput.tsx b/ui/components/EmptyChatMessageInput.tsx index 0ff9b2e..39d3f16 100644 --- a/ui/components/EmptyChatMessageInput.tsx +++ b/ui/components/EmptyChatMessageInput.tsx @@ -18,14 +18,21 @@ const EmptyChatMessageInput = ({ const inputRef = useRef<HTMLTextAreaElement | null>(null); - const handleKeyDown = (e: KeyboardEvent) => { - if (e.key === '/') { - e.preventDefault(); - inputRef.current?.focus(); - } - }; - useEffect(() => { + const handleKeyDown = (e: KeyboardEvent) => { + const activeElement = document.activeElement; + + const isInputFocused = + activeElement?.tagName === 'INPUT' || + activeElement?.tagName === 'TEXTAREA' || + activeElement?.hasAttribute('contenteditable'); + + if (e.key === '/' && !isInputFocused) { + e.preventDefault(); + inputRef.current?.focus(); + } + }; + document.addEventListener('keydown', handleKeyDown); return () => { diff --git a/ui/components/MessageInput.tsx b/ui/components/MessageInput.tsx index 2229cdf..05d44a6 100644 --- a/ui/components/MessageInput.tsx +++ b/ui/components/MessageInput.tsx @@ -27,14 +27,21 @@ const MessageInput = ({ const inputRef = useRef<HTMLTextAreaElement | null>(null); - const handleKeyDown = (e: KeyboardEvent) => { - if (e.key === '/') { - e.preventDefault(); - inputRef.current?.focus(); - } - }; - useEffect(() => { + const handleKeyDown = (e: KeyboardEvent) => { + const activeElement = document.activeElement; + + const isInputFocused = + activeElement?.tagName === 'INPUT' || + activeElement?.tagName === 'TEXTAREA' || + activeElement?.hasAttribute('contenteditable'); + + if (e.key === '/' && !isInputFocused) { + e.preventDefault(); + inputRef.current?.focus(); + } + }; + document.addEventListener('keydown', handleKeyDown); return () => { From 07e5615860ad420e147410731f9f350839f97bf7 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:54:54 +0530 Subject: [PATCH 09/30] feat(docker-compose): link `config.toml` as vol. 
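Mounting the host `config.toml` into the container keeps the backend configuration editable without rebuilding the image: edits on the host take effect the next time the container starts. A minimal sketch of the resulting service volumes, assuming `config.toml` sits next to `docker-compose.yaml` on the host:

    volumes:
      - backend-dbstore:/home/perplexica/data
      # bind mount: the backend reads this file via loadConfig() in src/config.ts
      - ./config.toml:/home/perplexica/config.toml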
--- docker-compose.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index d6f9203..d3892e5 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -21,6 +21,7 @@ services: - 3001:3001 volumes: - backend-dbstore:/home/perplexica/data + - ./config.toml:/home/perplexica/config.toml extra_hosts: - 'host.docker.internal:host-gateway' networks: From 1fcd64ad421450bd1015e8cc52697c3a35d89c11 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 5 Sep 2024 18:40:07 +0530 Subject: [PATCH 10/30] feat(docker-file): use SearXNG URL from env --- backend.dockerfile | 3 +-- src/config.ts | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend.dockerfile b/backend.dockerfile index 4886573..0169218 100644 --- a/backend.dockerfile +++ b/backend.dockerfile @@ -1,6 +1,7 @@ FROM node:slim ARG SEARXNG_API_URL +ENV SEARXNG_API_URL=${SEARXNG_API_URL} WORKDIR /home/perplexica @@ -11,8 +12,6 @@ COPY drizzle.config.ts /home/perplexica/ COPY package.json /home/perplexica/ COPY yarn.lock /home/perplexica/ -RUN sed -i "s|SEARXNG = \".*\"|SEARXNG = \"${SEARXNG_API_URL}\"|g" /home/perplexica/config.toml - RUN mkdir /home/perplexica/data RUN yarn install diff --git a/src/config.ts b/src/config.ts index 9ebc182..bb69335 100644 --- a/src/config.ts +++ b/src/config.ts @@ -40,7 +40,8 @@ export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ; export const getAnthropicApiKey = () => loadConfig().API_KEYS.ANTHROPIC; -export const getSearxngApiEndpoint = () => loadConfig().API_ENDPOINTS.SEARXNG; +export const getSearxngApiEndpoint = () => + process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG; export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA; From 40f551c426e0ca67a76d2438d6404b7b7e77f877 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Sun, 15 Sep 2024 10:16:20 +0530 Subject: [PATCH 11/30] feat(search-button): add empty check --- ui/components/SearchImages.tsx | 2 +- ui/components/SearchVideos.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/components/SearchImages.tsx b/ui/components/SearchImages.tsx index b53b8b0..6025925 100644 --- a/ui/components/SearchImages.tsx +++ b/ui/components/SearchImages.tsx @@ -51,7 +51,7 @@ const SearchImages = ({ const data = await res.json(); - const images = data.images; + const images = data.images ?? []; setImages(images); setSlides( images.map((image: Image) => { diff --git a/ui/components/SearchVideos.tsx b/ui/components/SearchVideos.tsx index 2646322..74d4381 100644 --- a/ui/components/SearchVideos.tsx +++ b/ui/components/SearchVideos.tsx @@ -64,7 +64,7 @@ const Searchvideos = ({ const data = await res.json(); - const videos = data.videos; + const videos = data.videos ?? 
[]; setVideos(videos); setSlides( videos.map((video: Video) => { From 1589f16d5a00e913acc7d56f6fcb5f858a426048 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Tue, 24 Sep 2024 22:34:43 +0530 Subject: [PATCH 12/30] feat(providers): add `displayName` property --- src/lib/providers/anthropic.ts | 52 ++++++---- src/lib/providers/groq.ts | 161 ++++++++++++++++------------- src/lib/providers/ollama.ts | 26 +++-- src/lib/providers/openai.ts | 87 ++++++++++------ src/lib/providers/transformers.ts | 27 +++-- src/routes/config.ts | 64 +++++++----- src/routes/images.ts | 2 +- src/routes/suggestions.ts | 2 +- src/routes/videos.ts | 2 +- src/websocket/connectionManager.ts | 7 +- ui/components/SearchVideos.tsx | 2 +- ui/components/SettingsDialog.tsx | 28 +++-- 12 files changed, 277 insertions(+), 183 deletions(-) diff --git a/src/lib/providers/anthropic.ts b/src/lib/providers/anthropic.ts index 58cd164..90460c6 100644 --- a/src/lib/providers/anthropic.ts +++ b/src/lib/providers/anthropic.ts @@ -9,26 +9,38 @@ export const loadAnthropicChatModels = async () => { try { const chatModels = { - 'Claude 3.5 Sonnet': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-5-sonnet-20240620', - }), - 'Claude 3 Opus': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-opus-20240229', - }), - 'Claude 3 Sonnet': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-sonnet-20240229', - }), - 'Claude 3 Haiku': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-haiku-20240307', - }), + 'claude-3-5-sonnet-20240620': { + displayName: 'Claude 3.5 Sonnet', + model: new ChatAnthropic({ + temperature: 0.7, + anthropicApiKey: anthropicApiKey, + model: 'claude-3-5-sonnet-20240620', + }), + }, + 'claude-3-opus-20240229': { + displayName: 'Claude 3 Opus', + model: new ChatAnthropic({ + temperature: 0.7, + anthropicApiKey: anthropicApiKey, + model: 'claude-3-opus-20240229', + }), + }, + 'claude-3-sonnet-20240229': { + displayName: 'Claude 3 Sonnet', + model: new ChatAnthropic({ + temperature: 0.7, + anthropicApiKey: anthropicApiKey, + model: 'claude-3-sonnet-20240229', + }), + }, + 'claude-3-haiku-20240307': { + displayName: 'Claude 3 Haiku', + model: new ChatAnthropic({ + temperature: 0.7, + anthropicApiKey: anthropicApiKey, + model: 'claude-3-haiku-20240307', + }), + }, }; return chatModels; diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts index ffe8f6c..6249267 100644 --- a/src/lib/providers/groq.ts +++ b/src/lib/providers/groq.ts @@ -9,76 +9,97 @@ export const loadGroqChatModels = async () => { try { const chatModels = { - 'Llama 3.1 70B': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama-3.1-70b-versatile', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Llama 3.1 8B': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama-3.1-8b-instant', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'LLaMA3 8b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama3-8b-8192', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'LLaMA3 70b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama3-70b-8192', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Mixtral 8x7b': new ChatOpenAI( - { - 
openAIApiKey: groqApiKey, - modelName: 'mixtral-8x7b-32768', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Gemma 7b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'gemma-7b-it', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Gemma2 9b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'gemma2-9b-it', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), + 'llama-3.1-70b-versatile': { + displayName: 'Llama 3.1 70B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama-3.1-70b-versatile', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'llama-3.1-8b-instant': { + displayName: 'Llama 3.1 8B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama-3.1-8b-instant', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'llama3-8b-8192': { + displayName: 'LLaMA3 8B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama3-8b-8192', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'llama3-70b-8192': { + displayName: 'LLaMA3 70B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama3-70b-8192', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'mixtral-8x7b-32768': { + displayName: 'Mixtral 8x7B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'mixtral-8x7b-32768', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'gemma-7b-it': { + displayName: 'Gemma 7B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'gemma-7b-it', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'gemma2-9b-it': { + displayName: 'Gemma2 9B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'gemma2-9b-it', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, }; return chatModels; diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts index b2901ff..ed68bfa 100644 --- a/src/lib/providers/ollama.ts +++ b/src/lib/providers/ollama.ts @@ -18,11 +18,15 @@ export const loadOllamaChatModels = async () => { const { models: ollamaModels } = (await response.json()) as any; const chatModels = ollamaModels.reduce((acc, model) => { - acc[model.model] = new ChatOllama({ - baseUrl: ollamaEndpoint, - model: model.model, - temperature: 0.7, - }); + acc[model.model] = { + displayName: model.name, + model: new ChatOllama({ + baseUrl: ollamaEndpoint, + model: model.model, + temperature: 0.7, + }), + }; + return acc; }, {}); @@ -48,10 +52,14 @@ export const loadOllamaEmbeddingsModels = async () => { const { models: ollamaModels } = (await response.json()) as any; const embeddingsModels = ollamaModels.reduce((acc, model) => { - acc[model.model] = new OllamaEmbeddings({ - baseUrl: ollamaEndpoint, - model: model.model, - }); + acc[model.model] = { + displayName: model.name, + model: new OllamaEmbeddings({ + baseUrl: ollamaEndpoint, + model: model.model, + }), + }; + return acc; }, {}); diff --git a/src/lib/providers/openai.ts b/src/lib/providers/openai.ts index 8673954..3747e37 100644 --- a/src/lib/providers/openai.ts +++ b/src/lib/providers/openai.ts @@ -9,31 +9,46 @@ export const loadOpenAIChatModels = async () => { try { const chatModels = { - 'GPT-3.5 turbo': new 
ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-3.5-turbo', - temperature: 0.7, - }), - 'GPT-4': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4', - temperature: 0.7, - }), - 'GPT-4 turbo': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4-turbo', - temperature: 0.7, - }), - 'GPT-4 omni': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4o', - temperature: 0.7, - }), - 'GPT-4 omni mini': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4o-mini', - temperature: 0.7, - }), + 'gpt-3.5-turbo': { + displayName: 'GPT-3.5 Turbo', + model: new ChatOpenAI({ + openAIApiKey, + modelName: 'gpt-3.5-turbo', + temperature: 0.7, + }), + }, + 'gpt-4': { + displayName: 'GPT-4', + model: new ChatOpenAI({ + openAIApiKey, + modelName: 'gpt-4', + temperature: 0.7, + }), + }, + 'gpt-4-turbo': { + displayName: 'GPT-4 turbo', + model: new ChatOpenAI({ + openAIApiKey, + modelName: 'gpt-4-turbo', + temperature: 0.7, + }), + }, + 'gpt-4o': { + displayName: 'GPT-4 omni', + model: new ChatOpenAI({ + openAIApiKey, + modelName: 'gpt-4o', + temperature: 0.7, + }), + }, + 'gpt-4o-mini': { + displayName: 'GPT-4 omni mini', + model: new ChatOpenAI({ + openAIApiKey, + modelName: 'gpt-4o-mini', + temperature: 0.7, + }), + }, }; return chatModels; @@ -50,14 +65,20 @@ export const loadOpenAIEmbeddingsModels = async () => { try { const embeddingModels = { - 'Text embedding 3 small': new OpenAIEmbeddings({ - openAIApiKey, - modelName: 'text-embedding-3-small', - }), - 'Text embedding 3 large': new OpenAIEmbeddings({ - openAIApiKey, - modelName: 'text-embedding-3-large', - }), + 'text-embedding-3-small': { + displayName: 'Text Embedding 3 Small', + model: new OpenAIEmbeddings({ + openAIApiKey, + modelName: 'text-embedding-3-small', + }), + }, + 'text-embedding-3-large': { + displayName: 'Text Embedding 3 Large', + model: new OpenAIEmbeddings({ + openAIApiKey, + modelName: 'text-embedding-3-large', + }), + }, }; return embeddingModels; diff --git a/src/lib/providers/transformers.ts b/src/lib/providers/transformers.ts index 0ec7052..8a3417d 100644 --- a/src/lib/providers/transformers.ts +++ b/src/lib/providers/transformers.ts @@ -4,15 +4,24 @@ import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer'; export const loadTransformersEmbeddingsModels = async () => { try { const embeddingModels = { - 'BGE Small': new HuggingFaceTransformersEmbeddings({ - modelName: 'Xenova/bge-small-en-v1.5', - }), - 'GTE Small': new HuggingFaceTransformersEmbeddings({ - modelName: 'Xenova/gte-small', - }), - 'Bert Multilingual': new HuggingFaceTransformersEmbeddings({ - modelName: 'Xenova/bert-base-multilingual-uncased', - }), + 'xenova-bge-small-en-v1.5': { + displayName: 'BGE Small', + model: new HuggingFaceTransformersEmbeddings({ + modelName: 'Xenova/bge-small-en-v1.5', + }), + }, + 'xenova-gte-small': { + displayName: 'GTE Small', + model: new HuggingFaceTransformersEmbeddings({ + modelName: 'Xenova/gte-small', + }), + }, + 'xenova-bert-base-multilingual-uncased': { + displayName: 'Bert Multilingual', + model: new HuggingFaceTransformersEmbeddings({ + modelName: 'Xenova/bert-base-multilingual-uncased', + }), + }, }; return embeddingModels; diff --git a/src/routes/config.ts b/src/routes/config.ts index f255560..f635e4b 100644 --- a/src/routes/config.ts +++ b/src/routes/config.ts @@ -10,38 +10,54 @@ import { getOpenaiApiKey, updateConfig, } from '../config'; +import logger from '../utils/logger'; const router = express.Router(); router.get('/', async (_, res) => { - const config = {}; + try { + const config = {}; - const 
[chatModelProviders, embeddingModelProviders] = await Promise.all([ - getAvailableChatModelProviders(), - getAvailableEmbeddingModelProviders(), - ]); + const [chatModelProviders, embeddingModelProviders] = await Promise.all([ + getAvailableChatModelProviders(), + getAvailableEmbeddingModelProviders(), + ]); - config['chatModelProviders'] = {}; - config['embeddingModelProviders'] = {}; + config['chatModelProviders'] = {}; + config['embeddingModelProviders'] = {}; - for (const provider in chatModelProviders) { - config['chatModelProviders'][provider] = Object.keys( - chatModelProviders[provider], - ); + for (const provider in chatModelProviders) { + config['chatModelProviders'][provider] = Object.keys( + chatModelProviders[provider], + ).map((model) => { + return { + name: model, + displayName: chatModelProviders[provider][model].displayName, + }; + }); + } + + for (const provider in embeddingModelProviders) { + config['embeddingModelProviders'][provider] = Object.keys( + embeddingModelProviders[provider], + ).map((model) => { + return { + name: model, + displayName: embeddingModelProviders[provider][model].displayName, + }; + }); + } + + config['openaiApiKey'] = getOpenaiApiKey(); + config['ollamaApiUrl'] = getOllamaApiEndpoint(); + config['anthropicApiKey'] = getAnthropicApiKey(); + config['groqApiKey'] = getGroqApiKey(); + + res.status(200).json(config); + } catch (err: any) { + res.status(500).json({ message: 'An error has occurred.' }); + logger.error(`Error getting config: ${err.message}`); } - - for (const provider in embeddingModelProviders) { - config['embeddingModelProviders'][provider] = Object.keys( - embeddingModelProviders[provider], - ); - } - - config['openaiApiKey'] = getOpenaiApiKey(); - config['ollamaApiUrl'] = getOllamaApiEndpoint(); - config['anthropicApiKey'] = getAnthropicApiKey(); - config['groqApiKey'] = getGroqApiKey(); - - res.status(200).json(config); }); router.post('/', async (req, res) => { diff --git a/src/routes/images.ts b/src/routes/images.ts index 6bd43d3..7806ce7 100644 --- a/src/routes/images.ts +++ b/src/routes/images.ts @@ -26,7 +26,7 @@ router.post('/', async (req, res) => { let llm: BaseChatModel | undefined; if (chatModels[provider] && chatModels[provider][chatModel]) { - llm = chatModels[provider][chatModel] as BaseChatModel | undefined; + llm = chatModels[provider][chatModel].model as BaseChatModel | undefined; } if (!llm) { diff --git a/src/routes/suggestions.ts b/src/routes/suggestions.ts index b15ff5f..a75657e 100644 --- a/src/routes/suggestions.ts +++ b/src/routes/suggestions.ts @@ -26,7 +26,7 @@ router.post('/', async (req, res) => { let llm: BaseChatModel | undefined; if (chatModels[provider] && chatModels[provider][chatModel]) { - llm = chatModels[provider][chatModel] as BaseChatModel | undefined; + llm = chatModels[provider][chatModel].model as BaseChatModel | undefined; } if (!llm) { diff --git a/src/routes/videos.ts b/src/routes/videos.ts index 0ffdb2c..9d43fd2 100644 --- a/src/routes/videos.ts +++ b/src/routes/videos.ts @@ -26,7 +26,7 @@ router.post('/', async (req, res) => { let llm: BaseChatModel | undefined; if (chatModels[provider] && chatModels[provider][chatModel]) { - llm = chatModels[provider][chatModel] as BaseChatModel | undefined; + llm = chatModels[provider][chatModel].model as BaseChatModel | undefined; } if (!llm) { diff --git a/src/websocket/connectionManager.ts b/src/websocket/connectionManager.ts index 70e20d9..04797c5 100644 --- a/src/websocket/connectionManager.ts +++ b/src/websocket/connectionManager.ts @@ -45,9 
+45,8 @@ export const handleConnection = async ( chatModelProviders[chatModelProvider][chatModel] && chatModelProvider != 'custom_openai' ) { - llm = chatModelProviders[chatModelProvider][chatModel] as unknown as - | BaseChatModel - | undefined; + llm = chatModelProviders[chatModelProvider][chatModel] + .model as unknown as BaseChatModel | undefined; } else if (chatModelProvider == 'custom_openai') { llm = new ChatOpenAI({ modelName: chatModel, @@ -65,7 +64,7 @@ export const handleConnection = async ( ) { embeddings = embeddingModelProviders[embeddingModelProvider][ embeddingModel - ] as Embeddings | undefined; + ].model as Embeddings | undefined; } if (!llm || !embeddings) { diff --git a/ui/components/SearchVideos.tsx b/ui/components/SearchVideos.tsx index 74d4381..fec229c 100644 --- a/ui/components/SearchVideos.tsx +++ b/ui/components/SearchVideos.tsx @@ -64,7 +64,7 @@ const Searchvideos = ({ const data = await res.json(); - const videos = data.videos ?? []; + const videos = data.videos ?? []; setVideos(videos); setSlides( videos.map((video: Video) => { diff --git a/ui/components/SettingsDialog.tsx b/ui/components/SettingsDialog.tsx index 171e812..02358c5 100644 --- a/ui/components/SettingsDialog.tsx +++ b/ui/components/SettingsDialog.tsx @@ -49,10 +49,10 @@ export const Select = ({ className, options, ...restProps }: SelectProps) => { interface SettingsType { chatModelProviders: { - [key: string]: string[]; + [key: string]: [Record<string, any>]; }; embeddingModelProviders: { - [key: string]: string[]; + [key: string]: [Record<string, any>]; }; openaiApiKey: string; groqApiKey: string; @@ -68,6 +68,10 @@ const SettingsDialog = ({ setIsOpen: (isOpen: boolean) => void; }) => { const [config, setConfig] = useState<SettingsType | null>(null); + const [chatModels, setChatModels] = useState<Record<string, any>>({}); + const [embeddingModels, setEmbeddingModels] = useState<Record<string, any>>( + {}, + ); const [selectedChatModelProvider, setSelectedChatModelProvider] = useState< string | null >(null); @@ -118,7 +122,7 @@ const SettingsDialog = ({ const chatModel = localStorage.getItem('chatModel') || (data.chatModelProviders && - data.chatModelProviders[chatModelProvider]?.[0]) || + data.chatModelProviders[chatModelProvider]?.[0].name) || ''; const embeddingModelProvider = localStorage.getItem('embeddingModelProvider') || @@ -127,7 +131,7 @@ const SettingsDialog = ({ const embeddingModel = localStorage.getItem('embeddingModel') || (data.embeddingModelProviders && - data.embeddingModelProviders[embeddingModelProvider]?.[0]) || + data.embeddingModelProviders[embeddingModelProvider]?.[0].name) || ''; setSelectedChatModelProvider(chatModelProvider); @@ -136,6 +140,8 @@ const SettingsDialog = ({ setSelectedEmbeddingModel(embeddingModel); setCustomOpenAIApiKey(localStorage.getItem('openAIApiKey') || ''); setCustomOpenAIBaseURL(localStorage.getItem('openAIBaseURL') || ''); + setChatModels(data.chatModelProviders || {}); + setEmbeddingModels(data.embeddingModelProviders || {}); setIsLoading(false); }; @@ -229,7 +235,8 @@ const SettingsDialog = ({ setSelectedChatModel(''); } else { setSelectedChatModel( - config.chatModelProviders[e.target.value][0], + config.chatModelProviders[e.target.value][0] + .name, ); } }} @@ -264,8 +271,8 @@ const SettingsDialog = ({ return chatModelProvider ? chatModelProvider.length > 0 ? 
chatModelProvider.map((model) => ({ - value: model, - label: model, + value: model.name, + label: model.displayName, })) : [ { @@ -341,7 +348,8 @@ const SettingsDialog = ({ onChange={(e) => { setSelectedEmbeddingModelProvider(e.target.value); setSelectedEmbeddingModel( - config.embeddingModelProviders[e.target.value][0], + config.embeddingModelProviders[e.target.value][0] + .name, ); }} options={Object.keys( @@ -374,8 +382,8 @@ const SettingsDialog = ({ return embeddingModelProvider ? embeddingModelProvider.length > 0 ? embeddingModelProvider.map((model) => ({ - label: model, - value: model, + label: model.displayName, + value: model.name, })) : [ { From 2785cdd97a9e913d335f01eda6a5a7f5e6fe1f06 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 25 Sep 2024 15:27:48 +0530 Subject: [PATCH 13/30] feat(routes): add search route --- src/routes/index.ts | 2 + src/routes/search.ts | 150 ++++++++++++++++++++++++++++++++ src/websocket/messageHandler.ts | 2 +- 3 files changed, 153 insertions(+), 1 deletion(-) create mode 100644 src/routes/search.ts diff --git a/src/routes/index.ts b/src/routes/index.ts index af928ab..6e82e54 100644 --- a/src/routes/index.ts +++ b/src/routes/index.ts @@ -5,6 +5,7 @@ import configRouter from './config'; import modelsRouter from './models'; import suggestionsRouter from './suggestions'; import chatsRouter from './chats'; +import searchRouter from './search'; const router = express.Router(); @@ -14,5 +15,6 @@ router.use('/config', configRouter); router.use('/models', modelsRouter); router.use('/suggestions', suggestionsRouter); router.use('/chats', chatsRouter); +router.use('/search', searchRouter); export default router; diff --git a/src/routes/search.ts b/src/routes/search.ts new file mode 100644 index 0000000..8eb2490 --- /dev/null +++ b/src/routes/search.ts @@ -0,0 +1,150 @@ +import express from 'express'; +import logger from '../utils/logger'; +import { BaseChatModel } from 'langchain/chat_models/base'; +import { Embeddings } from 'langchain/embeddings/base'; +import { ChatOpenAI } from '@langchain/openai'; +import { + getAvailableChatModelProviders, + getAvailableEmbeddingModelProviders, +} from '../lib/providers'; +import { searchHandlers } from '../websocket/messageHandler'; +import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'; + +const router = express.Router(); + +interface chatModel { + provider: string; + model: string; + customOpenAIBaseURL?: string; + customOpenAIKey?: string; +} + +interface embeddingModel { + provider: string; + model: string; +} + +interface RequestBody { + focusMode: string; + chatModel?: chatModel; + embeddingModel?: embeddingModel; + query: string; + history: Array<[string, string]>; +} + +router.post('/', async (req, res) => { + try { + const body: RequestBody = req.body; + + if (!body.focusMode || !body.query) { + return res.status(400).json({ message: 'Missing focus mode or query' }); + } + + body.history = body.history || []; + + const history: BaseMessage[] = body.history.map((msg) => { + if (msg[0] === 'human') { + return new HumanMessage({ + content: msg[1], + }); + } else { + return new AIMessage({ + content: msg[1], + }); + } + }); + + const [chatModelProviders, embeddingModelProviders] = await Promise.all([ + getAvailableChatModelProviders(), + getAvailableEmbeddingModelProviders(), + ]); + + const chatModelProvider = + body.chatModel?.provider || Object.keys(chatModelProviders)[0]; + const chatModel = + body.chatModel?.model || + 
Object.keys(chatModelProviders[chatModelProvider])[0]; + + const embeddingModelProvider = + body.embeddingModel?.provider || Object.keys(embeddingModelProviders)[0]; + const embeddingModel = + body.embeddingModel?.model || + Object.keys(embeddingModelProviders[embeddingModelProvider])[0]; + + let llm: BaseChatModel | undefined; + let embeddings: Embeddings | undefined; + + if (body.chatModel?.provider === 'custom_openai') { + if ( + !body.chatModel?.customOpenAIBaseURL || + !body.chatModel?.customOpenAIKey + ) { + return res + .status(400) + .json({ message: 'Missing custom OpenAI base URL or key' }); + } + + llm = new ChatOpenAI({ + modelName: body.chatModel.model, + openAIApiKey: body.chatModel.customOpenAIKey, + temperature: 0.7, + configuration: { + baseURL: body.chatModel.customOpenAIBaseURL, + }, + }) as unknown as BaseChatModel; + } else if ( + chatModelProviders[chatModelProvider] && + chatModelProviders[chatModelProvider][chatModel] + ) { + llm = chatModelProviders[chatModelProvider][chatModel] + .model as unknown as BaseChatModel | undefined; + } + + if ( + embeddingModelProviders[embeddingModelProvider] && + embeddingModelProviders[embeddingModelProvider][embeddingModel] + ) { + embeddings = embeddingModelProviders[embeddingModelProvider][ + embeddingModel + ].model as Embeddings | undefined; + } + + if (!llm || !embeddings) { + return res.status(400).json({ message: 'Invalid model selected' }); + } + + const searchHandler = searchHandlers[body.focusMode]; + + if (!searchHandler) { + return res.status(400).json({ message: 'Invalid focus mode' }); + } + + const emitter = searchHandler(body.query, history, llm, embeddings); + + let message = ''; + let sources = []; + + emitter.on('data', (data) => { + const parsedData = JSON.parse(data); + if (parsedData.type === 'response') { + message += parsedData.data; + } else if (parsedData.type === 'sources') { + sources = parsedData.data; + } + }); + + emitter.on('end', () => { + res.status(200).json({ message, sources }); + }); + + emitter.on('error', (data) => { + const parsedData = JSON.parse(data); + res.status(500).json({ message: parsedData.data }); + }); + } catch (err: any) { + logger.error(`Error in getting search results: ${err.message}`); + res.status(500).json({ message: 'An error has occurred.' 
});
+  }
+});
+
+export default router;
diff --git a/src/websocket/messageHandler.ts b/src/websocket/messageHandler.ts
index 0afda9f..332910c 100644
--- a/src/websocket/messageHandler.ts
+++ b/src/websocket/messageHandler.ts
@@ -28,7 +28,7 @@ type WSMessage = {
   history: Array<[string, string]>;
 };
 
-const searchHandlers = {
+export const searchHandlers = {
   webSearch: handleWebSearch,
   academicSearch: handleAcademicSearch,
   writingAssistant: handleWritingAssistant,

From c52d6ac290dd22c7b3d762bb11448b100c8c919f Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 25 Sep 2024 16:54:07 +0530
Subject: [PATCH 14/30] feat(docs): add search API docs

---
 README.md          |   9 ++++
 docs/API/SEARCH.md | 105 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)
 create mode 100644 docs/API/SEARCH.md

diff --git a/README.md b/README.md
index 3c87acc..38cc1c2 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,7 @@
   - [Non-Docker Installation](#non-docker-installation)
   - [Ollama Connection Errors](#ollama-connection-errors)
 - [Using as a Search Engine](#using-as-a-search-engine)
+- [Using Perplexica's API](#using-perplexicas-api)
 - [One-Click Deployment](#one-click-deployment)
 - [Upcoming Features](#upcoming-features)
 - [Support Us](#support-us)
@@ -45,6 +46,7 @@ Want to know more about its architecture and how it works? You can read it [here
 - **Wolfram Alpha Search Mode:** Answers queries that need calculations or data analysis using Wolfram Alpha.
 - **Reddit Search Mode:** Searches Reddit for discussions and opinions related to the query.
 - **Current Information:** Some search tools might give you outdated info because they use data from crawling bots and convert them into embeddings and store them in a index. Unlike them, Perplexica uses SearxNG, a metasearch engine to get the results and rerank and get the most relevant source out of it, ensuring you always get the latest information without the overhead of daily data updates.
+- **API**: Integrate Perplexica into your existing applications and make use of its capabilities.
 
 It has many more features like image and video search. Some of the planned features are mentioned in [upcoming features](#upcoming-features).
 
@@ -125,6 +127,12 @@ If you wish to use Perplexica as an alternative to traditional search engines li
 3. Add a new site search with the following URL: `http://localhost:3000/?q=%s`. Replace `localhost` with your IP address or domain name, and `3000` with the port number if Perplexica is not hosted locally.
 4. Click the add button. Now, you can use Perplexica directly from your browser's search bar.
 
+## Using Perplexica's API
+
+Perplexica also provides an API for developers looking to integrate its powerful search engine into their own applications. You can run searches, customize models, and get results tailored to your needs.
+
+For more details, check out the full documentation [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/API/SEARCH.md).
+
 ## One-Click Deployment
 
 [![Deploy to RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploylobe.svg)](https://repocloud.io/details/?app_id=267)
@@ -135,6 +143,7 @@ If you wish to use Perplexica as an alternative to traditional search engines li
 - [x] Adding support for local LLMs
 - [x] History Saving features
 - [x] Introducing various Focus Modes
+- [x] Adding API support
 - [ ] Finalizing Copilot Mode
 - [ ] Adding Discover

diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
new file mode 100644
index 0000000..d3391c9
--- /dev/null
+++ b/docs/API/SEARCH.md
@@ -0,0 +1,105 @@
+# Perplexica Search API Documentation
+
+## Overview
+
+Perplexica’s Search API makes it easy to use our AI-powered search engine. You can run different types of searches, pick the models you want to use, and get the most recent info. Read through the sections below to learn more about Perplexica's search API.
+
+## Endpoint
+
+### **POST** `/api/search`
+
+### Request
+
+The API accepts a JSON object in the request body, where you define the focus mode, chat models, embedding models, and your query.
+
+#### Request Body Structure
+
+```json
+{
+  "chatModel": {
+    "provider": "openai",
+    "model": "gpt-4o-mini"
+  },
+  "embeddingModel": {
+    "provider": "openai",
+    "model": "text-embedding-3-large"
+  },
+  "focusMode": "webSearch",
+  "query": "What is Perplexica",
+  "history": []
+}
+```
+
+### Request Parameters
+
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query.
+
+  - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
+  - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
+  - Optional fields for custom OpenAI configuration:
+    - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
+    - `customOpenAIKey`: The API key for a custom OpenAI instance.
+
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching.
+
+  - `provider`: The provider for the embedding model (e.g., `openai`).
+  - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
+
+- **`focusMode`** (string, required): Specifies which focus mode to use. Available modes:
+
+  - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
+
+- **`query`** (string, required): The search query or question.
+
+- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
+  ```json
+  [
+    ["human", "What is Perplexica?"],
+    ["assistant", "Perplexica is an AI-powered search engine..."]
+  ]
+  ```
+
+### Response
+
+The response from the API includes both the final message and the sources used to generate that message.
+
+#### Example Response
+
+```json
+{
+  "message": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online.
Here are some key features and characteristics of Perplexica:\n\n- **AI-Powered Technology**: It utilizes advanced machine learning algorithms to not only retrieve information but also to understand the context and intent behind user queries, providing more relevant results [1][5].\n\n- **Open-Source**: Being open-source, Perplexica offers flexibility and transparency, allowing users to explore its functionalities without the constraints of proprietary software [3][10].", + "sources": [ + { + "pageContent": "Perplexica is an innovative, open-source AI-powered search engine designed to enhance the way users search for information online.", + "metadata": { + "title": "What is Perplexica, and how does it function as an AI-powered search ...", + "url": "https://askai.glarity.app/search/What-is-Perplexica--and-how-does-it-function-as-an-AI-powered-search-engine" + } + }, + { + "pageContent": "Perplexica is an open-source AI-powered search tool that dives deep into the internet to find precise answers.", + "metadata": { + "title": "Sahar Mor's Post", + "url": "https://www.linkedin.com/posts/sahar-mor_a-new-open-source-project-called-perplexica-activity-7204489745668694016-ncja" + } + } + .... + ] +} +``` + +### Fields in the Response + +- **`message`** (string): The search result, generated based on the query and focus mode. +- **`sources`** (array): A list of sources that were used to generate the search result. Each source includes: + - `pageContent`: A snippet of the relevant content from the source. + - `metadata`: Metadata about the source, including: + - `title`: The title of the webpage. + - `url`: The URL of the webpage. + +### Error Handling + +If an error occurs during the search process, the API will return an appropriate error message with an HTTP status code. + +- **400**: If the request is malformed or missing required fields (e.g., no focus mode or query). +- **500**: If an internal server error occurs during the search. 
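
As a quick way to exercise the endpoint documented above, here is a minimal TypeScript sketch of a client call (assuming the backend is reachable on its default port `3001`, as configured in `docker-compose.yaml`; the request body and response shape follow the docs above):

```typescript
// Minimal sketch: query the search API documented above.
// Assumes the backend is reachable at http://localhost:3001.
type SearchResponse = {
  message: string;
  sources: {
    pageContent: string;
    metadata: { title: string; url: string };
  }[];
};

async function search(query: string): Promise<SearchResponse> {
  const res = await fetch('http://localhost:3001/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ focusMode: 'webSearch', query, history: [] }),
  });

  if (!res.ok) {
    throw new Error(`Search request failed with status ${res.status}`);
  }

  return (await res.json()) as SearchResponse;
}

search('What is Perplexica').then((result) => {
  console.log(result.message);
  result.sources.forEach((s) =>
    console.log(`- ${s.metadata.title}: ${s.metadata.url}`),
  );
});
```
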
From ead2d98a9fb4eaf80fe126e004e86f7263b9afdc Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:54:19 +0530 Subject: [PATCH 15/30] feat(search): update types --- src/routes/search.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/routes/search.ts b/src/routes/search.ts index 8eb2490..9eec29f 100644 --- a/src/routes/search.ts +++ b/src/routes/search.ts @@ -24,7 +24,7 @@ interface embeddingModel { model: string; } -interface RequestBody { +interface ChatRequestBody { focusMode: string; chatModel?: chatModel; embeddingModel?: embeddingModel; @@ -34,7 +34,7 @@ interface RequestBody { router.post('/', async (req, res) => { try { - const body: RequestBody = req.body; + const body: ChatRequestBody = req.body; if (!body.focusMode || !body.query) { return res.status(400).json({ message: 'Missing focus mode or query' }); From 1cfa3398a3f2bfdb5fa9129a5d3115a97cdc813a Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:54:44 +0530 Subject: [PATCH 16/30] feat(package): bump version --- package.json | 2 +- ui/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index c09454c..ab45174 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "perplexica-backend", - "version": "1.9.0-rc2", + "version": "1.9.0-rc3", "license": "MIT", "author": "ItzCrazyKns", "scripts": { diff --git a/ui/package.json b/ui/package.json index 23afda2..04512b6 100644 --- a/ui/package.json +++ b/ui/package.json @@ -1,6 +1,6 @@ { "name": "perplexica-frontend", - "version": "1.9.0-rc2", + "version": "1.9.0-rc3", "license": "MIT", "author": "ItzCrazyKns", "scripts": { From a0aad69f62d3ac4c210e11e6aeb09c952fe8c219 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:56:41 +0530 Subject: [PATCH 17/30] feat(readme): update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 38cc1c2..f66e8b1 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ If you wish to use Perplexica as an alternative to traditional search engines li ## Using Perplexica's API -Perplexica also provides an API for developers looking to integrate its powerful search engine into their own applications. You can run searches, customize models, and get results tailored to your needs. +Perplexica also provides an API for developers looking to integrate its powerful search engine into their own applications. You can run searches, use multiple models and get answers to your queries. For more details, check out the full documentation [here](https://github.com/ItzCrazyKns/Perplexica/tree/master/docs/API/SEARCH.md). From 15203c123dff84685d5a9c57286bf35e7462f452 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 25 Sep 2024 17:49:16 +0530 Subject: [PATCH 18/30] feat(docs): update search docs --- docs/API/SEARCH.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md index d3391c9..714cbd8 100644 --- a/docs/API/SEARCH.md +++ b/docs/API/SEARCH.md @@ -6,7 +6,9 @@ Perplexica’s Search API makes it easy to use our AI-powered search engine. 
You
 
 ## Endpoint
 
-### **POST** `/api/search`
+### **POST** `http://localhost:3001/api/search`
+
+**Note**: Replace `3001` with the appropriate port if you've changed the default `PORT`.
 
 ### Request
 

From 8902abdcee7a1f7fe964381f7afb9e31acfb6136 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 25 Sep 2024 17:54:35 +0530
Subject: [PATCH 19/30] Update SEARCH.md

---
 docs/API/SEARCH.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
index 714cbd8..56d34ce 100644
--- a/docs/API/SEARCH.md
+++ b/docs/API/SEARCH.md
@@ -28,7 +28,10 @@ The API accepts a JSON object in the request body, where you define the focus mo
   },
   "focusMode": "webSearch",
   "query": "What is Perplexica",
-  "history": []
+  "history": [
+    ["human", "Hi, how are you?"],
+    ["assistant", "I am doing well, how can I help you today?"]
+  ]
 }
 ```
 
From e3488366c16ded6e8d60d3b77eb3ff2860db6026 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 25 Sep 2024 17:56:19 +0530
Subject: [PATCH 20/30] Update SEARCH.md

---
 docs/API/SEARCH.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
index 56d34ce..996a88b 100644
--- a/docs/API/SEARCH.md
+++ b/docs/API/SEARCH.md
@@ -37,7 +37,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
 
 ### Request Parameters
 
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query.
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request to `http://localhost:3001/api/models`.
 
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
   - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
   - Optional fields for custom OpenAI configuration:
    - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
    - `customOpenAIKey`: The API key for a custom OpenAI instance.
 
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching.
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request to `http://localhost:3001/api/models`.
+
 
   - `provider`: The provider for the embedding model (e.g., `openai`).
  - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
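
Since both parameter descriptions now point at `GET http://localhost:3001/api/models`, a small companion sketch of discovering model names before searching may help. Note the response shape used below is an assumption inferred from the reworked `/api/config` route earlier in this series (provider keys mapping to `{ name, displayName }` entries); it is not a documented contract:

```typescript
// Sketch: list available chat models before issuing a search.
// ASSUMPTION: the /api/models response mirrors the /api/config route,
// i.e. provider keys mapping to arrays of { name, displayName } entries.
type ModelInfo = { name: string; displayName: string };

async function listChatModels(): Promise<void> {
  const res = await fetch('http://localhost:3001/api/models');
  const data = (await res.json()) as {
    chatModelProviders?: Record<string, ModelInfo[]>;
  };

  for (const [provider, models] of Object.entries(
    data.chatModelProviders ?? {},
  )) {
    for (const m of models) {
      console.log(`${provider}: ${m.name} (${m.displayName})`);
    }
  }
}

listChatModels();
```
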
From 425a08432b29b96ed50378af0f87cba8272f1e03 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 26 Sep 2024 21:37:05 +0530 Subject: [PATCH 21/30] feat(groq): add Llama 3.2 --- src/lib/providers/groq.ts | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts index 6249267..69db4f7 100644 --- a/src/lib/providers/groq.ts +++ b/src/lib/providers/groq.ts @@ -9,6 +9,45 @@ export const loadGroqChatModels = async () => { try { const chatModels = { + 'llama-3.2-3b-preview': { + displayName: 'Llama 3.2 3B', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama-3.2-3b-preview', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'llama-3.2-11b-text-preview': { + displayName: 'Llama 3.2 11B Text', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama-3.2-11b-text-preview', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, + 'llama-3.2-90b-text-preview': { + displayName: 'Llama 3.2 90B Text', + model: new ChatOpenAI( + { + openAIApiKey: groqApiKey, + modelName: 'llama-3.2-90b-text-preview', + temperature: 0.7, + }, + { + baseURL: 'https://api.groq.com/openai/v1', + }, + ), + }, 'llama-3.1-70b-versatile': { displayName: 'Llama 3.1 70B', model: new ChatOpenAI( From fc5e35b1b10c3e05303a457e64c9f84404dba748 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Wed, 2 Oct 2024 21:59:40 +0530 Subject: [PATCH 22/30] feat(docker): add prebuilt images --- .github/workflows/docker-build.yaml | 46 +++++++++++++++++++++++++++++ backend.dockerfile | 1 - docker-compose.yaml | 2 ++ 3 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/docker-build.yaml diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml new file mode 100644 index 0000000..90ccbe4 --- /dev/null +++ b/.github/workflows/docker-build.yaml @@ -0,0 +1,46 @@ +name: Build & Push Docker Image + +on: + push: + branches: + - main + release: + types: [published] + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Extract version from release tag + if: github.event_name == 'release' + id: version + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + + - name: Build and push Docker image + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + run: | + docker build -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:main . + docker build -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:main . + docker push itzcrazykns1337/perplexica-backend:main + docker push itzcrazykns1337/perplexica-frontend:main + + - name: Build and push release Docker image + if: github.event_name == 'release' + run: | + docker build -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }} . + docker build -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }} . 
+          docker push itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }}
+          docker push itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }}
\ No newline at end of file
diff --git a/backend.dockerfile b/backend.dockerfile
index 0169218..66de9dc 100644
--- a/backend.dockerfile
+++ b/backend.dockerfile
@@ -7,7 +7,6 @@ WORKDIR /home/perplexica
 
 COPY src /home/perplexica/src
 COPY tsconfig.json /home/perplexica/
-COPY config.toml /home/perplexica/
 COPY drizzle.config.ts /home/perplexica/
 COPY package.json /home/perplexica/
 COPY yarn.lock /home/perplexica/
diff --git a/docker-compose.yaml b/docker-compose.yaml
index d3892e5..ad61ec2 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -15,6 +15,7 @@ services:
       dockerfile: backend.dockerfile
       args:
         - SEARXNG_API_URL=http://searxng:8080
+    image: itzcrazykns1337/perplexica-backend:main
     depends_on:
       - searxng
     ports:
@@ -35,6 +36,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+    image: itzcrazykns1337/perplexica-frontend:main
     depends_on:
       - perplexica-backend
     ports:

From dcfe43ebda6b46b83eda33d2f8b049901d80df3e Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:00:04 +0530
Subject: [PATCH 23/30] trigger build

---
 docs/API/SEARCH.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
index 996a88b..a573021 100644
--- a/docs/API/SEARCH.md
+++ b/docs/API/SEARCH.md
@@ -47,7 +47,6 @@ The API accepts a JSON object in the request body, where you define the focus mo
 
 - **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request to `http://localhost:3001/api/models`.
-
   - `provider`: The provider for the embedding model (e.g., `openai`).
   - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
 
From 4bba674134d2acce0d6a3d7f577125215a440546 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:00:46 +0530
Subject: [PATCH 24/30] feat(build-workflow): update branch

---
 .github/workflows/docker-build.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
index 90ccbe4..ac855e1 100644
--- a/.github/workflows/docker-build.yaml
+++ b/.github/workflows/docker-build.yaml
@@ -3,7 +3,7 @@ name: Build & Push Docker Image
 on:
   push:
     branches:
-      - main
+      - master
   release:
     types: [published]
 
From 1aaf172246e3900dc44ecfa0f8f34ce0a07893ed Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:01:49 +0530
Subject: [PATCH 25/30] feat(build-workflow): update head

---
 .github/workflows/docker-build.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
index ac855e1..6bf2c64 100644
--- a/.github/workflows/docker-build.yaml
+++ b/.github/workflows/docker-build.yaml
@@ -30,7 +30,7 @@ jobs:
         run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
 
       - name: Build and push Docker image
-        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
         run: |
           docker build -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:main .
           docker build -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:main .
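
With the image names and tags established above, the prebuilt images can be consumed directly without a local build. A rough sketch follows (the backend port comes from `docker-compose.yaml`, the frontend's port `3000` from the docs; the SearXNG URL shown is the compose-internal one, so plain `docker run` needs adjusted networking to make it resolvable):

```bash
# Sketch: run the prebuilt images published by the workflow above.
docker pull itzcrazykns1337/perplexica-backend:main
docker pull itzcrazykns1337/perplexica-frontend:main

# Backend publishes port 3001 and reads SEARXNG_API_URL from the environment.
docker run -d --name perplexica-backend -p 3001:3001 \
  -e SEARXNG_API_URL=http://searxng:8080 \
  itzcrazykns1337/perplexica-backend:main

# Frontend serves on port 3000.
docker run -d --name perplexica-frontend -p 3000:3000 \
  itzcrazykns1337/perplexica-frontend:main
```
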
From c233362e70aebf04fdfbb2da67e612ecc421ede3 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:53:45 +0530
Subject: [PATCH 26/30] feat(dockerfile): specify default args

---
 .gitignore     | 3 ++-
 app.dockerfile | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index a3dd5cc..8391d19 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,4 +35,5 @@ logs/
 Thumbs.db
 
 # Db
-db.sqlite
\ No newline at end of file
+db.sqlite
+/searxng
diff --git a/app.dockerfile b/app.dockerfile
index 105cf86..8337171 100644
--- a/app.dockerfile
+++ b/app.dockerfile
@@ -1,7 +1,7 @@
 FROM node:alpine
 
-ARG NEXT_PUBLIC_WS_URL
-ARG NEXT_PUBLIC_API_URL
+ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
 ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
 ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
 
From 9f88d16ef1dbbc3100f6fa76dd7538ea6da04383 Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:54:00 +0530
Subject: [PATCH 27/30] feat(docker-compose): use env vars from compose

---
 backend.dockerfile  | 3 ---
 docker-compose.yaml | 4 ++--
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/backend.dockerfile b/backend.dockerfile
index 66de9dc..70c30e8 100644
--- a/backend.dockerfile
+++ b/backend.dockerfile
@@ -1,8 +1,5 @@
 FROM node:slim
 
-ARG SEARXNG_API_URL
-ENV SEARXNG_API_URL=${SEARXNG_API_URL}
-
 WORKDIR /home/perplexica
 
 COPY src /home/perplexica/src
diff --git a/docker-compose.yaml b/docker-compose.yaml
index ad61ec2..46d82c6 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -13,9 +13,9 @@ services:
     build:
       context: .
       dockerfile: backend.dockerfile
-      args:
-        - SEARXNG_API_URL=http://searxng:8080
     image: itzcrazykns1337/perplexica-backend:main
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
     depends_on:
       - searxng
     ports:

From ae3fc5f80285b34d7e289498a9f6e1c61bef20aa Mon Sep 17 00:00:00 2001
From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:54:16 +0530
Subject: [PATCH 28/30] feat(docs): modify updating docs

---
 docs/installation/UPDATING.md | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/docs/installation/UPDATING.md b/docs/installation/UPDATING.md
index df67775..031a3e8 100644
--- a/docs/installation/UPDATING.md
+++ b/docs/installation/UPDATING.md
@@ -10,15 +10,21 @@ To update Perplexica to the latest version, follow these steps:
 git clone https://github.com/ItzCrazyKns/Perplexica.git
 ```
 
-2. Navigate to the Project Directory
+2. Navigate to the Project Directory.
 
-3. Update and Rebuild Docker Containers:
+3. Pull the latest images from the registry.
 
 ```bash
-docker compose up -d --build
+docker compose pull
 ```
 
-4. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+4. Update and recreate the containers.
+
+```bash
+docker compose up -d
+```
+
+5. Once the command completes, go to http://localhost:3000 and verify the latest changes.
## For non Docker users From 66f1e19ce88eaf167f09f66bbd196eaa89a9ffa5 Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:37:15 +0530 Subject: [PATCH 29/30] feat(image-build): use Docker buildx, publish multi arch images --- .github/workflows/docker-build.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml index 6bf2c64..63210ff 100644 --- a/.github/workflows/docker-build.yaml +++ b/.github/workflows/docker-build.yaml @@ -17,6 +17,8 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 + with: + install: true - name: Log in to DockerHub uses: docker/login-action@v2 @@ -32,15 +34,13 @@ jobs: - name: Build and push Docker image if: github.ref == 'refs/heads/master' && github.event_name == 'push' run: | - docker build -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:main . - docker build -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:main . - docker push itzcrazykns1337/perplexica-backend:main - docker push itzcrazykns1337/perplexica-frontend:main + docker buildx create --use + docker buildx build --platform linux/amd64,linux/arm64 -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:main --push . + docker buildx build --platform linux/amd64,linux/arm64 -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:main --push . - name: Build and push release Docker image if: github.event_name == 'release' run: | - docker build -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }} . - docker build -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }} . - docker push itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }} - docker push itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }} \ No newline at end of file + docker buildx create --use + docker buildx build --platform linux/amd64,linux/arm64 -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }} --push . + docker buildx build --platform linux/amd64,linux/arm64 -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }} --push . 
\ No newline at end of file From 1680a1786e7e81122c36743d580a4caafe19612e Mon Sep 17 00:00:00 2001 From: ItzCrazyKns <95534749+ItzCrazyKns@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:41:05 +0530 Subject: [PATCH 30/30] feat(image-build): improve build time by caching --- .github/workflows/docker-build.yaml | 40 +++++++++++++++++++++++------ app.dockerfile | 2 +- backend.dockerfile | 2 +- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml index 63210ff..3cd9044 100644 --- a/.github/workflows/docker-build.yaml +++ b/.github/workflows/docker-build.yaml @@ -1,4 +1,4 @@ -name: Build & Push Docker Image +name: Build & Push Docker Images on: push: @@ -10,7 +10,9 @@ on: jobs: build-and-push: runs-on: ubuntu-latest - + strategy: + matrix: + service: [backend, app] steps: - name: Checkout code uses: actions/checkout@v3 @@ -31,16 +33,38 @@ jobs: id: version run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV - - name: Build and push Docker image + - name: Build and push Docker image for ${{ matrix.service }} if: github.ref == 'refs/heads/master' && github.event_name == 'push' run: | docker buildx create --use - docker buildx build --platform linux/amd64,linux/arm64 -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:main --push . - docker buildx build --platform linux/amd64,linux/arm64 -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:main --push . + if [[ "${{ matrix.service }}" == "backend" ]]; then \ + DOCKERFILE=backend.dockerfile; \ + IMAGE_NAME=perplexica-backend; \ + else \ + DOCKERFILE=app.dockerfile; \ + IMAGE_NAME=perplexica-frontend; \ + fi + docker buildx build --platform linux/amd64,linux/arm64 \ + --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \ + --cache-to=type=inline \ + -f $DOCKERFILE \ + -t itzcrazykns1337/${IMAGE_NAME}:main \ + --push . - - name: Build and push release Docker image + - name: Build and push release Docker image for ${{ matrix.service }} if: github.event_name == 'release' run: | docker buildx create --use - docker buildx build --platform linux/amd64,linux/arm64 -f backend.dockerfile -t itzcrazykns1337/perplexica-backend:${{ env.RELEASE_VERSION }} --push . - docker buildx build --platform linux/amd64,linux/arm64 -f app.dockerfile -t itzcrazykns1337/perplexica-frontend:${{ env.RELEASE_VERSION }} --push . \ No newline at end of file + if [[ "${{ matrix.service }}" == "backend" ]]; then \ + DOCKERFILE=backend.dockerfile; \ + IMAGE_NAME=perplexica-backend; \ + else \ + DOCKERFILE=app.dockerfile; \ + IMAGE_NAME=perplexica-frontend; \ + fi + docker buildx build --platform linux/amd64,linux/arm64 \ + --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \ + --cache-to=type=inline \ + -f $DOCKERFILE \ + -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \ + --push . diff --git a/app.dockerfile b/app.dockerfile index 8337171..ff1824d 100644 --- a/app.dockerfile +++ b/app.dockerfile @@ -9,7 +9,7 @@ WORKDIR /home/perplexica COPY ui /home/perplexica/ -RUN yarn install +RUN yarn install --frozen-lockfile RUN yarn build CMD ["yarn", "start"] \ No newline at end of file diff --git a/backend.dockerfile b/backend.dockerfile index 70c30e8..b8d0155 100644 --- a/backend.dockerfile +++ b/backend.dockerfile @@ -10,7 +10,7 @@ COPY yarn.lock /home/perplexica/ RUN mkdir /home/perplexica/data -RUN yarn install +RUN yarn install --frozen-lockfile RUN yarn build CMD ["yarn", "start"] \ No newline at end of file