chore: Update dependencies and fix import paths

Author: Jin Yucong
Date:   2024-07-05 15:49:43 +08:00
Parent: 3b737a078a
Commit: 81c5e30fda

46 changed files with 1626 additions and 371 deletions
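
The changes are largely mechanical and repeat across files: Node builtins are now imported through the explicit node: protocol, forEach callbacks become for…of loops, abbreviated names (strParser, opts, err, db, acc) are spelled out, and truthiness ternaries become nullish coalescing. A condensed, illustrative sketch of those patterns (the type and sample data are assumed for the example, not taken from the repository):

// Illustrative only; the Result type and sample data are invented for this sketch.
import crypto from "node:crypto"; // was: import crypto from "crypto"; the node: prefix marks builtins explicitly

type Result = { content?: string; title: string };
const results: Result[] = [{ title: "Example" }, { content: "", title: "Fallback" }];

const pageContents: string[] = [];
for (const result of results) { // was: results.forEach(result => { ... });
  // `??` falls back only on null/undefined, unlike the old truthiness ternary
  // (result.content ? result.content : result.title), so an empty string is
  // now kept rather than replaced by the title.
  pageContents.push(result.content ?? result.title);
}

console.log(crypto.randomUUID(), pageContents); // logs a random UUID and ["Example", ""]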


@@ -8,7 +8,7 @@ import type { StreamEvent } from "@langchain/core/tracers/log_stream";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import formatChatHistoryAsString from "../utils/formatHistory";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import computeSimilarity from "../utils/computeSimilarity";
 import logger from "../utils/logger";
@@ -55,7 +55,7 @@ const basicAcademicSearchResponsePrompt = `
 Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -80,7 +80,7 @@ const createBasicAcademicSearchRetrieverChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
     PromptTemplate.fromTemplate(basicAcademicSearchRetrieverPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       if (input === "not_needed") {
         return { query: "", docs: [] };
@@ -108,30 +108,30 @@ const createBasicAcademicSearchRetrieverChain = (llm: BaseChatModel) => {
   ]);
 };
-const processDocs = async (docs: Document[]) => {
-  return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
-};
 const createBasicAcademicSearchAnsweringChain = (llm: BaseChatModel, embeddings: Embeddings) => {
   const basicAcademicSearchRetrieverChain = createBasicAcademicSearchRetrieverChain(llm);
+  const processDocs = async (docs: Document[]) => {
+    return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
+  };
   const rerankDocs = async ({ query, docs }: { query: string; docs: Document[] }) => {
     if (docs.length === 0) {
       return docs;
     }
-    const docsWithContent = docs.filter(doc => doc.pageContent && doc.pageContent.length > 0);
+    const docsWithContent = docs.filter(document => document.pageContent && document.pageContent.length > 0);
-    const [docEmbeddings, queryEmbedding] = await Promise.all([
-      embeddings.embedDocuments(docsWithContent.map(doc => doc.pageContent)),
+    const [documentEmbeddings, queryEmbedding] = await Promise.all([
+      embeddings.embedDocuments(docsWithContent.map(document => document.pageContent)),
       embeddings.embedQuery(query),
     ]);
-    const similarity = docEmbeddings.map((docEmbedding, i) => {
-      const sim = computeSimilarity(queryEmbedding, docEmbedding);
+    const similarity = documentEmbeddings.map((documentEmbedding, index) => {
+      const sim = computeSimilarity(queryEmbedding, documentEmbedding);
       return {
-        index: i,
+        index: index,
         similarity: sim,
       };
     });
@@ -167,7 +167,7 @@ const createBasicAcademicSearchAnsweringChain = (llm: BaseChatModel, embeddings:
       ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -190,9 +190,9 @@ const basicAcademicSearch = (query: string, history: BaseMessage[], llm: BaseCha
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in academic search: ${err}`);
+    logger.error(`Error in academic search: ${error}`);
   }
   return emitter;


@@ -32,7 +32,7 @@ type ImageSearchChainInput = {
   query: string;
 };
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const createImageSearchChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
@@ -46,7 +46,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
     }),
     PromptTemplate.fromTemplate(imageSearchChainPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ["bing images", "google images"],
@@ -54,7 +54,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
       const images = [];
-      res.results.forEach(result => {
+      for (const result of res.results) {
         if (result.img_src && result.url && result.title) {
           images.push({
             img_src: result.img_src,
@@ -62,7 +62,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
             title: result.title,
           });
         }
-      });
+      }
       return images.slice(0, 10);
     }),


@@ -8,7 +8,7 @@ import type { StreamEvent } from "@langchain/core/tracers/log_stream";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import formatChatHistoryAsString from "../utils/formatHistory";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import computeSimilarity from "../utils/computeSimilarity";
 import logger from "../utils/logger";
@@ -55,7 +55,7 @@ const basicRedditSearchResponsePrompt = `
 Anything between the \`context\` is retrieved from Reddit and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -80,7 +80,7 @@ const createBasicRedditSearchRetrieverChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
     PromptTemplate.fromTemplate(basicRedditSearchRetrieverPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       if (input === "not_needed") {
         return { query: "", docs: [] };
@@ -94,7 +94,7 @@ const createBasicRedditSearchRetrieverChain = (llm: BaseChatModel) => {
       const documents = res.results.map(
         result =>
           new Document({
-            pageContent: result.content ? result.content : result.title,
+            pageContent: result.content ?? result.title,
             metadata: {
               title: result.title,
               url: result.url,
@@ -108,30 +108,30 @@ const createBasicRedditSearchRetrieverChain = (llm: BaseChatModel) => {
   ]);
 };
-const processDocs = async (docs: Document[]) => {
-  return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
-};
 const createBasicRedditSearchAnsweringChain = (llm: BaseChatModel, embeddings: Embeddings) => {
   const basicRedditSearchRetrieverChain = createBasicRedditSearchRetrieverChain(llm);
+  const processDocs = async (docs: Document[]) => {
+    return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
+  };
   const rerankDocs = async ({ query, docs }: { query: string; docs: Document[] }) => {
     if (docs.length === 0) {
       return docs;
     }
-    const docsWithContent = docs.filter(doc => doc.pageContent && doc.pageContent.length > 0);
+    const docsWithContent = docs.filter(document => document.pageContent && document.pageContent.length > 0);
-    const [docEmbeddings, queryEmbedding] = await Promise.all([
-      embeddings.embedDocuments(docsWithContent.map(doc => doc.pageContent)),
+    const [documentEmbeddings, queryEmbedding] = await Promise.all([
+      embeddings.embedDocuments(docsWithContent.map(document => document.pageContent)),
       embeddings.embedQuery(query),
     ]);
-    const similarity = docEmbeddings.map((docEmbedding, i) => {
-      const sim = computeSimilarity(queryEmbedding, docEmbedding);
+    const similarity = documentEmbeddings.map((documentEmbedding, index) => {
+      const sim = computeSimilarity(queryEmbedding, documentEmbedding);
       return {
-        index: i,
+        index: index,
         similarity: sim,
       };
     });
@@ -168,7 +168,7 @@ const createBasicRedditSearchAnsweringChain = (llm: BaseChatModel, embeddings: E
       ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -190,9 +190,9 @@ const basicRedditSearch = (query: string, history: BaseMessage[], llm: BaseChatM
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in RedditSearch: ${err}`);
+    logger.error(`Error in RedditSearch: ${error}`);
   }
   return emitter;


@@ -32,7 +32,7 @@ type VideoSearchChainInput = {
   query: string;
 };
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const createVideoSearchChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
@@ -46,7 +46,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
     }),
     PromptTemplate.fromTemplate(VideoSearchChainPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       const res = await searchSearxng(input, {
         engines: ["youtube"],
@@ -54,7 +54,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
       const videos = [];
-      res.results.forEach(result => {
+      for (const result of res.results) {
         if (result.thumbnail && result.url && result.title && result.iframe_src) {
           videos.push({
             img_src: result.thumbnail,
@@ -63,7 +63,7 @@ const createVideoSearchChain = (llm: BaseChatModel) => {
             iframe_src: result.iframe_src,
           });
         }
-      });
+      }
       return videos.slice(0, 10);
     }),


@@ -8,7 +8,7 @@ import type { StreamEvent } from "@langchain/core/tracers/log_stream";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import formatChatHistoryAsString from "../utils/formatHistory";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import computeSimilarity from "../utils/computeSimilarity";
 import logger from "../utils/logger";
@@ -55,7 +55,7 @@ const basicWebSearchResponsePrompt = `
 Anything between the \`context\` is retrieved from a search engine and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -80,7 +80,7 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
     PromptTemplate.fromTemplate(basicSearchRetrieverPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       if (input === "not_needed") {
         return { query: "", docs: [] };
@@ -107,30 +107,30 @@ const createBasicWebSearchRetrieverChain = (llm: BaseChatModel) => {
   ]);
 };
-const processDocs = async (docs: Document[]) => {
-  return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
-};
 const createBasicWebSearchAnsweringChain = (llm: BaseChatModel, embeddings: Embeddings) => {
   const basicWebSearchRetrieverChain = createBasicWebSearchRetrieverChain(llm);
+  const processDocs = async (docs: Document[]) => {
+    return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
+  };
   const rerankDocs = async ({ query, docs }: { query: string; docs: Document[] }) => {
     if (docs.length === 0) {
       return docs;
     }
-    const docsWithContent = docs.filter(doc => doc.pageContent && doc.pageContent.length > 0);
+    const docsWithContent = docs.filter(document => document.pageContent && document.pageContent.length > 0);
-    const [docEmbeddings, queryEmbedding] = await Promise.all([
-      embeddings.embedDocuments(docsWithContent.map(doc => doc.pageContent)),
+    const [documentEmbeddings, queryEmbedding] = await Promise.all([
+      embeddings.embedDocuments(docsWithContent.map(document => document.pageContent)),
       embeddings.embedQuery(query),
     ]);
-    const similarity = docEmbeddings.map((docEmbedding, i) => {
-      const sim = computeSimilarity(queryEmbedding, docEmbedding);
+    const similarity = documentEmbeddings.map((documentEmbedding, index) => {
+      const sim = computeSimilarity(queryEmbedding, documentEmbedding);
       return {
-        index: i,
+        index: index,
         similarity: sim,
       };
     });
@@ -167,7 +167,7 @@ const createBasicWebSearchAnsweringChain = (llm: BaseChatModel, embeddings: Embe
       ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -190,9 +190,9 @@ const basicWebSearch = (query: string, history: BaseMessage[], llm: BaseChatMode
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in websearch: ${err}`);
+    logger.error(`Error in websearch: ${error}`);
   }
   return emitter;


@@ -8,7 +8,7 @@ import type { StreamEvent } from "@langchain/core/tracers/log_stream";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import formatChatHistoryAsString from "../utils/formatHistory";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import logger from "../utils/logger";
 const basicWolframAlphaSearchRetrieverPrompt = `
@@ -54,7 +54,7 @@ const basicWolframAlphaSearchResponsePrompt = `
 Anything between the \`context\` is retrieved from Wolfram Alpha and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -79,7 +79,7 @@ const createBasicWolframAlphaSearchRetrieverChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
     PromptTemplate.fromTemplate(basicWolframAlphaSearchRetrieverPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       if (input === "not_needed") {
         return { query: "", docs: [] };
@@ -107,13 +107,13 @@ const createBasicWolframAlphaSearchRetrieverChain = (llm: BaseChatModel) => {
   ]);
 };
-const processDocs = (docs: Document[]) => {
-  return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
-};
 const createBasicWolframAlphaSearchAnsweringChain = (llm: BaseChatModel) => {
   const basicWolframAlphaSearchRetrieverChain = createBasicWolframAlphaSearchRetrieverChain(llm);
+  const processDocs = (docs: Document[]) => {
+    return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
+  };
   return RunnableSequence.from([
     RunnableMap.from({
       query: (input: BasicChainInput) => input.query,
@@ -139,7 +139,7 @@ const createBasicWolframAlphaSearchAnsweringChain = (llm: BaseChatModel) => {
       ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -161,9 +161,9 @@ const basicWolframAlphaSearch = (query: string, history: BaseMessage[], llm: Bas
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in WolframAlphaSearch: ${err}`);
+    logger.error(`Error in WolframAlphaSearch: ${error}`);
   }
   return emitter;


@@ -3,7 +3,7 @@ import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts
 import { RunnableSequence } from "@langchain/core/runnables";
 import { StringOutputParser } from "@langchain/core/output_parsers";
 import type { StreamEvent } from "@langchain/core/tracers/log_stream";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import logger from "../utils/logger";
@@ -13,7 +13,7 @@ You are Perplexica, an AI model who is expert at searching the web and answering
 Since you are a writing assistant, you would not perform web searches. If you think you lack information to answer the query, you can ask the user for more information or suggest them to switch to a different focus mode.
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -34,7 +34,7 @@ const createWritingAssistantChain = (llm: BaseChatModel) => {
       ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -62,9 +62,9 @@ const handleWritingAssistant = (
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in writing assistant: ${err}`);
+    logger.error(`Error in writing assistant: ${error}`);
   }
   return emitter;


@@ -8,7 +8,7 @@ import type { StreamEvent } from "@langchain/core/tracers/log_stream";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import formatChatHistoryAsString from "../utils/formatHistory";
-import eventEmitter from "events";
+import eventEmitter from "node:events";
 import computeSimilarity from "../utils/computeSimilarity";
 import logger from "../utils/logger";
@@ -55,7 +55,7 @@ const basicYoutubeSearchResponsePrompt = `
 Anything between the \`context\` is retrieved from Youtube and is not a part of the conversation with the user. Today's date is ${new Date().toISOString()}
 `;
-const strParser = new StringOutputParser();
+const stringParser = new StringOutputParser();
 const handleStream = async (stream: AsyncGenerator<StreamEvent, unknown, unknown>, emitter: eventEmitter) => {
   for await (const event of stream) {
@@ -80,7 +80,7 @@ const createBasicYoutubeSearchRetrieverChain = (llm: BaseChatModel) => {
   return RunnableSequence.from([
     PromptTemplate.fromTemplate(basicYoutubeSearchRetrieverPrompt),
     llm,
-    strParser,
+    stringParser,
     RunnableLambda.from(async (input: string) => {
       if (input === "not_needed") {
         return { query: "", docs: [] };
@@ -94,7 +94,7 @@ const createBasicYoutubeSearchRetrieverChain = (llm: BaseChatModel) => {
       const documents = res.results.map(
         result =>
           new Document({
-            pageContent: result.content ? result.content : result.title,
+            pageContent: result.content ?? result.title,
             metadata: {
               title: result.title,
               url: result.url,
@@ -108,30 +108,30 @@ const createBasicYoutubeSearchRetrieverChain = (llm: BaseChatModel) => {
   ]);
 };
-const processDocs = async (docs: Document[]) => {
-  return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
-};
 const createBasicYoutubeSearchAnsweringChain = (llm: BaseChatModel, embeddings: Embeddings) => {
   const basicYoutubeSearchRetrieverChain = createBasicYoutubeSearchRetrieverChain(llm);
+  const processDocs = async (docs: Document[]) => {
+    return docs.map((_, index) => `${index + 1}. ${docs[index].pageContent}`).join("\n");
+  };
   const rerankDocs = async ({ query, docs }: { query: string; docs: Document[] }) => {
     if (docs.length === 0) {
       return docs;
     }
-    const docsWithContent = docs.filter(doc => doc.pageContent && doc.pageContent.length > 0);
+    const docsWithContent = docs.filter(document => document.pageContent && document.pageContent.length > 0);
-    const [docEmbeddings, queryEmbedding] = await Promise.all([
-      embeddings.embedDocuments(docsWithContent.map(doc => doc.pageContent)),
+    const [documentEmbeddings, queryEmbedding] = await Promise.all([
+      embeddings.embedDocuments(docsWithContent.map(document => document.pageContent)),
       embeddings.embedQuery(query),
     ]);
-    const similarity = docEmbeddings.map((docEmbedding, i) => {
-      const sim = computeSimilarity(queryEmbedding, docEmbedding);
+    const similarity = documentEmbeddings.map((documentEmbedding, index) => {
+      const sim = computeSimilarity(queryEmbedding, documentEmbedding);
       return {
-        index: i,
+        index: index,
         similarity: sim,
       };
     });
@@ -168,7 +168,7 @@ const createBasicYoutubeSearchAnsweringChain = (llm: BaseChatModel, embeddings:
      ["user", "{query}"],
     ]),
     llm,
-    strParser,
+    stringParser,
   ]).withConfig({
     runName: "FinalResponseGenerator",
   });
@@ -191,9 +191,9 @@ const basicYoutubeSearch = (query: string, history: BaseMessage[], llm: BaseChat
     );
     handleStream(stream, emitter);
-  } catch (err) {
+  } catch (error) {
     emitter.emit("error", JSON.stringify({ data: "An error has occurred please try again later" }));
-    logger.error(`Error in youtube search: ${err}`);
+    logger.error(`Error in youtube search: ${error}`);
   }
   return emitter;


@@ -1,7 +1,7 @@
 import { startWebSocketServer } from "./websocket";
 import express from "express";
 import cors from "cors";
-import http from "http";
+import http from "node:http";
 import routes from "./routes";
 import { getPort } from "./config";
 import logger from "./utils/logger";


@@ -1,5 +1,6 @@
-import fs from "fs";
-import path from "path";
+/* eslint-disable unicorn/prefer-module */
+import fs from "node:fs";
+import path from "node:path";
 import toml from "@iarna/toml";
 const configFileName = "config.toml";
@@ -24,7 +25,7 @@ type RecursivePartial<T> = {
 };
 const loadConfig = () =>
-  toml.parse(fs.readFileSync(path.join(__dirname, `../${configFileName}`), "utf-8")) as unknown as Config;
+  toml.parse(fs.readFileSync(path.join(__dirname, `../${configFileName}`), "utf8")) as unknown as Config;
 export const getPort = () => loadConfig().GENERAL.PORT;


@@ -3,8 +3,8 @@ import Database from "better-sqlite3";
 import * as schema from "./schema";
 const sqlite = new Database("data/db.sqlite");
-const db = drizzle(sqlite, {
+const database = drizzle(sqlite, {
   schema: schema,
 });
-export default db;
+export default database;


@ -1,7 +1,7 @@
import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings";
import { chunkArray } from "@langchain/core/utils/chunk_array";
export interface HuggingFaceTransformersEmbeddingsParams extends EmbeddingsParams {
export interface HuggingFaceTransformersEmbeddingsParameters extends EmbeddingsParams {
modelName: string;
model: string;
@ -13,7 +13,10 @@ export interface HuggingFaceTransformersEmbeddingsParams extends EmbeddingsParam
stripNewLines?: boolean;
}
export class HuggingFaceTransformersEmbeddings extends Embeddings implements HuggingFaceTransformersEmbeddingsParams {
export class HuggingFaceTransformersEmbeddings
extends Embeddings
implements HuggingFaceTransformersEmbeddingsParameters
{
modelName = "Xenova/all-MiniLM-L6-v2";
model = "Xenova/all-MiniLM-L6-v2";
@ -27,7 +30,7 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
// eslint-disable-next-line @typescript-eslint/no-explicit-any
private pipelinePromise: Promise<any>;
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParameters>) {
super(fields ?? {});
this.modelName = fields?.model ?? fields?.modelName ?? this.model;
@ -37,16 +40,15 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
}
async embedDocuments(texts: string[]): Promise<number[][]> {
const batches = chunkArray(this.stripNewLines ? texts.map(t => t.replace(/\n/g, " ")) : texts, this.batchSize);
const batches = chunkArray(this.stripNewLines ? texts.map(t => t.replaceAll("\n", " ")) : texts, this.batchSize);
const batchRequests = batches.map(batch => this.runEmbedding(batch));
const batchResponses = await Promise.all(batchRequests);
const embeddings: number[][] = [];
for (let i = 0; i < batchResponses.length; i += 1) {
const batchResponse = batchResponses[i];
for (let j = 0; j < batchResponse.length; j += 1) {
embeddings.push(batchResponse[j]);
for (const batchResponse of batchResponses) {
for (const element of batchResponse) {
embeddings.push(element);
}
}
@ -54,7 +56,7 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
}
async embedQuery(text: string): Promise<number[]> {
const data = await this.runEmbedding([this.stripNewLines ? text.replace(/\n/g, " ") : text]);
const data = await this.runEmbedding([this.stripNewLines ? text.replaceAll("\n", " ") : text]);
return data[0];
}


@@ -1,15 +1,15 @@
 import { BaseOutputParser } from "@langchain/core/output_parsers";
-interface LineListOutputParserArgs {
+interface LineListOutputParserArguments {
   key?: string;
 }
 class LineListOutputParser extends BaseOutputParser<string[]> {
   private key = "questions";
-  constructor(args?: LineListOutputParserArgs) {
+  constructor(arguments_?: LineListOutputParserArguments) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = arguments_.key ?? this.key;
   }
   static lc_name() {


@@ -36,8 +36,8 @@ export const getAvailableChatModelProviders = async () => {
           temperature: 0.7,
         }),
       };
-    } catch (err) {
-      logger.error(`Error loading OpenAI models: ${err}`);
+    } catch (error) {
+      logger.error(`Error loading OpenAI models: ${error}`);
     }
   }
@@ -85,8 +85,8 @@ export const getAvailableChatModelProviders = async () => {
           },
         ),
       };
-    } catch (err) {
-      logger.error(`Error loading Groq models: ${err}`);
+    } catch (error) {
+      logger.error(`Error loading Groq models: ${error}`);
     }
   }
@@ -101,16 +101,17 @@ export const getAvailableChatModelProviders = async () => {
       // eslint-disable-next-line @typescript-eslint/no-explicit-any
       const { models: ollamaModels } = (await response.json()) as any;
-      models["ollama"] = ollamaModels.reduce((acc, model) => {
-        acc[model.model] = new ChatOllama({
+      // eslint-disable-next-line unicorn/no-array-reduce
+      models["ollama"] = ollamaModels.reduce((accumulator, model) => {
+        accumulator[model.model] = new ChatOllama({
           baseUrl: ollamaEndpoint,
           model: model.model,
           temperature: 0.7,
         });
-        return acc;
+        return accumulator;
       }, {});
-    } catch (err) {
-      logger.error(`Error loading Ollama models: ${err}`);
+    } catch (error) {
+      logger.error(`Error loading Ollama models: ${error}`);
     }
   }
@@ -137,8 +138,8 @@ export const getAvailableEmbeddingModelProviders = async () => {
           modelName: "text-embedding-3-large",
         }),
       };
-    } catch (err) {
-      logger.error(`Error loading OpenAI embeddings: ${err}`);
+    } catch (error) {
+      logger.error(`Error loading OpenAI embeddings: ${error}`);
     }
   }
@@ -153,15 +154,16 @@ export const getAvailableEmbeddingModelProviders = async () => {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
       const { models: ollamaModels } = (await response.json()) as any;
-      models["ollama"] = ollamaModels.reduce((acc, model) => {
-        acc[model.model] = new OllamaEmbeddings({
+      // eslint-disable-next-line unicorn/no-array-reduce
+      models["ollama"] = ollamaModels.reduce((accumulator, model) => {
+        accumulator[model.model] = new OllamaEmbeddings({
           baseUrl: ollamaEndpoint,
           model: model.model,
         });
-        return acc;
+        return accumulator;
       }, {});
-    } catch (err) {
-      logger.error(`Error loading Ollama embeddings: ${err}`);
+    } catch (error) {
+      logger.error(`Error loading Ollama embeddings: ${error}`);
     }
   }
@@ -177,8 +179,8 @@ export const getAvailableEmbeddingModelProviders = async () => {
       modelName: "Xenova/bert-base-multilingual-uncased",
     }),
   };
-  } catch (err) {
-    logger.error(`Error loading local embeddings: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading local embeddings: ${error}`);
   }
   return models;


@@ -19,20 +19,20 @@ interface SearxngSearchResult {
   iframe_src?: string;
 }
-export const searchSearxng = async (query: string, opts?: SearxngSearchOptions) => {
+export const searchSearxng = async (query: string, options?: SearxngSearchOptions) => {
   const searxngURL = getSearxngApiEndpoint();
   const url = new URL(`${searxngURL}/search?format=json`);
   url.searchParams.append("q", query);
-  if (opts) {
-    Object.keys(opts).forEach(key => {
-      if (Array.isArray(opts[key])) {
-        url.searchParams.append(key, opts[key].join(","));
-        return;
+  if (options) {
+    for (const key of Object.keys(options)) {
+      if (Array.isArray(options[key])) {
+        url.searchParams.append(key, options[key].join(","));
+        continue;
       }
-      url.searchParams.append(key, opts[key]);
-    });
+      url.searchParams.append(key, options[key]);
+    }
   }
   const res = await axios.get(url.toString());


@@ -1,6 +1,6 @@
 import express from "express";
 import logger from "../utils/logger";
-import db from "../db/index";
+import database from "../db/index";
 import { eq } from "drizzle-orm";
 import { chats, messages } from "../db/schema";
@@ -8,55 +8,55 @@ const router = express.Router();
 router.get("/", async (_, res) => {
   try {
-    let chats = await db.query.chats.findMany();
+    let chats = await database.query.chats.findMany();
     chats = chats.reverse();
     return res.status(200).json({ chats: chats });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in getting chats: ${err.message}`);
+    logger.error(`Error in getting chats: ${error.message}`);
   }
 });
-router.get("/:id", async (req, res) => {
+router.get("/:id", async (request, res) => {
   try {
-    const chatExists = await db.query.chats.findFirst({
-      where: eq(chats.id, req.params.id),
+    const chatExists = await database.query.chats.findFirst({
+      where: eq(chats.id, request.params.id),
     });
     if (!chatExists) {
      return res.status(404).json({ message: "Chat not found" });
     }
-    const chatMessages = await db.query.messages.findMany({
-      where: eq(messages.chatId, req.params.id),
+    const chatMessages = await database.query.messages.findMany({
+      where: eq(messages.chatId, request.params.id),
    });
     return res.status(200).json({ chat: chatExists, messages: chatMessages });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in getting chat: ${err.message}`);
+    logger.error(`Error in getting chat: ${error.message}`);
   }
 });
-router.delete(`/:id`, async (req, res) => {
+router.delete(`/:id`, async (request, res) => {
   try {
-    const chatExists = await db.query.chats.findFirst({
-      where: eq(chats.id, req.params.id),
+    const chatExists = await database.query.chats.findFirst({
+      where: eq(chats.id, request.params.id),
     });
     if (!chatExists) {
       return res.status(404).json({ message: "Chat not found" });
     }
-    await db.delete(chats).where(eq(chats.id, req.params.id)).execute();
-    await db.delete(messages).where(eq(messages.chatId, req.params.id)).execute();
+    await database.delete(chats).where(eq(chats.id, request.params.id)).execute();
+    await database.delete(messages).where(eq(messages.chatId, request.params.id)).execute();
     return res.status(200).json({ message: "Chat deleted successfully" });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in deleting chat: ${err.message}`);
+    logger.error(`Error in deleting chat: ${error.message}`);
   }
 });


@@ -30,8 +30,8 @@ router.get("/", async (_, res) => {
   res.status(200).json(config);
 });
-router.post("/", async (req, res) => {
-  const config = req.body;
+router.post("/", async (request, res) => {
+  const config = request.body;
   const updatedConfig = {
     API_KEYS: {


@@ -7,16 +7,16 @@ import logger from "../utils/logger";
 const router = express.Router();
-router.post("/", async (req, res) => {
+router.post("/", async (request, res) => {
   try {
-    const { query, chat_history: raw_chat_history, chat_model_provider, chat_model } = req.body;
+    const { query, chat_history: raw_chat_history, chat_model_provider, chat_model } = request.body;
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    const chat_history = raw_chat_history.map((msg: any) => {
-      if (msg.role === "user") {
-        return new HumanMessage(msg.content);
-      } else if (msg.role === "assistant") {
-        return new AIMessage(msg.content);
+    const chat_history = raw_chat_history.map((message: any) => {
+      if (message.role === "user") {
+        return new HumanMessage(message.content);
+      } else if (message.role === "assistant") {
+        return new AIMessage(message.content);
       }
     });
@@ -38,9 +38,9 @@ router.post("/", async (req, res) => {
     const images = await handleImageSearch({ query, chat_history }, llm);
     res.status(200).json({ images });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in image search: ${err.message}`);
+    logger.error(`Error in image search: ${error.message}`);
   }
 });


@@ -4,7 +4,7 @@ import { getAvailableChatModelProviders, getAvailableEmbeddingModelProviders } f
 const router = express.Router();
-router.get("/", async (req, res) => {
+router.get("/", async (request, res) => {
   try {
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
       getAvailableChatModelProviders(),
@@ -12,9 +12,9 @@ router.get("/", async (req, res) => {
     ]);
     res.status(200).json({ chatModelProviders, embeddingModelProviders });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(err.message);
+    logger.error(error.message);
   }
 });


@@ -7,16 +7,16 @@ import logger from "../utils/logger";
 const router = express.Router();
-router.post("/", async (req, res) => {
+router.post("/", async (request, res) => {
   try {
-    const { chat_history: raw_chat_history, chat_model, chat_model_provider } = req.body;
+    const { chat_history: raw_chat_history, chat_model, chat_model_provider } = request.body;
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    const chat_history = raw_chat_history.map((msg: any) => {
-      if (msg.role === "user") {
-        return new HumanMessage(msg.content);
-      } else if (msg.role === "assistant") {
-        return new AIMessage(msg.content);
+    const chat_history = raw_chat_history.map((message: any) => {
+      if (message.role === "user") {
+        return new HumanMessage(message.content);
+      } else if (message.role === "assistant") {
+        return new AIMessage(message.content);
       }
     });
@@ -38,9 +38,9 @@ router.post("/", async (req, res) => {
     const suggestions = await generateSuggestions({ chat_history }, llm);
     res.status(200).json({ suggestions: suggestions });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in generating suggestions: ${err.message}`);
+    logger.error(`Error in generating suggestions: ${error.message}`);
   }
 });


@@ -7,16 +7,16 @@ import handleVideoSearch from "../agents/videoSearchAgent";
 const router = express.Router();
-router.post("/", async (req, res) => {
+router.post("/", async (request, res) => {
   try {
-    const { query, chat_history: raw_chat_history, chat_model_provider, chat_model } = req.body;
+    const { query, chat_history: raw_chat_history, chat_model_provider, chat_model } = request.body;
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    const chat_history = raw_chat_history.map((msg: any) => {
-      if (msg.role === "user") {
-        return new HumanMessage(msg.content);
-      } else if (msg.role === "assistant") {
-        return new AIMessage(msg.content);
+    const chat_history = raw_chat_history.map((message: any) => {
+      if (message.role === "user") {
+        return new HumanMessage(message.content);
+      } else if (message.role === "assistant") {
+        return new AIMessage(message.content);
       }
     });
@@ -38,9 +38,9 @@ router.post("/", async (req, res) => {
     const videos = await handleVideoSearch({ chat_history, query }, llm);
     res.status(200).json({ videos });
-  } catch (err) {
+  } catch (error) {
     res.status(500).json({ message: "An error has occurred." });
-    logger.error(`Error in video search: ${err.message}`);
+    logger.error(`Error in video search: ${error.message}`);
   }
 });


@@ -3,26 +3,26 @@ import { handleMessage } from "./messageHandler";
 import { getAvailableEmbeddingModelProviders, getAvailableChatModelProviders } from "../lib/providers";
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
-import type { IncomingMessage } from "http";
+import type { IncomingMessage } from "node:http";
 import logger from "../utils/logger";
 import { ChatOpenAI } from "@langchain/openai";
 export const handleConnection = async (ws: WebSocket, request: IncomingMessage) => {
   try {
-    const searchParams = new URL(request.url, `http://${request.headers.host}`).searchParams;
+    const searchParameters = new URL(request.url, `http://${request.headers.host}`).searchParams;
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
       getAvailableChatModelProviders(),
       getAvailableEmbeddingModelProviders(),
     ]);
-    const chatModelProvider = searchParams.get("chatModelProvider") || Object.keys(chatModelProviders)[0];
-    const chatModel = searchParams.get("chatModel") || Object.keys(chatModelProviders[chatModelProvider])[0];
+    const chatModelProvider = searchParameters.get("chatModelProvider") || Object.keys(chatModelProviders)[0];
+    const chatModel = searchParameters.get("chatModel") || Object.keys(chatModelProviders[chatModelProvider])[0];
     const embeddingModelProvider =
-      searchParams.get("embeddingModelProvider") || Object.keys(embeddingModelProviders)[0];
+      searchParameters.get("embeddingModelProvider") || Object.keys(embeddingModelProviders)[0];
     const embeddingModel =
-      searchParams.get("embeddingModel") || Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
+      searchParameters.get("embeddingModel") || Object.keys(embeddingModelProviders[embeddingModelProvider])[0];
     let llm: BaseChatModel | undefined;
     let embeddings: Embeddings | undefined;
@@ -36,10 +36,10 @@ export const handleConnection = async (ws: WebSocket, request: IncomingMessage)
     } else if (chatModelProvider == "custom_openai") {
       llm = new ChatOpenAI({
         modelName: chatModel,
-        openAIApiKey: searchParams.get("openAIApiKey"),
+        openAIApiKey: searchParameters.get("openAIApiKey"),
         temperature: 0.7,
         configuration: {
-          baseURL: searchParams.get("openAIBaseURL"),
+          baseURL: searchParameters.get("openAIBaseURL"),
        },
       });
     }
@@ -65,7 +65,7 @@ export const handleConnection = async (ws: WebSocket, request: IncomingMessage)
     ws.on("message", async message => await handleMessage(message.toString(), ws, llm, embeddings));
     ws.on("close", () => logger.debug("Connection closed"));
-  } catch (err) {
+  } catch (error) {
     ws.send(
       JSON.stringify({
         type: "error",
@@ -74,6 +74,6 @@ export const handleConnection = async (ws: WebSocket, request: IncomingMessage)
       }),
     );
     ws.close();
-    logger.error(err);
+    logger.error(error);
   }
 };


@@ -1,5 +1,5 @@
 import { initServer } from "./websocketServer";
-import http from "http";
+import http from "node:http";
 export const startWebSocketServer = (server: http.Server<typeof http.IncomingMessage, typeof http.ServerResponse>) => {
   initServer(server);


@@ -9,10 +9,10 @@ import handleRedditSearch from "../agents/redditSearchAgent";
 import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
 import type { Embeddings } from "@langchain/core/embeddings";
 import logger from "../utils/logger";
-import db from "../db";
+import database from "../db";
 import { chats, messages } from "../db/schema";
 import { eq } from "drizzle-orm";
-import crypto from "crypto";
+import crypto from "node:crypto";
 type Message = {
   messageId: string;
@@ -66,7 +66,8 @@ const handleEmitterEvents = (emitter: EventEmitter, ws: WebSocket, messageId: st
   emitter.on("end", () => {
     ws.send(JSON.stringify({ type: "messageEnd", messageId: messageId }));
-    db.insert(messages)
+    database
+      .insert(messages)
       .values({
         content: recievedMessage,
         chatId: chatId,
@@ -107,16 +108,14 @@ export const handleMessage = async (message: string, ws: WebSocket, llm: BaseCha
       }),
     );
-    const history: BaseMessage[] = parsedWSMessage.history.map(msg => {
-      if (msg[0] === "human") {
-        return new HumanMessage({
-          content: msg[1],
-        });
-      } else {
-        return new AIMessage({
-          content: msg[1],
-        });
-      }
+    const history: BaseMessage[] = parsedWSMessage.history.map(message_ => {
+      return message_[0] === "human"
+        ? new HumanMessage({
+            content: message_[1],
+          })
+        : new AIMessage({
+            content: message_[1],
+          });
     });
     if (parsedWSMessage.type === "message") {
@@ -127,12 +126,12 @@ export const handleMessage = async (message: string, ws: WebSocket, llm: BaseCha
       handleEmitterEvents(emitter, ws, id, parsedMessage.chatId);
-      const chat = await db.query.chats.findFirst({
+      const chat = await database.query.chats.findFirst({
        where: eq(chats.id, parsedMessage.chatId),
       });
       if (!chat) {
-        await db
+        await database
          .insert(chats)
          .values({
            id: parsedMessage.chatId,
@@ -143,7 +142,7 @@ export const handleMessage = async (message: string, ws: WebSocket, llm: BaseCha
           .execute();
       }
-      await db
+      await database
        .insert(messages)
        .values({
          content: parsedMessage.content,
@@ -165,7 +164,7 @@ export const handleMessage = async (message: string, ws: WebSocket, llm: BaseCha
         );
       }
     }
-  } catch (err) {
+  } catch (error) {
     ws.send(
       JSON.stringify({
         type: "error",
@@ -173,6 +172,6 @@ export const handleMessage = async (message: string, ws: WebSocket, llm: BaseCha
         key: "INVALID_FORMAT",
       }),
    );
-    logger.error(`Failed to handle message: ${err}`);
+    logger.error(`Failed to handle message: ${error}`);
   }
 };


@@ -1,6 +1,6 @@
 import { WebSocketServer } from "ws";
 import { handleConnection } from "./connectionManager";
-import http from "http";
+import http from "node:http";
 import { getPort } from "../config";
 import logger from "../utils/logger";