chore: Update dependencies and fix import paths
Parent: 3b737a078a
Commit: 81c5e30fda
46 changed files with 1626 additions and 371 deletions
@@ -1,7 +1,7 @@
 import { Embeddings, type EmbeddingsParams } from "@langchain/core/embeddings";
 import { chunkArray } from "@langchain/core/utils/chunk_array";
 
-export interface HuggingFaceTransformersEmbeddingsParams extends EmbeddingsParams {
-  modelName: string;
+export interface HuggingFaceTransformersEmbeddingsParameters extends EmbeddingsParams {
+  model: string;
@@ -13,7 +13,10 @@ export interface HuggingFaceTransformersEmbeddingsParams extends EmbeddingsParams {
   stripNewLines?: boolean;
 }
 
-export class HuggingFaceTransformersEmbeddings extends Embeddings implements HuggingFaceTransformersEmbeddingsParams {
-  modelName = "Xenova/all-MiniLM-L6-v2";
+export class HuggingFaceTransformersEmbeddings
+  extends Embeddings
+  implements HuggingFaceTransformersEmbeddingsParameters
+{
+  model = "Xenova/all-MiniLM-L6-v2";
@@ -27,7 +30,7 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
   private pipelinePromise: Promise<any>;
 
-  constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParams>) {
+  constructor(fields?: Partial<HuggingFaceTransformersEmbeddingsParameters>) {
     super(fields ?? {});
 
     this.modelName = fields?.model ?? fields?.modelName ?? this.model;
@@ -37,16 +40,15 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
   }
 
   async embedDocuments(texts: string[]): Promise<number[][]> {
-    const batches = chunkArray(this.stripNewLines ? texts.map(t => t.replace(/\n/g, " ")) : texts, this.batchSize);
+    const batches = chunkArray(this.stripNewLines ? texts.map(t => t.replaceAll("\n", " ")) : texts, this.batchSize);
 
     const batchRequests = batches.map(batch => this.runEmbedding(batch));
     const batchResponses = await Promise.all(batchRequests);
     const embeddings: number[][] = [];
 
-    for (let i = 0; i < batchResponses.length; i += 1) {
-      const batchResponse = batchResponses[i];
-      for (let j = 0; j < batchResponse.length; j += 1) {
-        embeddings.push(batchResponse[j]);
+    for (const batchResponse of batchResponses) {
+      for (const element of batchResponse) {
+        embeddings.push(element);
       }
     }
@@ -54,7 +56,7 @@ export class HuggingFaceTransformersEmbeddings extends Embeddings implements Hug
   }
 
   async embedQuery(text: string): Promise<number[]> {
-    const data = await this.runEmbedding([this.stripNewLines ? text.replace(/\n/g, " ") : text]);
+    const data = await this.runEmbedding([this.stripNewLines ? text.replaceAll("\n", " ") : text]);
     return data[0];
   }
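For context, a minimal usage sketch of the renamed embeddings class after this change. The import path and option values are illustrative assumptions, not taken from the commit:

// Hypothetical import path; the actual module path is not shown in this diff.
import { HuggingFaceTransformersEmbeddings } from "./huggingfaceTransformer";

const embeddings = new HuggingFaceTransformersEmbeddings({
  model: "Xenova/all-MiniLM-L6-v2", // `model` replaces the old `modelName` field
  stripNewLines: true,
});

// embedDocuments batches inputs via chunkArray; embedQuery embeds a single string.
const vectors = await embeddings.embedDocuments(["hello world", "how are you"]);
const queryVector = await embeddings.embedQuery("hello");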
@@ -1,15 +1,15 @@
 import { BaseOutputParser } from "@langchain/core/output_parsers";
 
-interface LineListOutputParserArgs {
+interface LineListOutputParserArguments {
   key?: string;
 }
 
 class LineListOutputParser extends BaseOutputParser<string[]> {
   private key = "questions";
 
-  constructor(args?: LineListOutputParserArgs) {
+  constructor(arguments_?: LineListOutputParserArguments) {
     super();
-    this.key = args.key ?? this.key;
+    this.key = arguments_.key ?? this.key;
   }
 
   static lc_name() {
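One note on the constructor above: `arguments_` is typed as optional but read without a guard, so callers would pass an explicit object. A usage sketch, with an invented input string for illustration:

// parse() is inherited from BaseOutputParser; the subclass body is not shown in this diff.
const parser = new LineListOutputParser({ key: "questions" });
const questions = await parser.parse("What is RAG?\nHow do embeddings work?");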
@@ -36,8 +36,8 @@ export const getAvailableChatModelProviders = async () => {
         temperature: 0.7,
       }),
     };
-  } catch (err) {
-    logger.error(`Error loading OpenAI models: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading OpenAI models: ${error}`);
   }
 }
@@ -85,8 +85,8 @@ export const getAvailableChatModelProviders = async () => {
        },
      ),
    };
-  } catch (err) {
-    logger.error(`Error loading Groq models: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading Groq models: ${error}`);
   }
 }
@@ -101,16 +101,17 @@ export const getAvailableChatModelProviders = async () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const { models: ollamaModels } = (await response.json()) as any;
 
-    models["ollama"] = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = new ChatOllama({
+    // eslint-disable-next-line unicorn/no-array-reduce
+    models["ollama"] = ollamaModels.reduce((accumulator, model) => {
+      accumulator[model.model] = new ChatOllama({
         baseUrl: ollamaEndpoint,
         model: model.model,
         temperature: 0.7,
       });
-      return acc;
+      return accumulator;
     }, {});
-  } catch (err) {
-    logger.error(`Error loading Ollama models: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading Ollama models: ${error}`);
   }
 }
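The new eslint suppression keeps the reduce call rather than restructuring it. For comparison, a reduce-free equivalent that the unicorn/no-array-reduce rule would accept (a sketch, not what the commit does):

// Build the same model map with Object.fromEntries instead of Array#reduce.
models["ollama"] = Object.fromEntries(
  ollamaModels.map(model => [
    model.model,
    new ChatOllama({ baseUrl: ollamaEndpoint, model: model.model, temperature: 0.7 }),
  ]),
);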
@@ -137,8 +138,8 @@ export const getAvailableEmbeddingModelProviders = async () => {
         modelName: "text-embedding-3-large",
       }),
     };
-  } catch (err) {
-    logger.error(`Error loading OpenAI embeddings: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading OpenAI embeddings: ${error}`);
   }
 }
@@ -153,15 +154,16 @@ export const getAvailableEmbeddingModelProviders = async () => {
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
     const { models: ollamaModels } = (await response.json()) as any;
 
-    models["ollama"] = ollamaModels.reduce((acc, model) => {
-      acc[model.model] = new OllamaEmbeddings({
+    // eslint-disable-next-line unicorn/no-array-reduce
+    models["ollama"] = ollamaModels.reduce((accumulator, model) => {
+      accumulator[model.model] = new OllamaEmbeddings({
         baseUrl: ollamaEndpoint,
         model: model.model,
       });
-      return acc;
+      return accumulator;
     }, {});
-  } catch (err) {
-    logger.error(`Error loading Ollama embeddings: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading Ollama embeddings: ${error}`);
   }
 }
@@ -177,8 +179,8 @@ export const getAvailableEmbeddingModelProviders = async () => {
         modelName: "Xenova/bert-base-multilingual-uncased",
       }),
     };
-  } catch (err) {
-    logger.error(`Error loading local embeddings: ${err}`);
+  } catch (error) {
+    logger.error(`Error loading local embeddings: ${error}`);
   }
 
   return models;
@@ -19,20 +19,20 @@ interface SearxngSearchResult {
   iframe_src?: string;
 }
 
-export const searchSearxng = async (query: string, opts?: SearxngSearchOptions) => {
+export const searchSearxng = async (query: string, options?: SearxngSearchOptions) => {
   const searxngURL = getSearxngApiEndpoint();
 
   const url = new URL(`${searxngURL}/search?format=json`);
   url.searchParams.append("q", query);
 
-  if (opts) {
-    Object.keys(opts).forEach(key => {
-      if (Array.isArray(opts[key])) {
-        url.searchParams.append(key, opts[key].join(","));
-        return;
+  if (options) {
+    for (const key of Object.keys(options)) {
+      if (Array.isArray(options[key])) {
+        url.searchParams.append(key, options[key].join(","));
+        continue;
       }
-      url.searchParams.append(key, opts[key]);
-    });
+      url.searchParams.append(key, options[key]);
+    }
   }
 
   const res = await axios.get(url.toString());
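A quick sketch of calling the reworked function; the option names below are plausible SearxngSearchOptions fields chosen for illustration, not confirmed by this diff:

// Array values are comma-joined into a single query parameter; scalars are appended as-is.
const results = await searchSearxng("open source llm", {
  engines: ["google", "bing"], // hypothetical field names
  language: "en",
});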