feat: add expert search, legal search and UI improvements
This commit is contained in:
parent
2c5ca94b3c
commit
271199c527
53 changed files with 4595 additions and 708 deletions
235
src/chains/expertSearchAgent.ts
Normal file
235
src/chains/expertSearchAgent.ts
Normal file
|
|
@ -0,0 +1,235 @@
|
|||
import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import {
|
||||
RunnableLambda,
|
||||
RunnableMap,
|
||||
RunnableSequence,
|
||||
} from '@langchain/core/runnables';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { supabase } from '../db/supabase';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { Expert, ExpertSearchRequest, ExpertSearchResponse } from '../types/types';
|
||||
|
||||
// Input contract for the expert-search chain: the running conversation
// plus the user's current free-text request.
type ExpertSearchChainInput = {
  chat_history: BaseMessage[];
  query: string;
};
|
||||
|
||||
// Prompt for the extraction step: the LLM must reduce the user's request to
// exactly two labelled lines ("expertise: ..." / "ville: ...").
// NOTE: the parser in createExpertSearchChain depends on this strict format.
const ExpertSearchChainPrompt = `
Vous êtes un agent spécialisé dans l'analyse et la recherche d'experts professionnels. Votre rôle est d'interpréter les demandes des utilisateurs et d'extraire les informations essentielles pour trouver l'expert le plus pertinent.

OBJECTIF :
Analyser la requête pour identifier précisément :
1. Le domaine d'expertise recherché
2. La localisation souhaitée (si mentionnée)

RÈGLES D'EXTRACTION :
- Pour l'EXPERTISE :
* Identifier le domaine principal (comptabilité, droit, marketing, etc.)
* Reconnaître les spécialisations (droit des affaires, marketing digital, etc.)
* Nettoyer les mots parasites (expert, spécialiste, professionnel, etc.)

- Pour la VILLE :
* Si mentionnée
* Extraire la ville mentionnée
* Ignorer si non spécifiée
* Standardiser le format (tout en minuscules)

FORMAT DE RÉPONSE STRICT :
Répondre en deux lignes exactement :
expertise: [domaine d'expertise]
ville: [ville si mentionnée]

EXEMPLES D'ANALYSE :

1. "Je cherche un expert comptable sur Paris"
expertise: comptabilité
ville: paris

2. "Il me faudrait un avocat spécialisé en droit des affaires à Lyon"
expertise: droit des affaires
ville: lyon

Conversation précédente :
{chat_history}

Requête actuelle : {query}

Principe de recherche d'expert :
- Pour toute recherche d'expert, extraire UNIQUEMENT :
* L'expertise demandée
* La ville (si mentionnée)

- Mots déclencheurs à reconnaître :
* "cherche un expert/spécialiste/consultant"
* "besoin d'un professionnel"
* "recherche quelqu'un pour"
* "qui peut m'aider avec"

<example>
\`<query>
Je cherche un expert comptable
</query>
expertise: comptabilité
ville:
\`

\`<query>
J'ai besoin d'un spécialiste en droit des sociétés à Lyon
</query>
expertise: droit des sociétés
ville: lyon
\`

\`<query>
Qui peut m'aider avec ma comptabilité sur Paris ?
</query>
expertise: comptabilité
ville: paris
\`
</example>
`;
|
||||
|
||||
// Prompt for the synthesis step: summarise the experts returned by the
// Supabase query. The model is explicitly forbidden from inventing experts.
// Placeholders: {query} = search context, {experts} = JSON of matched rows.
const ExpertAnalysisPrompt = `
Vous devez générer une synthèse des experts trouvés en vous basant UNIQUEMENT sur les données fournies.

Contexte de la recherche : {query}

Experts trouvés (à utiliser EXCLUSIVEMENT) :
{experts}

Format de la synthèse :
🎯 Synthèse de la recherche
[Résumé bref de la demande]

💫 Experts disponibles :
[Pour chaque expert trouvé dans les données :]
- [Prénom Nom] à [Ville]
Expertise : [expertises]
Tarif : [tarif]€
[Point clé de la biographie]

⚠️ IMPORTANT : N'inventez PAS d'experts. Utilisez UNIQUEMENT les données fournies.
`;

// Shared parser that collapses an LLM chat response into a plain string.
const strParser = new StringOutputParser();
|
||||
|
||||
// Fonction pour convertir les données de l'expert
|
||||
const convertToExpert = (data: any): Expert => {
|
||||
return {
|
||||
id: data.id,
|
||||
id_expert: data.id_expert || '',
|
||||
nom: data.nom,
|
||||
prenom: data.prenom,
|
||||
adresse: data.adresse || '',
|
||||
pays: data.pays,
|
||||
ville: data.ville,
|
||||
expertises: data.expertises,
|
||||
specialite: data.specialite || data.expertises?.[0] || '',
|
||||
biographie: data.biographie,
|
||||
tarif: data.tarif || 0,
|
||||
services: data.services,
|
||||
created_at: data.created_at,
|
||||
image_url: data.image_url
|
||||
};
|
||||
};
|
||||
|
||||
const createExpertSearchChain = (llm: BaseChatModel) => {
|
||||
return RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: ExpertSearchChainInput) => {
|
||||
return formatChatHistoryAsString(input.chat_history || []);
|
||||
},
|
||||
query: (input: ExpertSearchChainInput) => {
|
||||
return input.query || '';
|
||||
},
|
||||
}),
|
||||
PromptTemplate.fromTemplate(ExpertSearchChainPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (response: string) => {
|
||||
try {
|
||||
// Extraire expertise et ville avec gestion des erreurs
|
||||
const lines = response.split('\n').filter(line => line.trim() !== '');
|
||||
const expertise = lines[0]?.replace('expertise:', '')?.trim() || '';
|
||||
const ville = lines[1]?.replace('ville:', '')?.trim() || '';
|
||||
|
||||
if (!expertise) {
|
||||
return {
|
||||
experts: [],
|
||||
synthese: "Je n'ai pas pu identifier l'expertise recherchée."
|
||||
} as ExpertSearchResponse;
|
||||
}
|
||||
|
||||
// Rechercher les experts
|
||||
let query = supabase
|
||||
.from('experts')
|
||||
.select('*')
|
||||
.ilike('expertises', `%${expertise}%`)
|
||||
.limit(3);
|
||||
|
||||
if (ville) {
|
||||
query = query.ilike('ville', `%${ville}%`);
|
||||
}
|
||||
|
||||
const { data: experts, error } = await query;
|
||||
|
||||
if (error) throw error;
|
||||
|
||||
if (!experts || experts.length === 0) {
|
||||
return {
|
||||
experts: [],
|
||||
synthese: "Désolé, je n'ai pas trouvé d'experts correspondant à vos critères."
|
||||
} as ExpertSearchResponse;
|
||||
}
|
||||
|
||||
const synthesePrompt = PromptTemplate.fromTemplate(ExpertAnalysisPrompt);
|
||||
const formattedPrompt = await synthesePrompt.format({
|
||||
query: response,
|
||||
experts: JSON.stringify(experts, null, 2)
|
||||
});
|
||||
|
||||
const syntheseResponse = await llm.invoke(formattedPrompt);
|
||||
const syntheseString = typeof syntheseResponse.content === 'string'
|
||||
? syntheseResponse.content
|
||||
: JSON.stringify(syntheseResponse.content);
|
||||
|
||||
return {
|
||||
experts: experts.map(convertToExpert),
|
||||
synthese: syntheseString
|
||||
} as ExpertSearchResponse;
|
||||
|
||||
} catch (error) {
|
||||
console.error('❌ Erreur:', error);
|
||||
return {
|
||||
experts: [],
|
||||
synthese: "Une erreur est survenue lors de la recherche d'experts."
|
||||
} as ExpertSearchResponse;
|
||||
}
|
||||
}),
|
||||
]);
|
||||
};
|
||||
|
||||
const handleExpertSearch = async (input: ExpertSearchRequest, llm: BaseChatModel) => {
|
||||
try {
|
||||
// 1. Analyse de la requête via LLM pour extraire l'expertise et la ville
|
||||
const expertSearchChain = createExpertSearchChain(llm);
|
||||
const result = await expertSearchChain.invoke({
|
||||
query: input.query,
|
||||
chat_history: input.chat_history || []
|
||||
}) as ExpertSearchResponse; // Le résultat est déjà une ExpertSearchResponse
|
||||
|
||||
// Pas besoin de retraiter la réponse car createExpertSearchChain fait déjà tout le travail
|
||||
return result;
|
||||
|
||||
} catch (error) {
|
||||
console.error('❌ Erreur dans handleExpertSearch:', error);
|
||||
return {
|
||||
experts: [],
|
||||
synthese: "Une erreur est survenue."
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export default handleExpertSearch;
|
||||
|
|
@ -11,25 +11,35 @@ import { searchSearxng } from '../lib/searxng';
|
|||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
|
||||
const imageSearchChainPrompt = `
|
||||
You will be given a conversation below and a follow up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search the web for images.
|
||||
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.
|
||||
Vous êtes un expert en recherche d'images pour illustrer des contenus business. Votre objectif est de trouver des images élégantes et modernes qui illustrent le sujet de manière indirecte et esthétique.
|
||||
|
||||
Example:
|
||||
1. Follow up question: What is a cat?
|
||||
Rephrased: A cat
|
||||
Principes à suivre :
|
||||
- Privilégier des images lifestyle et esthétiques
|
||||
- Éviter les schémas, graphiques et images trop techniques
|
||||
- Favoriser des images avec des personnes dans des situations naturelles
|
||||
- Choisir des images lumineuses et positives
|
||||
- Préférer des compositions simples et épurées
|
||||
|
||||
2. Follow up question: What is a car? How does it works?
|
||||
Rephrased: Car working
|
||||
Format de la requête :
|
||||
- 2-3 mots-clés maximum
|
||||
- Ajouter "lifestyle" ou "modern" pour améliorer la qualité
|
||||
- Toujours ajouter "professional" pour le contexte business
|
||||
|
||||
3. Follow up question: How does an AC work?
|
||||
Rephrased: AC working
|
||||
Exemples :
|
||||
1. Question : "Comment créer une entreprise ?"
|
||||
Requête : "entrepreneur lifestyle modern"
|
||||
|
||||
Conversation:
|
||||
2. Question : "Qu'est-ce qu'un business plan ?"
|
||||
Requête : "business meeting professional"
|
||||
|
||||
3. Question : "Comment faire sa comptabilité ?"
|
||||
Requête : "office work lifestyle"
|
||||
|
||||
Conversation :
|
||||
{chat_history}
|
||||
|
||||
Follow up question: {query}
|
||||
Rephrased question:
|
||||
`;
|
||||
Question : {query}
|
||||
Requête de recherche d'image :`;
|
||||
|
||||
type ImageSearchChainInput = {
|
||||
chat_history: BaseMessage[];
|
||||
|
|
@ -53,11 +63,12 @@ const createImageSearchChain = (llm: BaseChatModel) => {
|
|||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
const res = await searchSearxng(input, {
|
||||
engines: ['bing images', 'google images'],
|
||||
engines: ['google_images', 'bing_images'],
|
||||
language: 'fr',
|
||||
categories: ['images'],
|
||||
});
|
||||
|
||||
|
||||
const images = [];
|
||||
|
||||
res.results.forEach((result) => {
|
||||
if (result.img_src && result.url && result.title) {
|
||||
images.push({
|
||||
|
|
@ -67,7 +78,7 @@ const createImageSearchChain = (llm: BaseChatModel) => {
|
|||
});
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
return images.slice(0, 10);
|
||||
}),
|
||||
]);
|
||||
|
|
@ -81,4 +92,4 @@ const handleImageSearch = (
|
|||
return imageSearchChain.invoke(input);
|
||||
};
|
||||
|
||||
export default handleImageSearch;
|
||||
export default handleImageSearch;
|
||||
113
src/chains/legalSearchAgent.ts
Normal file
113
src/chains/legalSearchAgent.ts
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
import {
|
||||
RunnableSequence,
|
||||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { searchSearxng } from '../lib/searxng';
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
|
||||
// Prompt for the query-rewriting step: turn the user's legal question into
// a dense, keyword-style search query targeted at official French sources
// (Légifrance, Service-public.fr, Journal officiel, URSSAF, CCI).
const legalSearchChainPrompt = `
Vous êtes un assistant juridique expert spécialisé dans la recherche documentaire légale française. Votre rôle est d'analyser la question de l'utilisateur et de générer une requête de recherche optimisée.

Contexte de la conversation :
{chat_history}

Question actuelle : {query}

Instructions détaillées :
1. Analysez précisément :
- Le domaine juridique spécifique (droit du travail, droit des sociétés, etc.)
- Le type de document recherché (loi, décret, jurisprudence, etc.)
- Les points clés de la problématique

2. Construisez une requête qui inclut :
- Les termes juridiques exacts (articles de code, références légales)
- Les mots-clés techniques appropriés
- Les synonymes pertinents
- La période temporelle si pertinente (loi récente, modifications)

3. Priorisez les sources selon la hiérarchie :
- Codes et lois : Légifrance
- Information officielle : Service-public.fr
- Publications : Journal-officiel
- Informations pratiques : URSSAF, CCI

Exemples de reformulation :
Question : "Comment créer une SARL ?"
→ "Code commerce SARL constitution statuts gérance responsabilité associés capital social formalités légifrance service-public"

Question : "Licenciement économique procédure"
→ "Code travail licenciement économique procédure CSE PSE motif notification délais recours légifrance"

Question : "Bail commercial résiliation"
→ "Code commerce bail commercial résiliation article L145-4 congé indemnité éviction légifrance jurisprudence"

Reformulez la question de manière précise et technique :`;

// Input contract for the legal-search chain: conversation so far plus the
// user's current question.
type LegalSearchChainInput = {
  chat_history: BaseMessage[];
  query: string;
};

// Shared parser that collapses an LLM chat response into a plain string.
const strParser = new StringOutputParser();
|
||||
|
||||
const createLegalSearchChain = (llm: BaseChatModel) => {
|
||||
return RunnableSequence.from([
|
||||
RunnableMap.from({
|
||||
chat_history: (input: LegalSearchChainInput) => {
|
||||
return formatChatHistoryAsString(input.chat_history);
|
||||
},
|
||||
query: (input: LegalSearchChainInput) => {
|
||||
return input.query;
|
||||
},
|
||||
}),
|
||||
PromptTemplate.fromTemplate(legalSearchChainPrompt),
|
||||
llm,
|
||||
strParser,
|
||||
RunnableLambda.from(async (input: string) => {
|
||||
const pdfQuery = `${input} filetype:pdf`;
|
||||
|
||||
const res = await searchSearxng(pdfQuery, {
|
||||
engines: [
|
||||
'legifrance',
|
||||
'journal_officiel',
|
||||
'service_public',
|
||||
'URSSAF',
|
||||
'CCI'
|
||||
],
|
||||
language: 'fr',
|
||||
categories: ['general', 'files']
|
||||
});
|
||||
|
||||
const documents = [];
|
||||
|
||||
res.results.forEach((result) => {
|
||||
if (result.url && result.title) {
|
||||
documents.push({
|
||||
url: result.url,
|
||||
title: result.title,
|
||||
snippet: result.content || '',
|
||||
source: result.url.split('/')[2] || 'unknown',
|
||||
type: 'pdf'
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return documents.slice(0, 10);
|
||||
}),
|
||||
]);
|
||||
};
|
||||
|
||||
const handleLegalSearch = (
|
||||
input: LegalSearchChainInput,
|
||||
llm: BaseChatModel,
|
||||
) => {
|
||||
const legalSearchChain = createLegalSearchChain(llm);
|
||||
return legalSearchChain.invoke(input);
|
||||
};
|
||||
|
||||
export default handleLegalSearch;
|
||||
292
src/chains/rag_document_upload.ts
Normal file
292
src/chains/rag_document_upload.ts
Normal file
|
|
@ -0,0 +1,292 @@
|
|||
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { Embeddings } from '@langchain/core/embeddings';
|
||||
import { Chroma } from '@langchain/community/vectorstores/chroma';
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import { RunnableSequence, RunnableMap } from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
|
||||
// Local input contract for the search chain built by
// RAGDocumentChain.createSearchChain.
type SearchInput = {
  query: string;
  chat_history: BaseMessage[];
  type?: string; // optional caller-supplied tag; not read in this file
};
|
||||
|
||||
/**
 * RAG pipeline over user-uploaded documents: splits and indexes them into
 * a Chroma collection, then answers questions with an LLM constrained to
 * the retrieved context. Call initializeVectorStoreFromDocuments() before
 * searching; until then the store is null and searches return [].
 */
export class RAGDocumentChain {
  // Lazily created in initializeVectorStoreFromDocuments(); null until then.
  private vectorStore: Chroma | null = null;

  // Splitter tuned for prose: 1000-char chunks with 200-char overlap,
  // breaking preferentially on paragraph/sentence boundaries.
  private textSplitter = new RecursiveCharacterTextSplitter({
    chunkSize: 1000,
    chunkOverlap: 200,
    separators: ["\n\n", "\n", ".", "!", "?", ";", ":", " ", ""],
    keepSeparator: true,
    lengthFunction: (text) => text.length
  });

  // Collapse whitespace/newline runs so chunk text is single-spaced.
  private preprocessChunk(text: string): string {
    return text
      .replace(/\s+/g, ' ')
      .replace(/\n+/g, ' ')
      .trim();
  }

  // Attach bookkeeping metadata (chunk type, word count, processing date)
  // to a document and normalise its text via preprocessChunk.
  private enrichChunkMetadata(doc: Document): Document {
    const metadata = {
      ...doc.metadata,
      chunk_type: 'text',
      word_count: doc.pageContent.split(/\s+/).length,
      processed_date: new Date().toISOString()
    };
    return new Document({
      pageContent: this.preprocessChunk(doc.pageContent),
      metadata
    });
  }

  // Binary quality gate: 1 if the chunk has more than 10 words and at
  // least one sentence-like segment, 0 otherwise.
  private scoreChunk(chunk: string): number {
    const wordCount = chunk.split(/\s+/).length;
    const sentenceCount = chunk.split(/[.!?]+/).length;
    return wordCount > 10 && sentenceCount > 0 ? 1 : 0;
  }

  /**
   * Splits, filters, deduplicates and indexes `documents` into the Chroma
   * collection "uploaded_docs" (server at http://chroma:8000).
   * Returns simple stats about the resulting index; throws (wrapping the
   * underlying message) if the vector store cannot be created.
   */
  public async initializeVectorStoreFromDocuments(
    documents: Document[],
    embeddings: Embeddings
  ) {
    try {
      console.log("🔄 Préparation des documents...");

      // Drop near-empty documents (<= 50 chars) and normalise the rest.
      const validDocuments = documents
        .filter(doc => doc.pageContent && doc.pageContent.trim().length > 50)
        .map(doc => this.enrichChunkMetadata(doc));

      // Split the surviving documents into overlapping chunks.
      const texts = await this.textSplitter.splitDocuments(validDocuments);
      console.log(`📄 ${texts.length} chunks créés`);

      // Keep only chunks that pass the scoreChunk quality gate.
      const scoredTexts = texts.filter(doc => this.scoreChunk(doc.pageContent) > 0);
      console.log(`📄 ${scoredTexts.length} chunks valides après scoring`);

      // Drop chunks whose normalised text duplicates an earlier one.
      const uniqueTexts = this.deduplicateChunks(scoredTexts);
      console.log(`📄 ${uniqueTexts.length} chunks uniques après déduplication`);

      // Build the Chroma index; HNSW parameters trade construction-time
      // index quality against query-time speed (cosine distance space).
      this.vectorStore = await Chroma.fromDocuments(
        uniqueTexts,
        embeddings,
        {
          collectionName: "uploaded_docs",
          url: "http://chroma:8000",
          collectionMetadata: {
            "hnsw:space": "cosine",
            "hnsw:construction_ef": 100, // Increased for better index quality
            "hnsw:search_ef": 50, // Balanced for search performance
            "hnsw:m": 16 // Number of connections per element
          }
        }
      );

      console.log("✅ VectorStore initialisé avec succès");
      return {
        totalDocuments: documents.length,
        validChunks: uniqueTexts.length,
        averageChunkSize: this.calculateAverageChunkSize(uniqueTexts)
      };
    } catch (error) {
      console.error("❌ Erreur lors de l'initialisation:", error);
      throw new Error(`Erreur d'initialisation du VectorStore: ${error.message}`);
    }
  }

  // Mean chunk length in characters, rounded; 0 for an empty list.
  private calculateAverageChunkSize(chunks: Document[]): number {
    if (chunks.length === 0) return 0;
    const totalLength = chunks.reduce((sum, doc) => sum + doc.pageContent.length, 0);
    return Math.round(totalLength / chunks.length);
  }

  // Removes chunks whose lower-cased, whitespace-normalised text has
  // already been seen; the first occurrence wins.
  private deduplicateChunks(chunks: Document[]): Document[] {
    const seen = new Set<string>();
    return chunks.filter(chunk => {
      const normalized = chunk.pageContent
        .toLowerCase()
        .replace(/\s+/g, ' ')
        .trim();

      if (seen.has(normalized)) {
        return false;
      }
      seen.add(normalized);
      return true;
    });
  }

  /**
   * Similarity search with local reranking.
   * Fetches 2×limit candidates from Chroma, rescores them with
   * calculateRelevanceScore, keeps the best `limit`, decorates each with
   * viewer metadata (title, page number, highlight snippet, /api/uploads
   * URL), then merges chunks that share the same source page.
   * Returns [] when the store is uninitialised or on any error.
   */
  public async searchSimilarDocuments(query: string, limit: number = 5) {
    if (!this.vectorStore) {
      console.warn("⚠️ VectorStore non initialisé");
      return [];
    }

    try {
      console.log("🔍 Recherche pour:", query);

      // NOTE(review): the third argument is Chroma's metadata filter;
      // whether `minScore` is honoured there should be confirmed against
      // the @langchain/community Chroma API.
      const initialResults = await this.vectorStore.similaritySearch(
        query,
        limit * 2,
        {
          filter: { source: { $exists: true } },
          minScore: 0.7
        }
      );

      const scoredResults = initialResults
        .filter(doc => doc.pageContent.trim().length > 50)
        .map(doc => ({
          document: doc,
          score: this.calculateRelevanceScore(query, doc.pageContent)
        }))
        .sort((a, b) => b.score - a.score)
        .slice(0, limit)
        .map(item => {
          const doc = item.document;
          // Tolerate both snake_case and camelCase page metadata.
          const pageNumber = doc.metadata.page_number || doc.metadata.pageNumber || 1;
          const title = doc.metadata.title || 'Document';
          const source = doc.metadata.source;

          // Text snippet used for highlighting in the document viewer.
          const searchText = doc.pageContent
            .substring(0, 200)
            .replace(/[\n\r]+/g, ' ')
            .trim();

          return new Document({
            pageContent: doc.pageContent,
            metadata: {
              title: title,
              pageNumber: pageNumber,
              source: source,
              type: doc.metadata.type || 'uploaded',
              searchText: searchText,
              // Deep link into the uploaded file viewer, when a source
              // file is known.
              url: source ?
                `/api/uploads/${source}/view?page=${pageNumber}&search=${encodeURIComponent(searchText)}` :
                undefined
            }
          });
        });

      const mergedResults = this.mergeRelatedChunks(scoredResults);
      console.log(`📄 ${mergedResults.length} documents pertinents trouvés après reranking`);
      return mergedResults;
    } catch (error) {
      console.error("❌ Erreur de recherche:", error);
      return [];
    }
  }

  // Heuristic lexical relevance: term frequency (+0.1 per occurrence),
  // exact-phrase bonus (+1), and a multiplier favouring shorter chunks.
  private calculateRelevanceScore(query: string, content: string): number {
    const normalizedQuery = query.toLowerCase();
    const normalizedContent = content.toLowerCase();

    let score = 0;

    // Term frequency: each query term contributes per occurrence.
    // NOTE(review): the term is used as a raw regex; terms containing
    // regex metacharacters would need escaping — confirm inputs.
    const queryTerms = normalizedQuery.split(/\s+/);
    queryTerms.forEach(term => {
      const termCount = (normalizedContent.match(new RegExp(term, 'g')) || []).length;
      score += termCount * 0.1;
    });

    // Exact phrase matching
    if (normalizedContent.includes(normalizedQuery)) {
      score += 1;
    }

    // Content length penalty (prefer shorter, more focused chunks)
    const lengthPenalty = Math.max(0, 1 - (content.length / 5000));
    score *= (1 + lengthPenalty);

    return score;
  }

  // Concatenates chunks that come from the same source page (key:
  // "<source>-<page>"); the first chunk's metadata (incl. searchText)
  // is kept for the merged document.
  private mergeRelatedChunks(documents: Document[]): Document[] {
    const merged: { [key: string]: Document } = {};

    documents.forEach(doc => {
      const source = doc.metadata?.source || '';
      const page = doc.metadata?.pageNumber || 1;
      const key = `${source}-${page}`;

      if (!merged[key]) {
        merged[key] = doc;
      } else {
        const existingDoc = merged[key];
        merged[key] = new Document({
          pageContent: `${existingDoc.pageContent}\n\n${doc.pageContent}`,
          metadata: {
            ...existingDoc.metadata,
            searchText: existingDoc.metadata.searchText
          }
        });
      }
    });

    return Object.values(merged);
  }

  /**
   * Builds a question-answering chain over the indexed documents: retrieves
   * similar chunks for the query, renders them as a numbered "[Source N]"
   * context block, and prompts the LLM to answer strictly from that context
   * (in French), citing sources. Returns the raw string answer.
   */
  public createSearchChain(llm: BaseChatModel) {
    return RunnableSequence.from([
      RunnableMap.from({
        query: (input: SearchInput) => input.query,
        chat_history: (input: SearchInput) => formatChatHistoryAsString(input.chat_history),
        context: async (input: SearchInput) => {
          const docs = await this.searchSimilarDocuments(input.query);
          return docs.map((doc, i) => {
            const source = doc.metadata?.source || 'Document';
            const title = doc.metadata?.title || '';
            const pageNumber = doc.metadata?.pageNumber;
            const url = doc.metadata?.url;

            let sourceInfo = `Source: ${title || source}`;
            if (pageNumber) sourceInfo += ` (page ${pageNumber})`;
            if (url) sourceInfo += `\nURL: ${url}`;

            return `[Source ${i + 1}] ${doc.pageContent}\n${sourceInfo}`;
          }).join("\n\n");
        }
      }),
      PromptTemplate.fromTemplate(`
Tu es un assistant expert qui répond aux questions en se basant uniquement sur le contexte fourni.
Historique de la conversation:
{chat_history}

Contexte disponible:
{context}

Question: {query}

Instructions:
1. Réponds uniquement en te basant sur le contexte fourni
2. Si la réponse n'est pas dans le contexte, dis-le clairement
3. Cite les sources pertinentes en utilisant [Source X]
4. Sois précis et concis

Réponse:
`),
      llm,
      new StringOutputParser()
    ]);
  }

  // True once initializeVectorStoreFromDocuments has succeeded.
  public isInitialized(): boolean {
    return this.vectorStore !== null;
  }
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue