BA-Chatbot/backend/evaluation/eval_retriever/eval_ada.py

import os
import sys

# Make the backend package root importable for the local modules below.
sys.path.append("../..")

from custom_evaluation import eval
from reranker import ReRanker
from retriever.retriever_pipeline import CustomPipeline
from haystack.nodes import PreProcessor

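# Document store indices that hold the evaluation documents and their labels.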
doc_index = "stupo_eval_docs"
label_index = "stupo_eval_labels"
# The OpenAI API key is expected in the OPENAI_API_KEY environment variable.
pipeline = CustomPipeline(doc_index=doc_index, label_index=label_index, api_key=os.environ["OPENAI_API_KEY"])
reranker = ReRanker()

open_domain = True

if open_domain:
    # Split documents into 100-word passages; leave the raw text untouched.
    preprocessor = PreProcessor(
        split_by="word",
        split_length=100,
        split_overlap=0,
        split_respect_sentence_boundary=False,
        clean_empty_lines=False,
        clean_whitespace=False,
    )

    # Start from empty evaluation indices.
    pipeline.doc_store_ada.delete_documents(index=doc_index)
    pipeline.doc_store_ada.delete_documents(index=label_index)

    # add_eval_data() converts the given SQuAD-format JSON dataset into Haystack
    # document and label objects and indexes them into the respective document
    # and label indices of the document store. It can be used with any dataset
    # in SQuAD format.
    pipeline.doc_store_ada.add_eval_data(
        filename="squad_format.json",
        doc_index=doc_index,
        label_index=label_index,
        preprocessor=preprocessor,
    )

    # Recompute ADA embeddings for the newly indexed evaluation documents.
    pipeline.doc_store_ada.update_embeddings(pipeline.emb_retriever_ada, index=doc_index)
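
# For reference, add_eval_data() expects "squad_format.json" to follow the
# standard SQuAD layout (a minimal sketch; field values are illustrative):
# {
#   "data": [
#     {"title": "...",
#      "paragraphs": [
#        {"context": "passage text ...",
#         "qas": [{"id": "1", "question": "...?",
#                  "answers": [{"text": "...", "answer_start": 0}]}]}]}
#   ]
# }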

# Open-domain evaluation runs against the full "stupo" index; otherwise the
# dedicated evaluation document index is used.
index = "stupo" if open_domain else doc_index

retriever_eval_results = eval(
    label_index=label_index, doc_index=index, top_k=10,
    document_store=pipeline.doc_store_ada, retriever=pipeline.emb_retriever_ada,
    reRankerGPT=reranker, rerankerPipeline=None, open_domain=open_domain,
)
print(retriever_eval_results)
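
# A minimal sketch of how this script is typically invoked (assuming the
# OpenAI key is exported in the shell beforehand):
#   export OPENAI_API_KEY=sk-...
#   python eval_ada.py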