-
Notifications
You must be signed in to change notification settings - Fork 0
/
dummy.py
33 lines (25 loc) · 942 Bytes
/
dummy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
"""
Dummy script to verify that the vectorizer output loads correctly and that similar-document retrieval works.
"""
import pickle
from dotenv import load_dotenv
import faiss
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
load_dotenv()
if __name__ == "__main__":
    # Load the pickled vector store. NOTE(review): pickle.load can execute
    # arbitrary code during deserialization — only run this against a
    # trusted, locally-generated faiss_store.pkl.
    with open("faiss_store.pkl", "rb") as f:
        store = pickle.load(f)
    # The FAISS index is not picklable alongside the store, so it is saved
    # separately and re-attached here.
    store.index = faiss.read_index("docs.index")

    # Build a retrieval-augmented QA chain over the loaded store.
    # temperature=0 keeps answers deterministic for this smoke test.
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(temperature=0),
        retriever=store.as_retriever(),
        return_source_documents=True,
    )

    # Simple interactive REPL; conversation context accumulates in
    # chat_history as (question, answer) pairs.
    chat_history = []
    while True:
        try:
            query = input("Query: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dumping a traceback.
            break
        print(f"Query: {query}")
        result = qa_chain({"question": query, "chat_history": chat_history})
        print(f"Answer: {result['answer']}")
        chat_history.append((query, result["answer"]))