test.py
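"""Chainlit RAG chatbot: answers homeopathy questions by retrieving context
from a local FAISS index and generating with a quantized Llama 2 GGML model.
(Module docstring added for orientation; summary inferred from the code.)"""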
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl

DB_FAISS_PATH = 'vectorstore/db_faiss'

custom_prompt_template = """
You are an expert in homeopathy. Based on the provided context, answer the user's question concisely and only provide the information requested.
Context: {context}
Question: {question}
Answer:
"""


def set_custom_prompt():
    """Build the prompt template fed to the retrieval QA chain."""
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt


def retrieval_qa_chain(llm, prompt, db):
    """Assemble a 'stuff' RetrievalQA chain over the FAISS retriever."""
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=db.as_retriever(search_kwargs={'k': 2}),  # top-2 chunks per query
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt}
    )
    return qa_chain


def load_llm():
    """Load the quantized Llama 2 chat model via ctransformers."""
    llm = CTransformers(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        model_type="llama",
        max_new_tokens=120,  # cap generation length to reduce long outputs
        temperature=0.6,     # lower temperature for more deterministic answers
        top_p=0.9            # nucleus sampling to control diversity
    )
    return llm


def qa_bot():
    """Wire embeddings, vector store, LLM, and prompt into one QA chain."""
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': 'cpu'}
    )
    db = FAISS.load_local(DB_FAISS_PATH, embeddings,
                          allow_dangerous_deserialization=True)
    llm = load_llm()
    qa_prompt = set_custom_prompt()
    qa = retrieval_qa_chain(llm, qa_prompt, db)
    return qa


def final_result(query):
    """Synchronous helper: build a fresh chain and answer a single query."""
    qa_result = qa_bot()
    response = qa_result({'query': query})
    if isinstance(response["result"], list):
        answers = [answer for answer in response["result"] if answer not in [None, ""]]
        answer = "\n".join(answers) if answers else "No answers found"
    else:
        answer = response["result"]
    # Post-processing: drop repeated words while preserving order
    # (note: this also removes legitimately repeated words).
    answer = " ".join(dict.fromkeys(answer.split()))
    return answer


@cl.on_chat_start
async def start():
    """Build the chain once per session and greet the user."""
    chain = qa_bot()
    msg = cl.Message(content="Starting the bot...")
    await msg.send()
    msg.content = "Hi, welcome to Medical Bot. What is your query?"
    await msg.update()
    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message: cl.Message):
    """Answer each incoming message with the session's QA chain."""
    chain = cl.user_session.get("chain")
    cb = cl.AsyncLangchainCallbackHandler(
        stream_final_answer=True,
        answer_prefix_tokens=["FINAL", "ANSWER"]
    )
    cb.answer_reached = True
    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["result"]
    # Post-processing: drop repeated words while preserving order.
    answer = " ".join(dict.fromkeys(answer.split()))
    await cl.Message(content=answer).send()
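

# A minimal smoke test for running outside the Chainlit UI. This is an
# illustrative sketch, not part of the original app: it assumes the FAISS
# index already exists at DB_FAISS_PATH (built by a separate ingestion step)
# and simply exercises final_result() once; the sample query is hypothetical.
if __name__ == "__main__":
    print(final_result("What is arnica used for?"))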