shrut123 committed
Commit e2b9039
1 Parent(s): be19dc9

Create app.py

Files changed (1)
  1. app.py +86 -0
app.py ADDED
@@ -0,0 +1,86 @@
+ import os
+ import streamlit as st
+ import torch
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer
+ from splade.models.transformer_rep import Splade
+ import pinecone
+
+ # Initialize Pinecone connection
+ api_key = os.getenv('PINECONE_API_KEY', 'b250d1e1-fa69-40f7-81e7-442d53f62859')
+ pinecone.init(api_key=api_key, environment='us-east1-gcp')
+ index_name = 'pubmed-splade'
+
+ # Connect to the Pinecone index
+ if index_name in pinecone.list_indexes():
+     index = pinecone.Index(index_name)
+ else:
+     st.error("Pinecone index not found! Ensure the correct Pinecone index is being used.")
+     st.stop()
+
+ # Initialize Dense and Sparse models
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ # Dense model (Sentence-BERT)
+ dense_model = SentenceTransformer('msmarco-bert-base-dot-v5', device=device)
+
+ # Sparse model (SPLADE)
+ sparse_model_id = 'naver/splade-cocondenser-ensembledistil'
+ sparse_model = Splade(sparse_model_id, agg='max').to(device)
+ sparse_model.eval()
+
+ # Tokenizer for sparse model
+ tokenizer = AutoTokenizer.from_pretrained(sparse_model_id)
+
+ # Function to encode query into dense and sparse vectors
+ def encode(text: str):
+     # Dense vector
+     dense_vec = dense_model.encode(text).tolist()
+
+     # Sparse vector
+     input_ids = tokenizer(text, return_tensors='pt')
+     with torch.no_grad():
+         sparse_vec = sparse_model(d_kwargs=input_ids.to(device))['d_rep'].squeeze()
+
+     # Extract non-zero values and indices for sparse vector
+     indices = sparse_vec.nonzero().squeeze().cpu().tolist()
+     values = sparse_vec[indices].cpu().tolist()
+
+     sparse_dict = {"indices": indices, "values": values}
+     return dense_vec, sparse_dict
+
+ # Function for hybrid search scaling
+ def hybrid_scale(dense, sparse, alpha: float):
+     if alpha < 0 or alpha > 1:
+         raise ValueError("Alpha must be between 0 and 1")
+
+     hsparse = {
+         'indices': sparse['indices'],
+         'values': [v * (1 - alpha) for v in sparse['values']]
+     }
+     hdense = [v * alpha for v in dense]
+
+     return hdense, hsparse
+
+ # Streamlit UI
+ st.title("PubMed Search Application")
+ query = st.text_input("Enter your query:", "")
+
+ # Slider to control sparse-dense scaling
+ alpha = st.slider("Hybrid Search Weight (Dense vs Sparse)", 0.0, 1.0, 0.5)
+
+ if query:
+     # Encode the query
+     dense_vec, sparse_vec = encode(query)
+
+     # Scale vectors based on slider value
+     hdense, hsparse = hybrid_scale(dense_vec, sparse_vec, alpha)
+
+     # Query Pinecone index
+     response = index.query(vector=hdense, sparse_vector=hsparse, top_k=3, include_metadata=True)
+
+     # Display results
+     st.write(f"Top results for query: **{query}**")
+     for match in response['matches']:
+         st.write(f"**Score**: {match['score']}")
+         st.write(f"**Context**: {match['metadata']['context']}")
+         st.write("---")
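
For reference, the indices produced by encode() are vocabulary positions in the SPLADE tokenizer, so they can be mapped back to readable terms when inspecting what the sparse model activated for a query. A minimal sketch, assuming the encode() function and tokenizer defined above (the query string and printed weights are only illustrative):

```python
# Inspect which vocabulary terms SPLADE activated for a query (debugging aid)
_, sparse = encode("treatments for type 2 diabetes")
tokens = tokenizer.convert_ids_to_tokens(sparse["indices"])
weights = sparse["values"]

# Print the ten highest-weighted terms
for token, weight in sorted(zip(tokens, weights), key=lambda tw: tw[1], reverse=True)[:10]:
    print(f"{token}: {weight:.3f}")
```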
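hybrid_scale() is a simple convex weighting: the dense vector is scaled by alpha and the sparse values by (1 - alpha), so alpha = 1.0 gives pure dense search and alpha = 0.0 pure sparse. A toy check of that behavior (the vectors below are made up, not real embeddings):

```python
# Toy illustration of hybrid_scale; values are invented, not model output
dense = [0.2, 0.4]
sparse = {"indices": [3, 17], "values": [1.0, 0.5]}

hdense, hsparse = hybrid_scale(dense, sparse, alpha=1.0)
# alpha = 1.0 -> dense vector unchanged, sparse values zeroed out
assert hdense == [0.2, 0.4]
assert hsparse["values"] == [0.0, 0.0]

hdense, hsparse = hybrid_scale(dense, sparse, alpha=0.0)
# alpha = 0.0 -> dense vector zeroed out, sparse values unchanged
assert hdense == [0.0, 0.0]
assert hsparse["values"] == [1.0, 0.5]
```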
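One thing worth noting: Streamlit re-runs the whole script on every interaction, so the two models and the tokenizer above are reloaded for each query. If the installed Streamlit version provides st.cache_resource, caching the loads is a common workaround; a sketch under that assumption (load_models is a hypothetical helper, not part of the committed file):

```python
import streamlit as st
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
from splade.models.transformer_rep import Splade

@st.cache_resource
def load_models(device: str):
    # Hypothetical helper: load the models once per process; Streamlit
    # returns the cached objects on subsequent re-runs.
    dense = SentenceTransformer('msmarco-bert-base-dot-v5', device=device)
    sparse = Splade('naver/splade-cocondenser-ensembledistil', agg='max').to(device)
    sparse.eval()
    tok = AutoTokenizer.from_pretrained('naver/splade-cocondenser-ensembledistil')
    return dense, sparse, tok

device = 'cuda' if torch.cuda.is_available() else 'cpu'
dense_model, sparse_model, tokenizer = load_models(device)
```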