shrut123 committed on
Commit
b0a56a3
1 Parent(s): 728abbd

Update app.py

Files changed (1)
  1. app.py +73 -73
app.py CHANGED
@@ -1,86 +1,86 @@
  import os
  import streamlit as st
- from pinecone import Pinecone, ServerlessSpec
  from sentence_transformers import SentenceTransformer
-
- # Title of the Streamlit App
- st.title("Pinecone Index Management with Streamlit")
-
- # Function to initialize Pinecone
- def initialize_pinecone():
-     api_key = os.getenv('PINECONE_API_KEY')  # Get Pinecone API key from environment variable
-     if api_key:
-         # Initialize Pinecone client
-         pc = Pinecone(api_key=api_key)
-         return pc
-     else:
-         st.error("Pinecone API key not found! Please set the PINECONE_API_KEY environment variable.")
-         return None
-
- # Function to create or connect to an index
- def create_or_connect_index(pc, index_name, dimension):
-     if index_name not in pc.list_indexes().names():
-         # Create index if it doesn't exist
-         pc.create_index(
-             name=index_name,
-             dimension=dimension,
-             metric='dotproduct',  # Change this based on your use case
-             spec=ServerlessSpec(cloud='aws', region='us-west-2')  # Change to your cloud provider and region
-         )
-         st.success(f"Created new index '{index_name}'")
-     else:
-         st.info(f"Index '{index_name}' already exists.")
-     # Connect to the index
-     index = pc.Index(index_name)
-     return index
-
- # Function to encode query using sentence transformers model
- def encode_query(model, query_text):
-     return model.encode(query_text).tolist()
-
- # Initialize Pinecone
- pc = initialize_pinecone()
-
- # If Pinecone initialized successfully, proceed with index management
- if pc:
-     index_name = st.text_input("Enter Index Name", "my_index")
-     dimension = st.number_input("Enter Vector Dimension", min_value=1, value=768)
-
-     # Button to create or connect to index
-     if st.button("Create or Connect to Index"):
-         index = create_or_connect_index(pc, index_name, dimension)
-         if index:
-             st.success(f"Successfully connected to index '{index_name}'")
-
-     # Model for query encoding
-     model = SentenceTransformer('msmarco-bert-base-dot-v5')
-
-     # Query input
-     query_text = st.text_input("Enter a Query to Search", "Can clinicians use the PHQ-9 to assess depression?")
-
-     # Button to encode query and search the Pinecone index
-     if st.button("Search Query"):
-         if query_text and index:
-             dense_vector = encode_query(model, query_text)
-             st.write(f"Encoded Query Vector: {dense_vector}")
-
-             # Search the index (sparse values can be added here as well)
-             results = index.query(
-                 vector=dense_vector,
-                 top_k=5,
-                 include_metadata=True
-             )
-
-             st.write("Search Results:")
-             for match in results.matches:
-                 st.write(f"ID: {match.id}, Score: {match.score}, Metadata: {match.metadata}")
-         else:
-             st.error("Please enter a query and ensure the index is initialized.")
-
-     # Option to delete index
-     if st.button("Delete Index"):
-         if pc and index_name in pc.list_indexes().names():
-             pc.delete_index(index_name)
-             st.success(f"Index '{index_name}' deleted successfully.")
-         else:
-             st.error("Index not found.")
  import os
  import streamlit as st
+ import torch
  from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer
+ from splade.models.transformer_rep import Splade
+ import pinecone
+
+ # Initialize Pinecone connection
+ api_key = os.getenv('PINECONE_API_KEY', 'b250d1e1-fa69-40f7-81e7-442d53f62859')
+ pinecone.init(api_key=api_key, environment='us-east1-gcp')
+ index_name = 'pubmed-splade'
+
+ # Connect to the Pinecone index
+ if pinecone.list_indexes() and index_name in pinecone.list_indexes():
+     index = pinecone.Index(index_name)
+ else:
+     st.error("Pinecone index not found! Ensure the correct Pinecone index is being used.")
+
+ # Initialize Dense and Sparse models
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ # Dense model (Sentence-BERT)
+ dense_model = SentenceTransformer('msmarco-bert-base-dot-v5', device=device)
+
+ # Sparse model (SPLADE)
+ sparse_model_id = 'naver/splade-cocondenser-ensembledistil'
+ sparse_model = Splade(sparse_model_id, agg='max').to(device)
+ sparse_model.eval()
+
+ # Tokenizer for sparse model
+ tokenizer = AutoTokenizer.from_pretrained(sparse_model_id)
+
+ # Function to encode query into dense and sparse vectors
+ def encode(text: str):
+     # Dense vector
+     dense_vec = dense_model.encode(text).tolist()
+
+     # Sparse vector
+     input_ids = tokenizer(text, return_tensors='pt')
+     with torch.no_grad():
+         sparse_vec = sparse_model(d_kwargs=input_ids.to(device))['d_rep'].squeeze()
+
+     # Extract non-zero values and indices for sparse vector
+     indices = sparse_vec.nonzero().squeeze().cpu().tolist()
+     values = sparse_vec[indices].cpu().tolist()
+
+     sparse_dict = {"indices": indices, "values": values}
+     return dense_vec, sparse_dict
+
+ # Function for hybrid search scaling
+ def hybrid_scale(dense, sparse, alpha: float):
+     if alpha < 0 or alpha > 1:
+         raise ValueError("Alpha must be between 0 and 1")
+
+     hsparse = {
+         'indices': sparse['indices'],
+         'values': [v * (1 - alpha) for v in sparse['values']]
+     }
+     hdense = [v * alpha for v in dense]
+
+     return hdense, hsparse
+
+ # Streamlit UI
+ st.title("PubMed Search Application")
+ query = st.text_input("Enter your query:", "")
+
+ # Slider to control sparse-dense scaling
+ alpha = st.slider("Hybrid Search Weight (Dense vs Sparse)", 0.0, 1.0, 0.5)
+
+ if query:
+     # Encode the query
+     dense_vec, sparse_vec = encode(query)
+
+     # Scale vectors based on slider value
+     hdense, hsparse = hybrid_scale(dense_vec, sparse_vec, alpha)
+
+     # Query Pinecone index
+     response = index.query(vector=hdense, sparse_vector=hsparse, top_k=3, include_metadata=True)
+
+     # Display results
+     st.write(f"Top results for query: **{query}**")
+     for match in response['matches']:
+         st.write(f"**Score**: {match['score']}")
+         st.write(f"**Context**: {match['metadata']['context']}")
+         st.write("---")
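For reference, a small worked example of the convex combination that hybrid_scale applies: alpha = 1.0 keeps only the dense vector, alpha = 0.0 keeps only the sparse one, and intermediate values trade the two off. The numbers are made up for illustration:

    # Illustrative values only; same weighting as hybrid_scale above.
    dense = [0.2, 0.4, 0.6]
    sparse = {"indices": [10, 42], "values": [1.5, 0.5]}

    alpha = 0.8
    hdense = [v * alpha for v in dense]                         # ~[0.16, 0.32, 0.48]
    hsparse = {
        "indices": sparse["indices"],                           # [10, 42], unchanged
        "values": [v * (1 - alpha) for v in sparse["values"]],  # ~[0.3, 0.1]
    }

The scaled pair is then passed to index.query(vector=hdense, sparse_vector=hsparse, ...); Pinecone supports such sparse-dense queries only on indexes created with the dotproduct metric.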