arman-aminian commited on
Commit
ce13074
1 Parent(s): b2de875

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import numpy as np
3
+ from html import escape
4
+ import torch
5
+ from transformers import AutoModel, AutoTokenizer
6
+
7
# One-time, module-import-time loading of the text model and the precomputed
# image index. Streamlit re-executes the script on each interaction, but these
# heavyweight loads only happen when a fresh Python process starts.
text_tokenizer = AutoTokenizer.from_pretrained('arman-aminian/farsi-image-search-text')
# .eval() switches the encoder to inference mode (disables dropout/batchnorm updates).
text_encoder = AutoModel.from_pretrained('arman-aminian/farsi-image-search-text').eval()
# Precomputed embeddings for the 25k-image collection and their matching URLs.
# NOTE(review): assumes image_embeddings.pt and image_links.npy sit in the
# working directory next to app.py — confirm deployment layout.
image_embeddings = torch.load('image_embeddings.pt')
image_links = np.load('image_links.npy', allow_pickle=True)
11
+
12
+
13
@st.experimental_memo
def image_search(query, top_k=10):
    """Return the URLs of the ``top_k`` images most relevant to a Farsi text query.

    The query is embedded with the text encoder and compared against the
    precomputed image embeddings by cosine similarity.

    Parameters
    ----------
    query : str
        Free-text (Farsi) search query.
    top_k : int, optional
        Maximum number of image links to return (default 10).

    Returns
    -------
    list
        Image URLs from ``image_links``, most similar first.
    """
    # Run the whole forward pass AND the similarity scoring under no_grad:
    # no autograd graph is needed at inference time.
    with torch.no_grad():
        tokens = text_tokenizer(query, return_tensors='pt')
        text_embedding = text_encoder(**tokens).pooler_output
        scores = torch.cosine_similarity(image_embeddings, text_embedding)
    # topk avoids sorting all N scores when only top_k are needed; results are
    # returned in descending-similarity order. min() guards top_k > N.
    _, indices = scores.topk(min(top_k, scores.numel()))
    # int(i): index numpy array with a plain int rather than a 0-d torch tensor.
    return [image_links[int(i)] for i in indices]
19
+
20
+
21
def get_html(url_list):
    """Render image URLs as a single HTML flex-box gallery string.

    Each URL becomes an ``<img>`` tag (height-capped, small margins) inside a
    wrapping flex container. URLs are HTML-escaped (including quotes) so they
    are safe inside the single-quoted ``src`` attribute.

    Parameters
    ----------
    url_list : iterable of str
        Image URLs to display.

    Returns
    -------
    str
        HTML markup for the gallery (empty container for an empty input).
    """
    # ''.join over a generator instead of repeated string concatenation in a loop.
    imgs = "".join(
        f"<img style='height: 180px; margin: 2px' src='{escape(url)}'>"
        for url in url_list
    )
    opening = "<div style='margin-top: 50px; max-width: 1100px; display: flex; flex-wrap: wrap; justify-content: space-evenly'>"
    return opening + imgs + "</div>"
28
+
29
+
30
def main():
    """Build the Streamlit page: custom CSS, sidebar description, search box, results."""
    # Inline CSS tweaks: widen the main content block, trim top padding, cap
    # the sidebar width, and hide Streamlit's default menu and footer chrome.
    st.markdown('''
<style>
.block-container{
max-width: 1200px;
}
section.main>div:first-child {
padding-top: 0px;
}
section:not(.main)>div:first-child {
padding-top: 30px;
}
div.reportview-container > section:first-child{
max-width: 320px;
}
#MainMenu {
visibility: hidden;
}
footer {
visibility: hidden;
}
</style>''',
        unsafe_allow_html=True)

    # Sidebar: project title, usage notes, and a link to the model repository.
    st.sidebar.markdown('''
# FARSI IMAGE SEARCH
Enter the query. We search for you among [25 thousand photos](https://unsplash.com/) and bring you the most relevant photos.
The thing about this model is that it searches for you among the raw photos and these photos do not have explanations next to them. For a better understanding, we will give some examples of the applications that this model can have
- For example, you can search in Farsi in your phone's photo gallery
- When you are writing a blog or any text, you can put related photos between your texts. In this way, you give the paragraph for which you want a photo to the model, so that it will find a photo related to it for you
Note: We used a small collection (25 thousand) of images to keep this program real-time, but obviously the quality of the image search depends heavily on the size of the image database and this version is just an initial demo to familiarize you with the model.
We have taught the model that this browser is using, and you can see its repository [here](https://github.com/NLP-Final-Projects/image-search).
''')
    # Three columns with a wide middle one: centers the search box on the page.
    _, c, _ = st.columns((1, 3, 1))
    query = c.text_input('Search here')
    # Only run a search once the user has typed something.
    if len(query) > 0:
        results = image_search(query)
        # The result gallery is raw HTML, so unsafe_allow_html must be enabled.
        st.markdown(get_html(results), unsafe_allow_html=True)
70
+
71
+
72
# Script entry point (e.g. `streamlit run app.py`): build the UI.
if __name__ == '__main__':
    main()