File size: 3,287 Bytes
ce13074
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6f596fb
 
 
ce13074
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import streamlit as st
import numpy as np
from html import escape
import torch
from transformers import AutoModel, AutoTokenizer

# Load the Farsi text encoder and its tokenizer from the Hugging Face Hub;
# .eval() puts the model in inference mode (disables dropout etc.).
text_tokenizer = AutoTokenizer.from_pretrained('arman-aminian/farsi-image-search-text')
text_encoder = AutoModel.from_pretrained('arman-aminian/farsi-image-search-text').eval()
# Precomputed image embeddings and the image URLs they were computed from.
# NOTE(review): assumes row i of image_embeddings corresponds to
# image_links[i] — verify against the pipeline that produced these files.
image_embeddings = torch.load('image_embeddings.pt')
image_links = np.load('image_links.npy', allow_pickle=True)


@st.experimental_memo
def image_search(query, top_k=10):
    """Return the links of the ``top_k`` images most relevant to ``query``.

    The query text is encoded with the pretrained text encoder, then every
    precomputed image embedding is ranked by cosine similarity against it.
    Results are memoized by Streamlit so repeated queries are free.
    """
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        tokens = text_tokenizer(query, return_tensors='pt')
        query_embedding = text_encoder(**tokens).pooler_output
    # Rank all images by similarity to the query, best first.
    similarities = torch.cosine_similarity(image_embeddings, query_embedding)
    ranked = similarities.argsort(descending=True)
    return [image_links[idx] for idx in ranked[:top_k]]


def get_html(url_list):
    """Build an HTML flexbox grid of <img> tags for the given image URLs.

    Args:
        url_list: iterable of image URL strings. Each URL is HTML-escaped
            before being embedded in the ``src`` attribute, so untrusted
            URLs cannot break out of the attribute.

    Returns:
        A single HTML string: one flex container with an <img> per URL.
    """
    # Build all <img> tags in one pass and join once, instead of the
    # quadratic-in-the-worst-case `html += ...` loop.
    images = "".join(
        f"<img style='height: 180px; margin: 2px' src='{escape(url)}'>"
        for url in url_list
    )
    return (
        "<div style='margin-top: 50px; max-width: 1100px; display: flex; "
        "flex-wrap: wrap; justify-content: space-evenly'>"
        + images
        + "</div>"
    )


def main():
    """Render the Farsi image-search page: CSS tweaks, sidebar, search box."""
    # Page-level CSS: widen the content area, hide Streamlit's menu/footer.
    page_css = '''
                <style>
                .block-container{
                  max-width: 1200px;
                }
                section.main>div:first-child {
                  padding-top: 0px;
                }
                section:not(.main)>div:first-child {
                  padding-top: 30px;
                }
                div.reportview-container > section:first-child{
                  max-width: 320px;
                }
                #MainMenu {
                  visibility: hidden;
                }
                footer {
                  visibility: hidden;
                }
                </style>'''
    st.markdown(page_css, unsafe_allow_html=True)

    # Project description and credits, shown in the sidebar.
    sidebar_text = '''
# FARSI IMAGE SEARCH
Enter the query. We search for you among [25 thousand photos](https://unsplash.com/) and bring you the most relevant photos.
The thing about this model is that it searches for you among the raw photos and these photos do not have explanations next to them. For a better understanding, we will give some examples of the applications that this model can have
- For example, you can search in Farsi in your phone's photo gallery
- When you are writing a blog or any text, you can put related photos between your texts. In this way, you give the paragraph for which you want a photo to the model, so that it will find a photo related to it for you
Note: We used a small collection (25 thousand) of images to keep this program real-time, but obviously the quality of the image search depends heavily on the size of the image database and this version is just an initial demo to familiarize you with the model.
We have taught the model that this browser is using, and you can see its repository [here](https://github.com/NLP-Final-Projects/image-search).

# MEMBERS
Arman Aminian - Sina Pakseresht - Elham Abolhasani - Reihaneh Halvaie - Ali Khoshtinat - Mona Naderi
'''
    st.sidebar.markdown(sidebar_text)

    # Middle column holds the search box; the outer two are spacers.
    _, center, _ = st.columns((1, 3, 1))
    search_query = center.text_input('Search here')
    if search_query:
        matches = image_search(search_query)
        st.markdown(get_html(matches), unsafe_allow_html=True)


# Launch the Streamlit app when this file is executed as a script.
if __name__ == '__main__':
    main()