import gradio as gr
import pandas as pd
from pytube import extract
import re
import string
import pickle
import nltk
import nltk.sentiment.util
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer 

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tensorflow import keras

from youtube_comment_downloader import *

nltk.download('punkt')
nltk.download('wordnet')  # needed by WordNetLemmatizer

# get YouTube ID
def getID(url):
    print("Getting YouTube ID...")
    return extract.video_id(url)
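
# Illustrative example (not from the original code): pytube's extract.video_id
# returns the bare 11-character video ID, e.g.
#   extract.video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  ->  "dQw4w9WgXcQ"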

# function to clean comments
def clean_text(text):
    lemmatizer = WordNetLemmatizer()
    # stopwords
    sw = ["i","me","my","myself","we","our","ours","ourselves","you","you're","you've","you'll","you'd","your","yours","yourself","yourselves","he","him","his","himself","she","she's","her","hers","herself","it","it's","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","that'll","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how","all","any","both","each","few","more","most","other","some","such","no","nor","not","only","own","same","so","than","too","very","s","t","can","will","just","don","don't","should","should've","now","d","ll","m","o","re","ve","y","ain","aren","aren't","couldn","couldn't","didn","didn't","doesn","doesn't","hadn","hadn't","hasn","hasn't","haven","haven't","isn","isn't","ma","mightn","mightn't","mustn","mustn't","needn","needn't","shan","shan't","shouldn","shouldn't","wasn","wasn't","weren","weren't","won","won't","wouldn","wouldn't"]
    # lowercase, then strip mentions, bracketed text, URLs, HTML tags,
    # punctuation, newlines, digit-bearing tokens and any remaining
    # non-alphabetic characters (this also removes emojis)
    text = text.lower()
    text = re.sub(r'@', '', text)
    text = re.sub(r'\[.*?\]', '', text)
    text = re.sub(r'https?://\S+|www\.\S+', '', text)
    text = re.sub(r'<.*?>+', '', text)
    text = re.sub(r'[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\n', '', text)
    text = re.sub(r'\w*\d\w*', '', text)
    text = re.sub(r"[^a-zA-Z ]+", "", text)
    
    # tokenize the data
    text = nltk.word_tokenize(text)
    
    # lemmatize
    text = [lemmatizer.lemmatize(t) for t in text]
    text = [lemmatizer.lemmatize(t, 'v') for t in text]
    
    # mark Negation
    tokens_neg_marked = nltk.sentiment.util.mark_negation(text)
    
    # remove stopwords
    text = [t for t in tokens_neg_marked
             if t.replace("_NEG", "").isalnum() and
             t.replace("_NEG", "") not in sw]
    
    return text
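
# Illustrative example (assuming the punkt and wordnet resources are downloaded):
#   clean_text("I don't like this video!")
# yields tokens roughly like ['dont', 'like_NEG', 'video_NEG'] -- words after a
# negation are suffixed with _NEG so the classifiers can tell them apart from
# their plain forms.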

def getSentenceTrain():
    # load the pre-processed training sentences used to fit the tokenizer
    with open('Deep learning/pickles/sentences_train.pickle', 'rb') as sentences_train_f:
        sentences_train = pickle.load(sentences_train_f)
    return sentences_train

# SGD_74_f = open('Shallow machine learning/pickles/SGD_74.pickle', "rb")
# SGD_train = pickle.load(SGD_74_f)
# SGD_74_f.close()

# logreg_79_f = open('Shallow machine learning/pickles/logreg_79.pickle', "rb")
# logreg_train = pickle.load(logreg_79_f)
# logreg_79_f.close()

# # get saved CNN model
# model = keras.models.load_model("Deep learning/CNN_82")
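
# NOTE: vote() below expects SGD_train, logreg_train and model to be defined, so
# the three loading blocks above must be uncommented (and the pickle/model paths
# present in this repository layout) before the full pipeline can run; as-is,
# greet() only returns the "Hello" placeholder.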

def vote(test_point, _test):
    print("Voting on video effectivess...\n")
    pos_weighting = []
    result = ''
    confidence = 0
    algos_score = 0
    
    algorithms = [
        {'name': 'SGD', 'accuracy': 0.74 * 100, 'trained': SGD_train},
        {'name': 'Logistic Regression', 'accuracy': 0.79 * 100, 'trained': logreg_train},
        {'name': 'CNN', 'accuracy': 0.82 * 100, 'trained': model}
    ]
    
    for algo in algorithms:
        weight = algo['accuracy']
        algos_score += weight
        if algo['name'] == "CNN":
            pred = algo['trained'].predict(_test)
            if pred[0][0] > 0.5:
                pos_weighting.append(weight)
            print("CNN voted for: effective" if pred[0][0]>0.5 else "CNN voted for: ineffective")
        else:
            pred = algo['trained'].predict(test_point)
            if pred == 'pos':
                pos_weighting.append(weight)
            print(algo['name'] + " voted for: " + ("effective" if pred == 'pos' else "ineffective"))

    pos_result = sum(pos_weighting)/algos_score
    if pos_result < 0.5:
        result = 'ineffective'
        confidence = 1 - pos_result
    else:
        result = 'effective'
        confidence = pos_result
        
    return result, confidence
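
# Worked example of the weighting above: the accuracies are 74 (SGD), 79
# (Logistic Regression) and 82 (CNN), so algos_score = 235. If SGD and the CNN
# vote "effective" but Logistic Regression does not, pos_result = (74 + 82) / 235
# ~= 0.66, and the video is labelled effective with ~66% confidence.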

def quantizeEffectiveness(url):
    # 1. Get YouTube ID
    print("Getting YouTube ID...")
    videoId = getID(url)
    
    # 2. Download comments
    print("Downloading comments...")
    downloader = YoutubeCommentDownloader()
    comments_downloaded = downloader.get_comments_from_url(f'https://www.youtube.com/watch?v={videoId}')
    comments = [comment for comment in comments_downloaded]
    comments_df = pd.DataFrame(comments)
    
    # 3. Clean comments
    print("Cleaning Comments...")
    comments_df['text'] = comments_df['text'].apply(lambda x: clean_text(x))

    # get all words of video into one list
    all_words = [item for sublist in comments_df['text'].tolist() for item in sublist]

    # 4. Create test dataframe
    test = pd.DataFrame([[videoId]], columns=['VideoId'])
    
    # 5. Get documents (pre-processed comments)
    test_documents = []
    test_documents.append(all_words)
    test['cleaned'] = test_documents
    test['cleaned_string'] = [' '.join(map(str, l)) for l in test['cleaned']]
    
    # 6. Get ML test point
    test_point = test['cleaned_string']
    test_sentence = test['cleaned_string'].values
    
    # 7. Get trained sentences
    sentences_train = getSentenceTrain()
    
    # 8. Tokenize the data
    print("Tokenizing the data...")
    tokenizer = Tokenizer(num_words=5000)
    tokenizer.fit_on_texts(sentences_train)
    
    # 9. Get DL test point
    _test = pad_sequences(tokenizer.texts_to_sequences(test_sentence), padding='post', maxlen=100)
    
    # 10. Vote on video effectiveness
    result, confidence = vote(test_point, _test)

    return result, confidence

def greet(url):
    # videoId = getID(url)
    # result, confidence = quantizeEffectiveness(url)
    # return f"The video (ID: {videoId}) is {result} with a confidence of {round(confidence*100, 2)}%"
    return "Hello"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
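
# Example usage sketch (assumes the commented-out model loading above has been
# re-enabled; the URL is a placeholder):
#   result, confidence = quantizeEffectiveness("https://www.youtube.com/watch?v=VIDEO_ID")
#   print(result, confidence)
# greet() could then return this instead of the "Hello" placeholder.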