# project-moogeul / app.py
import datetime
import numpy as np
import pandas as pd
import re
import json
import os
import glob
import torch
import torch.nn.functional as F
from torch.optim import Adam
from tqdm import tqdm
from torch import nn
from transformers import BertModel
from transformers import AutoTokenizer
import argparse
from bs4 import BeautifulSoup
import requests
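# MooGeulMooGeul: a Gradio chat app that walks a user from a nickname to a seed word,
# a sentence, a short text, and finally a long text, then runs a sentence-level
# emotion analysis over the long text and reports the emotion distribution.

# Split an essay into sentences: break on newlines, then on periods, and drop empty fragments.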
def split_essay_to_sentence(origin_essay):
origin_essay_sentence = sum([[a.strip() for a in i.split('.')] for i in origin_essay.split('\n')], [])
essay_sent = [a for a in origin_essay_sentence if len(a) > 0]
return essay_sent
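# Ask the question-answering pipeline "what is the feeling?" for each sentence.
# Relies on the module-level `question_answerer` defined further down.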
def get_first_extraction(text_sentence):
row_dict = {}
for row in tqdm(text_sentence):
question = 'what is the feeling?'
answer = question_answerer(question=question, context=row)
row_dict[row] = answer
return row_dict
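# Build emotion label mappings from a local CSV (./rawdata/sentimental_label.csv).
# emo2idx maps (label_id, emotion_word) pairs to class indices; idx2emo maps indices back to emotion words.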
def get_sent_labeldata():
    label = pd.read_csv('./rawdata/sentimental_label.csv', encoding='cp949', header=None)
    label[1] = label[1].apply(lambda x: re.findall(r'[๊ฐ€-ํžฃ]+', x)[0])
    label_dict = label[label.index % 10 == 0].set_index(0).to_dict()[1]
    emo2idx = {v: k for k, v in enumerate(label_dict.items())}
    idx2emo = {v: k[1] for k, v in emo2idx.items()}
return emo2idx, idx2emo
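# Minimal Dataset wrapper: tokenizes one sentence per item with the module-level tokenizer,
# padded/truncated to 96 tokens, for batched inference.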
class myDataset_for_infer(torch.utils.data.Dataset):
def __init__(self, X):
self.X = X
def __len__(self):
return len(self.X)
def __getitem__(self,idx):
sentences = tokenizer(self.X[idx], return_tensors = 'pt', padding = 'max_length', max_length = 96, truncation = True)
return sentences
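# Run the sentence-emotion classifier over all sentences and return one predicted class index per sentence.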
def infer_data(model, main_feeling_keyword):
#ds = myDataset_for_infer()
df_infer = myDataset_for_infer(main_feeling_keyword)
infer_dataloader = torch.utils.data.DataLoader(df_infer, batch_size= 16)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if device == 'cuda':
model = model.cuda()
result_list = []
with torch.no_grad():
for idx, infer_input in tqdm(enumerate(infer_dataloader)):
mask = infer_input['attention_mask'].to(device)
input_id = infer_input['input_ids'].squeeze(1).to(device)
output = model(input_id, mask)
            result = output.logits.argmax(dim=1).cpu().numpy()
result_list.extend(result)
return result_list
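# Pair each sentence with its predicted emotion, extract nouns/adjectives/verbs with the UPOS tagger,
# and save the per-sentence result CSV under ./result/<nickname>/<timestamp>/.
# Note: `nickname` is read from the global set in run_all, not passed as an argument.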
def get_word_emotion_pair(cls_model, origin_essay_sentence, idx2emo):
import re
def get_noun(sent):
return [re.sub(r'[์„๋ฅผ]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'NOUN']
def get_adj(sent):
return [re.sub(r'[์„๋ฅผ]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'ADJ']
def get_verb(sent):
return [re.sub(r'[์„๋ฅผ]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'VERB']
result_list = infer_data(cls_model, origin_essay_sentence)
final_result = pd.DataFrame(data = {'text': origin_essay_sentence , 'label' : result_list})
final_result['emotion'] = final_result['label'].map(idx2emo)
nlp=lambda x:[(x[t["start"]:t["end"]],t["entity_group"]) for t in pipeline(x)]
#essay_sent_pos = [nlp(i) for i in tqdm(essay_sent)]
#final_result['text_pos'] = essay_sent_pos
final_result['noun_list'] = final_result['text'].map(get_noun)
final_result['adj_list'] = final_result['text'].map(get_adj)
final_result['verb_list'] = final_result['text'].map(get_verb)
final_result['title'] = 'none'
file_made_dt = datetime.datetime.now()
    file_made_dt_str = datetime.datetime.strftime(file_made_dt, '%Y%m%d_%H%M%S')
os.makedirs(f'./result/{nickname}/{file_made_dt_str}/', exist_ok = True)
final_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/essay_result.csv", index = False)
return final_result, file_made_dt_str
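# Aggregate the per-sentence CSV: vocabulary counts, the emotion distribution per title,
# and per-word emotion counts (all words, adjectives included, and nouns only).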
def get_essay_base_analysis(file_made_dt_str, nickname):
    essay1 = pd.read_csv(f"./result/{nickname}/{file_made_dt_str}/essay_result.csv")
    # the list columns were serialized as strings by to_csv; convert them back to lists first
    essay1['noun_list'] = essay1['noun_list'].apply(lambda x: eval(x))
    essay1['adj_list'] = essay1['adj_list'].apply(lambda x: eval(x))
    essay1['noun_list_len'] = essay1['noun_list'].apply(lambda x: len(x))
    essay1['noun_list_uniqlen'] = essay1['noun_list'].apply(lambda x: len(set(x)))
    essay1['adj_list_len'] = essay1['adj_list'].apply(lambda x: len(x))
    essay1['adj_list_uniqlen'] = essay1['adj_list'].apply(lambda x: len(set(x)))
    essay1['vocab_all'] = essay1[['noun_list', 'adj_list']].apply(lambda x: x['noun_list'] + x['adj_list'], axis=1)
    essay1['vocab_cnt'] = essay1['vocab_all'].apply(lambda x: len(x))
    essay1['vocab_unique_cnt'] = essay1['vocab_all'].apply(lambda x: len(set(x)))
    d = essay1.groupby('title')[['noun_list', 'adj_list']].sum().reset_index()
d['noun_cnt'] = d['noun_list'].apply(lambda x : len(set(x)))
d['adj_cnt'] = d['adj_list'].apply(lambda x : len(set(x)))
    # emotion counts per title, at the sentence level
    essay_summary = essay1.groupby(['title'])['emotion'].value_counts().unstack(level=1)
emo_vocab_dict = {}
for k, v in essay1[['emotion','noun_list']].values:
for vocab in v:
if (k, 'noun', vocab) not in emo_vocab_dict:
emo_vocab_dict[(k, 'noun', vocab)] = 0
emo_vocab_dict[(k, 'noun', vocab)] += 1
for k, v in essay1[['emotion','adj_list']].values:
for vocab in v:
if (k, 'adj', vocab) not in emo_vocab_dict:
emo_vocab_dict[(k, 'adj', vocab)] = 0
emo_vocab_dict[(k, 'adj', vocab)] += 1
vocab_emo_cnt_dict = {}
for k, v in essay1[['emotion','noun_list']].values:
for vocab in v:
            if ('noun', vocab) not in vocab_emo_cnt_dict:
vocab_emo_cnt_dict[('noun', vocab)] = {}
if k not in vocab_emo_cnt_dict[( 'noun', vocab)]:
vocab_emo_cnt_dict[( 'noun', vocab)][k] = 0
vocab_emo_cnt_dict[('noun', vocab)][k] += 1
for k, v in essay1[['emotion','adj_list']].values:
for vocab in v:
if ('adj', vocab) not in vocab_emo_cnt_dict:
vocab_emo_cnt_dict[( 'adj', vocab)] = {}
if k not in vocab_emo_cnt_dict[( 'adj', vocab)]:
vocab_emo_cnt_dict[( 'adj', vocab)][k] = 0
vocab_emo_cnt_dict[('adj', vocab)][k] += 1
vocab_emo_cnt_df = pd.DataFrame(vocab_emo_cnt_dict).T
vocab_emo_cnt_df['total'] = vocab_emo_cnt_df.sum(axis=1)
    # top emotion and emotion counts per word
    all_result = vocab_emo_cnt_df.sort_values(by='total', ascending=False)
    # top emotion and emotion counts per word, adjectives included
    adj_result = vocab_emo_cnt_df.sort_values(by='total', ascending=False)
    # nouns only
    noun_result = vocab_emo_cnt_df[vocab_emo_cnt_df.index.get_level_values(0) == 'noun'].sort_values(by='total', ascending=False)
    final_file_name = "essay_all_vocab_result.csv"
    adj_file_name = "essay_adj_vocab_result.csv"
    noun_file_name = "essay_noun_vocab_result.csv"
    os.makedirs(f'./result/{nickname}/{file_made_dt_str}/', exist_ok=True)
    # keep the (pos, word) index so the vocabulary itself is saved alongside the emotion counts
    all_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{final_file_name}")
    adj_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{adj_file_name}")
    noun_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{noun_file_name}")
return all_result, adj_result, noun_result, essay_summary, file_made_dt_str
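# Model setup (module level): a Korean extractive QA pipeline and a Korean UPOS part-of-speech tagger.
# The 6-label sentiment classifier (fine-tuned on bert-base-multilingual-cased) is loaded inside all_process.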
from transformers import pipeline
#model_name = 'AlexKay/xlm-roberta-large-qa-multilingual-finedtuned-ru'
model_name = 'monologg/koelectra-base-v2-finetuned-korquad'
question_answerer = pipeline("question-answering", model=model_name)
from transformers import AutoTokenizer, AutoModelForTokenClassification, TokenClassificationPipeline
tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-large-korean-upos")
posmodel = AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-large-korean-upos")
# note: this rebinds `pipeline` (imported from transformers above) to the POS-tagging pipeline instance;
# the question-answering pipeline has already been built, so the shadowing is harmless here
pipeline = TokenClassificationPipeline(tokenizer=tokenizer,
                                       model=posmodel,
                                       aggregation_strategy="simple",
                                       task='token-classification')
nlp = lambda x: [(x[t["start"]:t["end"]], t["entity_group"]) for t in pipeline(x)]
from transformers import AutoModelForSequenceClassification
device = 'cuda' if torch.cuda.is_available() else 'cpu'
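# End-to-end analysis: split the essay, classify each sentence's emotion, tag parts of speech,
# aggregate the results, dump them as JSON, and return a one-line emotion-distribution summary.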
def all_process(origin_essay, nickname):
    global tokenizer  # rebind the module-level tokenizer so myDataset_for_infer uses the BERT tokenizer loaded below
    essay_sent = split_essay_to_sentence(origin_essay)
row_dict = {}
for row in tqdm(essay_sent):
question = 'what is the feeling?'
answer = question_answerer(question=question, context=row)
row_dict[row] = answer
emo2idx, idx2emo = get_sent_labeldata()
tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
cls_model = AutoModelForSequenceClassification.from_pretrained('seriouspark/bert-base-multilingual-cased-finetuning-sentimental-6label')
#cls_model = AutoModelForSequenceClassification.from_pretrained('bert-base-multilingual-cased', num_labels = 6)
final_result, file_name_dt = get_word_emotion_pair(cls_model, essay_sent, idx2emo)
all_result, adj_result, noun_result, essay_summary, file_made_dt_str = get_essay_base_analysis(file_name_dt, nickname)
summary_result = pd.concat([adj_result, noun_result]).fillna(0).sort_values(by = 'total', ascending = False).fillna(0).reset_index()[:30]
with open(f'./result/{nickname}/{file_name_dt}/summary.json','w') as f:
json.dump( essay_summary.to_json(),f)
with open(f'./result/{nickname}/{file_made_dt_str}/all_result.json','w') as f:
json.dump( all_result.to_json(),f)
with open(f'./result/{nickname}/{file_made_dt_str}/adj_result.json','w') as f:
json.dump( adj_result.to_json(),f)
with open(f'./result/{nickname}/{file_made_dt_str}/noun_result.json','w') as f:
json.dump( noun_result.to_json(),f)
#return essay_summary, summary_result
total_cnt = essay_summary.sum(axis=1).values[0]
essay_summary_list = sorted(essay_summary.T.to_dict()['none'].items(), key = lambda x: x[1], reverse =True)
essay_summary_list_str = ' '.join([f'{row[0]} {int(row[1]*100 / total_cnt)}%' for row in essay_summary_list])
summary1 = f"""{nickname}๋‹˜, ๋‹น์‹ ์˜ ๊ธ€ ์†์—์„œ ๋Š๊ปด์ง€๋Š” ๊ฐ์ •๋ถ„ํฌ๋Š” [{essay_summary_list_str}] ์ž…๋‹ˆ๋‹ค"""
return summary1
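# Scrape the Naver dictionary search page for words related to the input
# (the "similarWordName" fields embedded in the page's script block).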
def get_similar_vocab(message):
#print(re.findall('[๊ฐ€-ํžฃ]+',message))
if (len(message) > 0) & (len(re.findall('[๊ฐ€-ํžฃ]+',message))>0):
        vocab = message
        all_dict_url = f"https://dict.naver.com/search.dict?dicQuery={vocab}&query={vocab}&target=dic&ie=utf8&query_utf=&isOnlyViewEE="
        response = requests.get(all_dict_url)
        html_content = response.text
        # parse the HTML with BeautifulSoup
        soup = BeautifulSoup(html_content, 'html.parser')
        resulttext = soup.find('script').string
        # extract the words that follow "similarWordName"
        similar_words = re.findall(r'similarWordName:"([^"]+)"', resulttext)
similar_words_final = list(set(sum([re.findall('[๊ฐ€-ํžฃ]+', i) for i in similar_words], [])))
return similar_words_final
else:
return '๋‹จ์–ด๋ฅผ ์ž…๋ ฅํ•ด ์ฃผ์„ธ์š”'
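# Scrape the Naver dictionary search page for Korean-only definitions of a word
# (the "meanList"/"mean" fields embedded in the page's script block).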
def get_similar_means(vocab):
all_dict_url = f"https://dict.naver.com/search.dict?dicQuery={vocab}&query={vocab}&target=dic&ie=utf8&query_utf=&isOnlyViewEE="
response = requests.get(all_dict_url)
html_content = response.text
    # parse the HTML with BeautifulSoup
soup = BeautifulSoup(html_content, 'html.parser')
resulttext = soup.find('script').string
# "meanList" ๋‹ค์Œ์˜ ๋ฆฌ์ŠคํŠธ ์ถ”์ถœ (๋ฆฌ์ŠคํŠธ ๋‚ด์šฉ์„ ๋ฌธ์ž์—ด๋กœ ์ถ”์ถœ)
mean_list_str = re.findall(r'meanList:(\[.*?\])', resulttext, re.DOTALL)
matches_list = []
for i in range(len(mean_list_str)):
matches = re.findall(r'mean:"(.*?)"', mean_list_str[i])
matches_list.append(matches)
mean_list_str_final = [i for i in sum(matches_list, []) if (len(re.findall(r'[A-Za-z0-9]', i) )==0 ) & (len(re.findall(r'[๊ฐ€-ํžฃ]', i) )!=0 )]
return mean_list_str_final
#info_dict = {}
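# Chat handler for gr.ChatInterface. The conversation is a keyword-driven state machine:
# "๋‹‰๋„ค์ž„:" (nickname) -> "๋‹จ์–ด:" (word) -> "๋ฌธ์žฅ:" (sentence) -> "์งง์€๊ธ€:" (short text)
# -> "๊ธด๊ธ€:" (long text, 500+ characters) -> "๋ถ„์„์‹œ์ž‘" (run the analysis).
# State is kept in the module-level globals `nickname` and `info_dict`.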
def run_all(message, history):
global info_dict
if message.find('๋‹‰๋„ค์ž„:')>=0:
global nickname
nickname = message.replace('๋‹‰๋„ค์ž„','').replace(':','').strip()
#global nickname
info_dict[nickname] = {}
return f'''์ข‹์•„์š”! ์‹œ์ž‘ํ• ๊ฒŒ์š” {nickname}๋‹˜.
์ง€๊ธˆ ๋จธ๋ฆฟ์†์— ๋– ์˜ค๋ฅด๋Š” ๋‹จ์–ด๋ฅผ ํ•˜๋‚˜ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.
๋‹จ์–ด๋ฅผ ์ž…๋ ฅํ•  ๋• \"๋‹จ์–ด: \" ๋ฅผ ํฌํ•จํ•ด์ฃผ์„ธ์š”
(๋‹จ์–ด: ์ปคํ”ผ)
'''
try :
#print(nickname)
if message.find('๋‹จ์–ด:')>=0:
clear_message = message.replace('๋‹จ์–ด','').replace(':','').strip()
info_dict[nickname]['main_word'] = clear_message
vocab_mean_list = []
            similar_words_final = get_similar_vocab(clear_message)
            similar_words_final_with_main = similar_words_final + [clear_message]
if len(similar_words_final_with_main)>0:
for w in similar_words_final_with_main:
temp_means = get_similar_means(w)
vocab_mean_list.append(temp_means)
fixed_similar_words_final = list(set([i for i in sum(vocab_mean_list, []) if len(i) > 10]))[:10]
word_str = ' \n'.join([str(idx) + ") " + i for idx, i in enumerate(similar_words_final, 1)])
sentence_str = ' \n'.join([str(idx) + ") " + i for idx, i in enumerate(fixed_similar_words_final, 1)])
return f'''<{clear_message}> ์„ ํ™œ์šฉํ•œ ๊ธ€์“ฐ๊ธฐ๋ฅผ ์‹œ์ž‘ํ•ด๋ณผ๊นŒ์š”?
์šฐ์„ , ์œ ์‚ฌํ•œ ๋‹จ์–ด๋ถ€ํ„ฐ ํ™•์ธํ•ด๋ณผ๊ฒŒ์š”.
{word_str}
\n
์œ ์‚ฌํ•œ ๋‹จ์–ด๋“ค์˜ ๋œป์€ ์•„๋ž˜์™€ ๊ฐ™์Šต๋‹ˆ๋‹ค.
{sentence_str}
\n
\n
์›ํ•˜๋Š” ๋ฌธ์žฅ์„ ๊ณจ๋ผ์„œ "๋ฌธ์žฅ:" ์„ ํฌํ•จํ•ด ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.
'''
else:
return '\"๋‹จ์–ด:\" ๋ฅผ ํฌํ•จํ•ด์„œ ๋‹จ์–ด๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š” (๋‹จ์–ด: ์ปคํ”ผ)'
elif message.find('๋ฌธ์žฅ:')>=0:
clear_message = message.replace('๋ฌธ์žฅ','').replace(':','').strip()
info_dict[nickname]['selected_sentence'] = clear_message
return f'''<{clear_message}>๋ฅผ ๊ณ ๋ฅด์…จ๋„ค์š”.
\n
์œ„ ๋ฌธ์žฅ์„ ํ™œ์šฉํ•ด ์งง์€ ๊ธ€์“ฐ๊ธฐ๋ฅผ ํ•ด๋ณผ๊นŒ์š”?
\"์งง์€๊ธ€: \"์„ ํฌํ•จํ•ด ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”
(์งง์€๊ธ€: ์ง€๊ธˆ ๋ฐฅ์„ ๋จน๊ณ  ์žˆ๋Š” ์ค‘์ด๋‹ค)
'''
elif message.find('์งง์€๊ธ€:')>=0:
clear_message = message.replace('์งง์€๊ธ€','').replace(':','').strip()
info_dict[nickname]['short_contents'] = clear_message
return f'''<{clear_message}>๋ผ๊ณ  ์ž…๋ ฅํ•ด์ฃผ์…จ๋„ค์š”.
\n ์œ„ ๋ฌธ์žฅ์„ ํ™œ์šฉํ•ด ๊ธด ๊ธ€์“ฐ๊ธฐ๋ฅผ ํ•ด๋ณผ๊นŒ์š”? 500์ž ์ด์ƒ ์ž‘์„ฑํ•ด์ฃผ์‹œ๋ฉด ์ข‹์•„์š”.
\n \"๊ธด๊ธ€: \"์„ ํฌํ•จํ•ด ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”
\n (๊ธด๊ธ€: ์ง€๊ธˆ ๋ฐฅ์„ ๋จน๊ณ  ์žˆ๋Š” ์ค‘์ด๋‹ค. ๋ฐฅ์„ ๋จน์„๋•Œ ๋งˆ๋‹ค ๋‚˜๋Š” ๋ฐฅ์•Œ์„ ํ˜“๋ฐ”๋‹ฅ์œผ๋กœ ๊ตด๋ ค๋ณธ๋‹ค. ... (์ƒ๋žต) )
'''
elif message.find('๊ธด๊ธ€:')>=0:
long_message = message.replace('๊ธด๊ธ€','').replace(':','').strip()
length_of_lm = len(long_message)
if length_of_lm >= 500:
                info_dict[nickname]['long_contents'] = long_message
os.makedirs(f"./result/{nickname}/", exist_ok = True)
with open(f"./result/{nickname}/contents.txt",'w') as f:
f.write(long_message)
return f'์ž…๋ ฅํ•ด์ฃผ์‹  ๊ธ€์€ {length_of_lm}์ž ์ž…๋‹ˆ๋‹ค. ์ด ๊ธ€์€ ๋ถ„์„ํ•ด๋ณผ๋งŒ ํ•ด์š”. ๋ถ„์„์„ ์›ํ•˜์‹ ๋‹ค๋ฉด "๋ถ„์„์‹œ์ž‘" ์ด๋ผ๊ณ  ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”'
else :
return f'์ž…๋ ฅํ•ด์ฃผ์‹  ๊ธ€์€ {length_of_lm}์ž ์ž…๋‹ˆ๋‹ค. ๋ถ„์„ํ•˜๊ธฐ์— ์กฐ๊ธˆ ์งง์•„์š”. ์กฐ๊ธˆ ๋” ์ž…๋ ฅํ•ด์ฃผ์‹œ๊ฒ ์–ด์š”?'
elif message.find('๋ถ„์„์‹œ์ž‘')>=0:
with open(f"./result/{nickname}/contents.txt",'r') as f:
orign_essay = f.read()
all_process(orign_essay, nickname)
else:
return '์ฒ˜์Œ๋ถ€ํ„ฐ ์‹œ์ž‘ํ•ด์ฃผ์„ธ์š”'
except:
return '์—๋Ÿฌ๊ฐ€ ๋ฐœ์ƒํ–ˆ์–ด์š”. ์ฒ˜์Œ๋ถ€ํ„ฐ ์‹œ์ž‘ํ•ฉ๋‹ˆ๋‹ค. ๋‹‰๋„ค์ž„: ์„ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”'
import gradio as gr
import requests
history = []
info_dict = {}
iface = gr.ChatInterface(
fn=run_all,
chatbot = gr.Chatbot(),
textbox = gr.Textbox(placeholder='์ฑ—๋ด‡์˜ ์š”์ฒญ ์ ‘๋‘์‚ฌ๋ฅผ ํฌํ•จํ•˜์—ฌ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”', container = True, scale = 7),
title = 'MooGeulMooGeul',
description = '๋‹น์‹ ์˜ ๋‹‰๋„ค์ž„๋ถ€ํ„ฐ ์ •ํ•ด์„œ ์•Œ๋ ค์ฃผ์„ธ์š”. "๋‹‰๋„ค์ž„: " ์„ ํฌํ•จํ•ด์„œ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.',
theme = 'soft',
examples = ['๋‹‰๋„ค์ž„: ์ปคํ”ผ๋Ÿฌ๋ฒ„',
'๋‹จ์–ด: ์ปคํ”ผ',
'๋ฌธ์žฅ: ์ผ์ •ํ•œ ์ฃผ์ œ๋‚˜ ์ค„๊ฑฐ๋ฆฌ๋ฅผ ๊ฐ€์ง„ ์ด์•ผ๊ธฐ',
'์งง์€๊ธ€: ์–ด๋–ค ์ฃผ์ œ๋‚˜ ์ค„๊ฑฐ๋ฆฌ์— ๋Œ€ํ•ด์„œ๋„ ์ด์•ผ๊ธฐ๋ฅผ ์ž˜ ํ•˜๋Š” ์‚ฌ๋žŒ์ด ํ•˜๋‚˜ ์žˆ์—ˆ๋‹ค. ๋‚˜์˜ ์ด๋ชจ. ๊ทธ ์‚ฌ๋žŒ์€ ์ปคํ”ผ ํ•œ์ž”๋งŒ ์žˆ๋‹ค๋ฉด ์–ด๋–ค ์ด์•ผ๊ธฐ๋“  ๋‚ด๊ฒŒ ๋“ค๋ ค์ฃผ์—ˆ๋‹ค.',
'''๊ธด๊ธ€: ์–ด๋–ค ์ฃผ์ œ๋‚˜ ์ค„๊ฑฐ๋ฆฌ์— ๋Œ€ํ•ด์„œ๋„ ์ด์•ผ๊ธฐ๋ฅผ ์ž˜ ํ•˜๋Š” ์‚ฌ๋žŒ์ด ํ•˜๋‚˜ ์žˆ์—ˆ๋‹ค. ๋‚˜์˜ ์ด๋ชจ. ๊ทธ ์‚ฌ๋žŒ์€ ์ปคํ”ผ ํ•œ ์ž”๋งŒ ์žˆ๋‹ค๋ฉด ์–ด๋–ค ์ด์•ผ๊ธฐ๋“  ํ•  ์ˆ˜ ์žˆ์—ˆ๋‹ค.
์–ด๋ฆฐ์‹œ์ ˆ์˜ ๋‚˜๋Š” ๊ทธ ์ด์•ผ๊ธฐ๋ฅผ ๋“ฃ๊ธฐ ์œ„ํ•ด ํ•„์‚ฌ์ ์œผ๋กœ ์ง‘์œผ๋กœ ๋Œ์•„์™”๋‹ค. ์œ ์น˜์›๋•Œ๋Š” ์ง‘์— ๊ฐ€์•ผ ํ•œ๋‹ค๋ฉฐ ๋–ผ๋ฅผ ์“ฐ๊ณ  ์šธ์—ˆ๋‹ค๊ณ  ํ–ˆ๋‹ค.
์ดˆ๋“ฑํ•™์ƒ์ด ๋˜์–ด์„œ๋Š” 4๊ต์‹œ ๋•ก! ํ•˜๋Š” ์†Œ๋ฆฌ๊ฐ€ ๋“ค๋ฆฌ๋ฉด ๊ฐ€๋ฐฉ์„ ์žฌ๋นจ๋ฆฌ ์‹ธ์„œ ์ง‘์œผ๋กœ ๋Œ์•„์™”๋‹ค. ์ง‘์—๋Š” ํ•ญ์ƒ ๋‚˜๋ฅผ ๊ธฐ๋‹ค๋ฆฌ๊ณ  ์žˆ๋Š” ์ด๋ชจ์™€ ์ด๋ชจ์˜ ์ปคํ”ผ ๋ƒ„์ƒˆ๊ฐ€ ์žˆ์—ˆ๋‹ค.
๋”ฐ๋œปํ•œ ๋ฏน์Šค์ปคํ”ผ๋ƒ„์ƒˆ, ๊ทธ๋ฆฌ๊ณ  ๊ณ ์š”ํ•œ ์ง‘์•ˆ์— ์šธ๋ฆฌ๋˜ ์ด์•ผ๊นƒ๊ฑฐ๋ฆฌ๊ฐ€ ์ƒ์ƒํ•˜๋‹ค. ์ด๋ชจ๋Š” ์–ด๋–ป๊ฒŒ ๊ทธ ๋งŽ์€ ์ด์•ผ๊ธฐ๋ฅผ ์•Œ๊ณ  ์žˆ์—ˆ์„๊นŒ.
ํ•œ๋ฒˆ์€ ์ •๋ง ๋ฌผ์–ด๋ณธ ์ ์ด ์žˆ์—ˆ๋‹ค. ์–ด๋–ป๊ฒŒ ํ•ด์„œ ๊ทธ๋Ÿฐ ์ด์•ผ๊ธฐ๋ฅผ ์•Œ๊ณ  ์žˆ๋Š๋ƒ๊ณ . ๊ทธ๋Ÿด๋•Œ ๋งˆ๋‹ค ์ด๋ชจ๋Š” ๋‚ด๊ฒŒ ์–ด๋ฅธ์ด ๋˜๋ผ๊ณ  ๋งํ•ด์คฌ๋‹ค.
'์–ด๋ฅธ์ด ๋˜๋ฉด ์•Œ ์ˆ˜ ์žˆ์–ด. ์–ด๋ฅธ์ด ๋˜๋ ด.'
์–ด๋ฅธ, ๊ทธ ๋‹น์‹œ์˜ ๋‚˜๋Š” ์žฅ๋ž˜ํฌ๋ง์œผ๋กœ <์–ด๋ฅธ>์„ ์จ๋„ฃ์„ ์ •๋„์˜€๋‹ค.
'''],
cache_examples = False,
retry_btn = None,
undo_btn = 'Delete Previous',
clear_btn = 'Clear',
)
iface.launch(share=True)