import datetime
import numpy as np
import pandas as pd
import re
import json
import os
import glob
import torch
import torch.nn.functional as F
from torch.optim import Adam
from tqdm import tqdm
from torch import nn
from transformers import BertModel
from transformers import AutoTokenizer
import argparse
from bs4 import BeautifulSoup
import requests
def split_essay_to_sentence(origin_essay):
    # Split the essay on newlines and periods, dropping empty strings.
    origin_essay_sentence = sum([[a.strip() for a in i.split('.')] for i in origin_essay.split('\n')], [])
    essay_sent = [a for a in origin_essay_sentence if len(a) > 0]
    return essay_sent
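# Illustrative sketch (not part of the original flow): how the splitter behaves on a small input.
# split_essay_to_sentence('오늘은 비가 왔다. 커피를 마셨다\n기분이 좋았다')
# -> ['오늘은 비가 왔다', '커피를 마셨다', '기분이 좋았다']
# Note it only splits on '.' and '\n', so '?' or '!' endings stay attached to their sentence.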
def get_first_extraction(text_sentence):
    # Ask the question-answering pipeline "what is the feeling?" for every sentence.
    row_dict = {}
    for row in tqdm(text_sentence):
        question = 'what is the feeling?'
        answer = question_answerer(question=question, context=row)
        row_dict[row] = answer
    return row_dict
def get_sent_labeldata():
    # Build emotion <-> index mappings from the sentiment label csv.
    label = pd.read_csv('./rawdata/sentimental_label.csv', encoding='cp949', header=None)
    label[1] = label[1].apply(lambda x: re.findall(r'[가-힣]+', x)[0])
    label_dict = label[label.index % 10 == 0].set_index(0).to_dict()[1]
    emo2idx = {v: k for k, v in enumerate(label_dict.items())}   # (label_id, emotion) tuple -> index
    idx2emo = {v: k[1] for k, v in emo2idx.items()}              # index -> emotion string
    return emo2idx, idx2emo
class myDataset_for_infer(torch.utils.data.Dataset):
    def __init__(self, X):
        self.X = X

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # uses the module-level `tokenizer`
        sentences = tokenizer(self.X[idx], return_tensors='pt', padding='max_length', max_length=96, truncation=True)
        return sentences
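# Illustrative note: with return_tensors='pt' the tokenizer yields (1, 96) tensors per sentence,
# so a DataLoader batch has shape (batch, 1, 96); infer_data below squeezes that extra dimension
# away before the forward pass. A quick sanity check might look like:
# sample = myDataset_for_infer(['커피를 마셨다'])[0]
# sample['input_ids'].shape  # torch.Size([1, 96])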
def infer_data(model, main_feeling_keyword):
    # Run the sentence-emotion classifier over every sentence and collect the predicted label ids.
    df_infer = myDataset_for_infer(main_feeling_keyword)
    infer_dataloader = torch.utils.data.DataLoader(df_infer, batch_size=16)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == 'cuda':
        model = model.cuda()
    result_list = []
    with torch.no_grad():
        for idx, infer_input in tqdm(enumerate(infer_dataloader)):
            # the tokenizer returns (1, 96) tensors, so drop the extra dimension after batching
            mask = infer_input['attention_mask'].squeeze(1).to(device)
            input_id = infer_input['input_ids'].squeeze(1).to(device)
            output = model(input_id, mask)
            # take the argmax on the logits tensor and move it back to cpu
            # (the original np.argmax(...).numpy() call fails on a torch tensor)
            result = torch.argmax(output.logits, dim=1).cpu().numpy()
            result_list.extend(result)
    return result_list
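# Minimal usage sketch (assumes the classifier and tokenizer defined below are already loaded):
# label_ids = infer_data(cls_model, ['오늘은 기분이 좋다', '조금 슬펐다'])
# label_ids is a plain Python list with one predicted label id per sentence, e.g. [3, 0].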
def get_word_emotion_pair(cls_model, origin_essay_sentence, idx2emo):
    # POS-tag each sentence with the UPOS pipeline, classify its emotion, and save the result.
    def get_noun(sent):
        return [re.sub(r'[을를]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'NOUN']
    def get_adj(sent):
        return [re.sub(r'[을를]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'ADJ']
    def get_verb(sent):
        return [re.sub(r'[을를]+', '', vocab) for (vocab, pos) in nlp(sent) if len(vocab) > 1 and pos == 'VERB']

    result_list = infer_data(cls_model, origin_essay_sentence)
    final_result = pd.DataFrame(data={'text': origin_essay_sentence, 'label': result_list})
    final_result['emotion'] = final_result['label'].map(idx2emo)
    #essay_sent_pos = [nlp(i) for i in tqdm(essay_sent)]
    #final_result['text_pos'] = essay_sent_pos
    final_result['noun_list'] = final_result['text'].map(get_noun)
    final_result['adj_list'] = final_result['text'].map(get_adj)
    final_result['verb_list'] = final_result['text'].map(get_verb)
    final_result['title'] = 'none'

    file_made_dt = datetime.datetime.now()
    file_made_dt_str = datetime.datetime.strftime(file_made_dt, '%Y%m%d_%H%M%S')  # format fixed: was '%H%M%d'
    # nickname is a module-level global set in run_all
    os.makedirs(f'./result/{nickname}/{file_made_dt_str}/', exist_ok=True)
    final_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/essay_result.csv", index=False)
    return final_result, file_made_dt_str
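# The returned frame has one row per sentence, roughly like this (illustrative values only;
# the actual emotion labels depend on the 6-label classifier):
#        text          label  emotion   noun_list   adj_list  verb_list   title
#   '커피를 마셨다'       2     '기쁨'     ['커피']      []      ['마셨다']   'none'
# The same table is persisted to ./result/<nickname>/<timestamp>/essay_result.csv.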
def get_essay_base_analysis(file_made_dt_str, nickname):
    # Aggregate the sentence-level result into per-word emotion counts.
    essay1 = pd.read_csv(f"./result/{nickname}/{file_made_dt_str}/essay_result.csv")
    # The list columns come back from csv as strings, so eval them once before measuring lengths
    # (the original measured the string length instead of the list length).
    essay1['noun_list'] = essay1['noun_list'].apply(lambda x: eval(x))
    essay1['adj_list'] = essay1['adj_list'].apply(lambda x: eval(x))
    essay1['noun_list_len'] = essay1['noun_list'].apply(lambda x: len(x))
    essay1['noun_list_uniqlen'] = essay1['noun_list'].apply(lambda x: len(set(x)))
    essay1['adj_list_len'] = essay1['adj_list'].apply(lambda x: len(x))
    essay1['adj_list_uniqlen'] = essay1['adj_list'].apply(lambda x: len(set(x)))
    essay1['vocab_all'] = essay1['noun_list'] + essay1['adj_list']
    essay1['vocab_cnt'] = essay1['vocab_all'].apply(lambda x: len(x))
    essay1['vocab_unique_cnt'] = essay1['vocab_all'].apply(lambda x: len(set(x)))
    d = essay1.groupby('title')[['noun_list', 'adj_list']].sum().reset_index()
    d['noun_cnt'] = d['noun_list'].apply(lambda x: len(set(x)))
    d['adj_cnt'] = d['adj_list'].apply(lambda x: len(set(x)))

    # Emotion counts per title (sentence-level value counts)
    essay_summary = essay1.groupby(['title'])['emotion'].value_counts().unstack(level=1)

    emo_vocab_dict = {}
    for k, v in essay1[['emotion', 'noun_list']].values:
        for vocab in v:
            if (k, 'noun', vocab) not in emo_vocab_dict:
                emo_vocab_dict[(k, 'noun', vocab)] = 0
            emo_vocab_dict[(k, 'noun', vocab)] += 1
    for k, v in essay1[['emotion', 'adj_list']].values:
        for vocab in v:
            if (k, 'adj', vocab) not in emo_vocab_dict:
                emo_vocab_dict[(k, 'adj', vocab)] = 0
            emo_vocab_dict[(k, 'adj', vocab)] += 1

    vocab_emo_cnt_dict = {}
    for k, v in essay1[['emotion', 'noun_list']].values:
        for vocab in v:
            if ('noun', vocab) not in vocab_emo_cnt_dict:   # key order fixed: was (vocab, 'noun')
                vocab_emo_cnt_dict[('noun', vocab)] = {}
            if k not in vocab_emo_cnt_dict[('noun', vocab)]:
                vocab_emo_cnt_dict[('noun', vocab)][k] = 0
            vocab_emo_cnt_dict[('noun', vocab)][k] += 1
    for k, v in essay1[['emotion', 'adj_list']].values:
        for vocab in v:
            if ('adj', vocab) not in vocab_emo_cnt_dict:
                vocab_emo_cnt_dict[('adj', vocab)] = {}
            if k not in vocab_emo_cnt_dict[('adj', vocab)]:
                vocab_emo_cnt_dict[('adj', vocab)][k] = 0
            vocab_emo_cnt_dict[('adj', vocab)][k] += 1

    vocab_emo_cnt_df = pd.DataFrame(vocab_emo_cnt_dict).T
    vocab_emo_cnt_df['total'] = vocab_emo_cnt_df.sum(axis=1)
    # Per-word emotion counts, all vocabulary
    all_result = vocab_emo_cnt_df.sort_values(by='total', ascending=False)
    # Per-word emotion counts, adjectives included (same frame as all_result; kept for the file naming below)
    adj_result = vocab_emo_cnt_df.sort_values(by='total', ascending=False)
    # Nouns only
    noun_result = vocab_emo_cnt_df[vocab_emo_cnt_df.index.get_level_values(0) == 'noun'].sort_values(by='total', ascending=False)

    final_file_name = "essay_all_vocab_result.csv"
    adj_file_name = "essay_adj_vocab_result.csv"
    noun_file_name = "essay_noun_vocab_result.csv"
    os.makedirs(f'./result/{nickname}/{file_made_dt_str}/', exist_ok=True)
    all_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{final_file_name}", index=False)
    adj_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{adj_file_name}", index=False)
    noun_result.to_csv(f"./result/{nickname}/{file_made_dt_str}/{noun_file_name}", index=False)
    return all_result, adj_result, noun_result, essay_summary, file_made_dt_str
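# Usage sketch (hypothetical arguments): the function only needs the csv written by get_word_emotion_pair.
# all_result, adj_result, noun_result, essay_summary, _ = get_essay_base_analysis('20240101_120000', '커피러버')
# all_result is indexed by (pos, word) pairs with one column per emotion plus a 'total' count column.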
from transformers import pipeline

#model_name = 'AlexKay/xlm-roberta-large-qa-multilingual-finedtuned-ru'
model_name = 'monologg/koelectra-base-v2-finetuned-korquad'
question_answerer = pipeline("question-answering", model=model_name)

from transformers import AutoTokenizer, AutoModelForTokenClassification, TokenClassificationPipeline
tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-large-korean-upos")
posmodel = AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-large-korean-upos")
# Note: this rebinds the name `pipeline` from the transformers factory function (used above)
# to a concrete Korean POS-tagging pipeline instance.
pipeline = TokenClassificationPipeline(tokenizer=tokenizer,
                                       model=posmodel,
                                       aggregation_strategy="simple",
                                       task='token-classification')
nlp = lambda x: [(x[t["start"]:t["end"]], t["entity_group"]) for t in pipeline(x)]
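# Illustrative example of what nlp() yields (the tags follow the Universal POS scheme of this model;
# the exact segmentation depends on the model, so the output below is an assumption, not verified):
# nlp('커피를 마셨다')  # -> e.g. [('커피', 'NOUN'), ('를', 'ADP'), ('마셨다', 'VERB')]
# The extractors in get_word_emotion_pair keep only tokens longer than one syllable and strip 을/를.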
from transformers import AutoModelForSequenceClassification
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def all_process(origin_essay, nickname):
    # Full pipeline: split -> QA pass (result currently unused) -> classify emotions -> aggregate -> summarize.
    global tokenizer  # myDataset_for_infer.__getitem__ reads the module-level tokenizer
    essay_sent = split_essay_to_sentence(origin_essay)
    row_dict = get_first_extraction(essay_sent)  # kept from the original flow; not used below
    emo2idx, idx2emo = get_sent_labeldata()
    tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
    cls_model = AutoModelForSequenceClassification.from_pretrained('seriouspark/bert-base-multilingual-cased-finetuning-sentimental-6label')
    #cls_model = AutoModelForSequenceClassification.from_pretrained('bert-base-multilingual-cased', num_labels = 6)
    final_result, file_name_dt = get_word_emotion_pair(cls_model, essay_sent, idx2emo)
    all_result, adj_result, noun_result, essay_summary, file_made_dt_str = get_essay_base_analysis(file_name_dt, nickname)

    summary_result = pd.concat([adj_result, noun_result]).fillna(0).sort_values(by='total', ascending=False).reset_index()[:30]
    with open(f'./result/{nickname}/{file_name_dt}/summary.json', 'w') as f:
        json.dump(essay_summary.to_json(), f)
    with open(f'./result/{nickname}/{file_made_dt_str}/all_result.json', 'w') as f:
        json.dump(all_result.to_json(), f)
    with open(f'./result/{nickname}/{file_made_dt_str}/adj_result.json', 'w') as f:
        json.dump(adj_result.to_json(), f)
    with open(f'./result/{nickname}/{file_made_dt_str}/noun_result.json', 'w') as f:
        json.dump(noun_result.to_json(), f)
    #return essay_summary, summary_result

    total_cnt = essay_summary.sum(axis=1).values[0]
    essay_summary_list = sorted(essay_summary.T.to_dict()['none'].items(), key=lambda x: x[1], reverse=True)
    essay_summary_list_str = ' '.join([f'{row[0]} {int(row[1] * 100 / total_cnt)}%' for row in essay_summary_list])
    summary1 = f"""{nickname}님, 당신의 글 속에서 느껴지는 감정 분포는 [{essay_summary_list_str}] 입니다"""
    return summary1
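# End-to-end sketch (hypothetical nickname; assumes the models above downloaded successfully):
# nickname = '커피러버'
# print(all_process('오늘은 비가 왔다. 커피를 마시며 창밖을 봤다.', nickname))
# -> "커피러버님, 당신의 글 속에서 느껴지는 감정 분포는 [... 백분율 목록 ...] 입니다"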
def get_similar_vocab(message):
    # Scrape the Naver dictionary search page for words similar to the input word.
    if (len(message) > 0) and (len(re.findall('[가-힣]+', message)) > 0):
        vocab = message
        all_dict_url = f"https://dict.naver.com/search.dict?dicQuery={vocab}&query={vocab}&target=dic&ie=utf8&query_utf=&isOnlyViewEE="
        response = requests.get(all_dict_url)
        html_content = response.text
        # Parse the HTML with BeautifulSoup
        soup = BeautifulSoup(html_content, 'html.parser')
        resulttext = soup.find('script').string
        # Extract the words following "similarWordName"
        similar_words = re.findall(r'similarWordName:"([^"]+)"', resulttext)
        # Keep unique Hangul tokens, dropping a few generic dictionary labels
        similar_words_final = [i for i in set(sum([re.findall('[가-힣]+', i) for i in similar_words], [])) if i not in ('낱말', '말', '이사')]
        return similar_words_final
    else:
        return '단어를 입력해 주세요'
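# Illustrative call (the Naver endpoint and its embedded similarWordName fields are inferred from
# the scraping code above, not a documented API, so results may vary):
# get_similar_vocab('커피')  # -> a list of Hangul tokens scraped from the page's similarWordName fields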
def get_similar_means(vocab):
    # Scrape the dictionary definitions for a word from the same Naver search page.
    all_dict_url = f"https://dict.naver.com/search.dict?dicQuery={vocab}&query={vocab}&target=dic&ie=utf8&query_utf=&isOnlyViewEE="
    response = requests.get(all_dict_url)
    html_content = response.text
    # Parse the HTML with BeautifulSoup
    soup = BeautifulSoup(html_content, 'html.parser')
    resulttext = soup.find('script').string
    # Extract the list following "meanList" as a raw string and strip the escaped markup
    mean_list_str = re.findall(r'meanList:(\[.*?\])', resulttext, re.DOTALL)
    mean_list_str = [i.replace('\\u002F', '').replace('\\u003C', '').replace('strong', '').replace('\\u003E', '') for i in mean_list_str]
    matches_list = []
    for i in range(len(mean_list_str)):
        matches = re.findall(r'mean:"(.*?)"', mean_list_str[i])
        matches_list.append(matches)
    # Keep only definitions that contain Hangul and no Latin letters or digits
    mean_list_str_final = [i for i in sum(matches_list, []) if (len(re.findall(r'[A-Za-z0-9]', i)) == 0) and (len(re.findall(r'[가-힣]', i)) != 0)]
    return mean_list_str_final
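# Illustrative call (same caveat as get_similar_vocab about the scraped page structure):
# get_similar_means('커피')  # -> a list of Korean-only definition strings for the word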
info_dict = {}

def run_all(message, history):
    # Chat handler: each turn is routed by its prefix (닉네임 / 단어 / 문장 / 짧은글 / 긴글 / 분석시작).
    global info_dict
    if message.find('닉네임:') >= 0:
        global nickname
        nickname = message.replace('닉네임', '').replace(':', '').strip()
        info_dict[nickname] = {}
        return f'''좋아요! 시작할게요 {nickname}님.
지금 머릿속에 떠오르는 단어를 하나 입력해주세요.
\n\n\n단어를 입력할 때 \"단어: \" 를 포함해주세요
예시 <단어: 커피>
'''
    try:
        if message.find('단어:') >= 0:
            clear_message = message.replace('단어', '').replace(':', '').strip()
            info_dict[nickname]['main_word'] = clear_message
            vocab_mean_list = []
            # pass the cleaned word (not the whole chat message) to the dictionary lookups
            similar_words_final = get_similar_vocab(clear_message)
            similar_words_final_with_main = similar_words_final + [clear_message]
            if len(similar_words_final_with_main) > 0:
                for w in similar_words_final_with_main:
                    temp_means = get_similar_means(w)
                    vocab_mean_list.append(temp_means)
                fixed_similar_words_final = list(set([i for i in sum(vocab_mean_list, []) if len(i) > 10]))[:10]
                word_str = ' \n'.join([str(idx) + ") " + i for idx, i in enumerate(similar_words_final, 1)])
                sentence_str = ' \n'.join([str(idx) + ") " + i for idx, i in enumerate(fixed_similar_words_final, 1)])
                return f'''<{clear_message}> 을 활용한 글쓰기를 시작해볼까요?
우선, 유사한 단어부터 확인해볼게요.
{word_str} \n
유사한 단어들의 뜻은 아래와 같습니다.
{sentence_str}\n
위 뜻 중에 원하는 뜻을 골라 입력해주세요
\n\n\n 입력시에 \"문장:\" 을 포함해주세요. 예시도 보여드릴게요.
\n 예시 <문장: 일정한 주제나 줄거리를 가진 이야기>
'''
            else:
                return '\"단어:\" 를 포함해서 단어를 입력해주세요 (단어: 커피)'
        elif message.find('문장:') >= 0:
            clear_message = message.replace('문장', '').replace(':', '').strip()
            info_dict[nickname]['selected_sentence'] = clear_message
            return f'''[{clear_message}]를 고르셨네요.
\n 위 문장을 활용해 짧은 글쓰기를 해볼까요?
\n\n\n 입력시에 \"짧은글: \"을 포함해주세요. 예시도 보여드릴게요.
\n 예시 <짧은글: 지금 밥을 먹고 있는 중이다>
'''
        elif message.find('짧은글:') >= 0:
            clear_message = message.replace('짧은글', '').replace(':', '').strip()
            info_dict[nickname]['short_contents'] = clear_message
            return f'''<{clear_message}>라고 입력해주셨네요.
\n 위 문장을 활용해 긴 글쓰기를 해볼까요? 500자 이상 작성해주시면 좋아요.
\n\n\n 입력시에 \"긴글: \"을 포함해주세요. 예시도 보여드릴게요.
\n 예시 <긴글: 지금 밥을 먹고 있는 중이다. 밥을 먹을때 마다 나는 밥알을 혀바닥으로 굴려본다. ... (생략) >
'''
        elif message.find('긴글:') >= 0:
            long_message = message.replace('긴글', '').replace(':', '').strip()
            length_of_lm = len(long_message)
            if length_of_lm >= 500:
                info_dict[nickname]['long_contents'] = long_message  # stored per nickname; was info_dict['long_contents']
                os.makedirs(f"./result/{nickname}/", exist_ok=True)
                with open(f"./result/{nickname}/contents.txt", 'w', encoding='utf-8') as f:
                    f.write(long_message)
                return f'입력해주신 글은 {length_of_lm}자 입니다. 이 글은 분석해볼만 해요. 분석을 원하신다면 "분석시작" 이라고 입력해주세요'
            else:
                return f'입력해주신 글은 {length_of_lm}자 입니다. 분석하기에 조금 짧아요. 조금 더 입력해주시겠어요?'
        elif message.find('분석시작') >= 0:
            with open(f"./result/{nickname}/contents.txt", 'r', encoding='utf-8') as f:
                origin_essay = f.read()
            summary = all_process(origin_essay, nickname)
            return summary
        else:
            return '처음부터 시작해주세요'
    except Exception:
        return '에러가 발생했어요. 처음부터 시작합니다. 닉네임: 을 입력해주세요'
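# Conversation flow implemented above (each turn must start with the matching prefix):
#   '닉네임: <이름>'   -> registers the nickname and creates ./result/<이름>/ bookkeeping
#   '단어: <단어>'     -> looks up similar words and their meanings
#   '문장: <문장>'     -> stores the chosen meaning sentence
#   '짧은글: ...'      -> stores a short draft
#   '긴글: ...'        -> written to contents.txt once it reaches 500 characters
#   '분석시작'         -> runs all_process() and returns the emotion summary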
import gradio as gr
import requests

history = []
info_dict = {}
iface = gr.ChatInterface(
    fn=run_all,
    chatbot=gr.Chatbot(),
    textbox=gr.Textbox(placeholder='챗봇의 요청 접두사를 포함하여 입력해주세요', container=True, scale=7),
    title='MooGeulMooGeul',
    description='당신의 닉네임부터 정해서 알려주세요. "닉네임: " 을 포함해서 입력해주세요.',
    theme='soft',
    examples=['닉네임: 커피러버',
              '단어: 커피',
              '문장: 일정한 주제나 줄거리를 가진 이야기',
              '짧은글: 어떤 주제나 줄거리에 대해서든 이야기를 잘하는 사람이 하나 있었다. 나의 이모. 그 사람은 커피 한잔만 있다면 어떤 이야기든 내게 들려주었다.',
              '''긴글: 어떤 주제나 줄거리에 대해서든 이야기를 잘하는 사람이 하나 있었다. 나의 이모. 그 사람은 커피 한 잔만 있다면 어떤 이야기든 할 수 있었다.
어린시절의 나는 그 이야기를 듣기 위해 필사적으로 집으로 돌아왔다. 유치원때는 집에 가야 한다며 떼를 쓰고 울었다고 한다.
초등학생이 되어서는 4교시 땡! 하는 소리가 들리면 가방을 재빨리 싸서 집으로 돌아왔다. 집에는 항상 나를 기다리고 있는 이모와 이모의 커피 냄새가 있었다.
따뜻한 믹스커피 냄새, 그리고 고요한 집안에 울리는 이야깃거리가 시작한다. 이모는 어떻게 그 많은 이야기를 알고 있었을까.
한번은 정말 물어본 적이 있었다. 어떻게 해서 그런 이야기를 알고 있냐고. 그럴때 마다 이모는 내게 어른이 되라고 말해줬다.
'어른이 되면 알 수 있어. 어른이 되렴.'
어른, 그 당시의 나는 장래희망으로 <어른>을 써넣을 정도였다.
'''],
    cache_examples=False,
    retry_btn=None,
    undo_btn='Delete Previous',
    clear_btn='Clear',
)
iface.launch(share=True)