from langchain_community.llms import HuggingFacePipeline
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from components import caption_chain, tag_chain
from components import pexels, utils
import os, gc
import gradio as gr
from moviepy.editor import VideoFileClip, concatenate_videoclips

# Load the model and tokenizer
model = AutoModelForSeq2SeqLM.from_pretrained("declare-lab/flan-alpaca-large")
tokenizer = AutoTokenizer.from_pretrained("declare-lab/flan-alpaca-large")

# Set up the text-to-text generation pipeline
pipe = pipeline(
    'text2text-generation',
    model=model,
    tokenizer=tokenizer,
    max_length=120
)

# Initialize the LLM with HuggingFacePipeline
local_llm = HuggingFacePipeline(pipeline=pipe)

# Build the chains
llm_chain = caption_chain.chain(llm=local_llm)
sum_llm_chain = tag_chain.chain(llm=local_llm)

# Pexels API key
pexels_api_key = os.getenv('pexels_api_key')


def combine_videos(video_files, output_folder):
    """Concatenate the downloaded clips into a single video file."""
    if not video_files:
        print("No video files to combine.")
        return ""  # Return an empty string when there is nothing to combine

    clips = [VideoFileClip(vf) for vf in video_files]
    final_clip = concatenate_videoclips(clips)
    output_path = os.path.join(output_folder, "final_video.mp4")
    final_clip.write_videofile(output_path)

    # Release the file handles held by MoviePy
    final_clip.close()
    for clip in clips:
        clip.close()

    return output_path


# Prediction function
def pred(product_name, orientation):
    # Map the selected dimension to an orientation and resolution
    if orientation == "Shorts/Reels/TikTok (1080 x 1920)":
        orientation = "portrait"
        height = 1920
        width = 1080
    elif orientation == "Facebook/Youtube Videos (1920 x 1080)":
        orientation = "landscape"
        height = 1080
        width = 1920
    else:
        orientation = "square"
        height = 1080
        width = 1080

    # Generate the clips and extract the caption sentences
    folder_name, sentences = pexels.generate_videos(
        product_name, pexels_api_key, orientation, height, width,
        llm_chain, sum_llm_chain
    )
    gc.collect()

    # Check that captions were actually produced
    if not sentences:
        return ["No videos generated. Please try again.", ""]

    # Collect the generated video files
    video_files = [
        os.path.join(folder_name, f)
        for f in os.listdir(folder_name)
        if f.endswith('.mp4')
    ]
    if not video_files:
        return ["No videos were generated. Please check the input and try again.", ""]

    # Combine the clips into a single video
    video_path = combine_videos(video_files, folder_name)
    if not video_path or not os.path.exists(video_path):
        return ["Failed to combine videos.", ""]

    return ["\n".join(sentences), video_path]


# Build and launch the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Ads Generator
        Create video ads based on your product name using AI
        ### Note: the video generation takes about 2-4 minutes
        """
    )
    dimension = gr.Dropdown(
        ["Shorts/Reels/TikTok (1080 x 1920)", "Facebook/Youtube Videos (1920 x 1080)", "Square (1080 x 1080)"],
        label="Video Dimension", info="Choose dimension"
    )
    product_name = gr.Textbox(label="Product name")
    captions = gr.Label(label="Captions")
    video = gr.Video()
    btn = gr.Button("Submit")
    btn.click(pred, inputs=[product_name, dimension], outputs=[captions, video])
    # Additional Gradio interface components can be added here.

# Run the Gradio app
demo.launch()