Python
Dubbing Chinese Movies into Khmer.
May 08, 2026
import os, whisper, asyncio, edge_tts, re, torch
from googletrans import Translator
from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
import moviepy.video.fx.all as vfx
# --- ⚙️ Configuration ---
# Input is read from input_video/, output is written to final_result/;
# both directories are expected to already exist.
Video_Name = "movie.mp4"
VIDEO_PATH = f"input_video/{Video_Name}"
OUTPUT_PATH = f"final_result/BM7_ANGKOR_PRO_{Video_Name}"
# Shared googletrans client used by professional_khmer_filter (network-backed).
translator = Translator()
# --- 🧠 Gender detection and repetition filtering ---
def detect_gender(text):
    """Choose a Khmer Edge-TTS voice for *text*.

    Returns the female voice ("km-KH-SreymomNeural") when the text
    contains any female-indicating Khmer keyword, otherwise the male
    voice ("km-KH-PisethNeural").
    """
    female_keywords = ['នាង', 'ស្រី', 'អូន', 'អ្នកនាង', 'កញ្ញា', 'ម៉ាក់', 'យាយ', 'ព្រះនាង']
    for keyword in female_keywords:
        if keyword in text:
            return "km-KH-SreymomNeural"
    return "km-KH-PisethNeural"
def clean_repetition(text):
    """Collapse consecutive duplicated words ("go go go" -> "go")."""
    duplicate_run = re.compile(r'\b(\w+)( \1\b)+')
    return duplicate_run.sub(r'\1', text)
def professional_khmer_filter(text):
    """Translate Chinese *text* to Khmer and polish it for TTS.

    Pipeline: machine-translate (zh-cn -> km), apply manual phrase
    corrections for a more natural register, collapse repeated words,
    then strip sentence punctuation that disrupts TTS pacing.

    Returns "" on any failure (best-effort: a skipped segment is
    preferable to aborting the whole dubbing run).
    """
    try:
        raw_kh = translator.translate(text, src='zh-cn', dest='km').text
        # Manual phrase corrections layered on top of machine translation.
        corrections = {
            "អ្នក": "ឯង", "ខ្ញុំ": "យើង", "សួស្តី": "ជម្រាបសួរ",
            "អរគុណ": "អរគុណច្រើន", "មែនទេ": "មែនអត់?",
            "តើមានរឿងអ្វី": "មានរឿងអី?", "ពិតជា": "ពិតមែនហើយ"
        }
        for old, new in corrections.items():
            raw_kh = raw_kh.replace(old, new)
        clean_text = clean_repetition(raw_kh)
        # Replace punctuation with spaces so the TTS voice does not pause oddly.
        return re.sub(r'[.,!?]', ' ', clean_text).strip()
    except Exception as e:
        # Was a bare `except:` that swallowed everything (even KeyboardInterrupt)
        # silently; keep the best-effort "" return but surface the error.
        print(f"Translation failed: {e}")
        return ""
async def generate_voice(text, start, duration, index):
    """Synthesize one dubbed Khmer audio clip for a subtitle segment.

    Args:
        text: source (Chinese) segment text.
        start: segment start time in seconds within the video.
        duration: segment length in seconds; speech longer than this
            is sped up to fit.
        index: segment index, used to name the temp mp3 file.

    Returns:
        (AudioFileClip, tmp_path) on success, or (None, None) when the
        translated text is empty/too short or synthesis fails.
    """
    tmp = f"tmp_{index}.mp3"
    try:
        kh_text = professional_khmer_filter(text)
        if not kh_text or len(kh_text) < 2:
            return None, None
        selected_voice = detect_gender(kh_text)
        # Create the voice file via Edge-TTS.
        communicate = edge_tts.Communicate(kh_text, selected_voice, rate="+8%", pitch="-1Hz")
        await communicate.save(tmp)
        # Load it as an audio clip; boost volume so dialogue sits above
        # the ducked original soundtrack (see start_dubbing).
        audio = AudioFileClip(tmp).set_start(start).volumex(5.0)
        # Speed the speech up if it overruns the segment window.
        if audio.duration > duration:
            audio = vfx.speedx(audio, factor=audio.duration / duration).set_duration(duration)
        return audio, tmp
    except Exception as e:
        print(f"Error at {start}s: {e}")
        # Bug fix: on a partial failure (TTS wrote the file, then clip
        # creation failed) the caller never learns the path, so the temp
        # mp3 used to leak. Remove it here before bailing out.
        if os.path.exists(tmp):
            try:
                os.remove(tmp)
            except OSError:
                pass
        return None, None
async def start_dubbing():
    """End-to-end dubbing pipeline.

    Transcribes the Chinese audio with Whisper, synthesizes a Khmer
    voice clip per segment, mixes the clips over the ducked original
    soundtrack, exports the final video, then removes temp files.
    """
    if not os.path.exists(VIDEO_PATH):
        print(f"❌ រកមិនឃើញ File: {VIDEO_PATH}")
        return
    print("🚀 កំពុងដាស់ម៉ាស៊ីន AI (Whisper Medium)...")
    # Prefer GPU; the "medium" model is slow on CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = whisper.load_model("medium").to(device)
    print("🔍 កំពុងវិភាគសាច់រឿង (ទប់ស្កាត់ Hallucination)...")
    # Conservative decoding settings to suppress Whisper hallucinations:
    # greedy (temperature=0) with beam search, plus thresholds that drop
    # repetitive or non-speech output.
    transcribe = model.transcribe(
        VIDEO_PATH,
        task="transcribe",
        language="zh",
        temperature=0,
        beam_size=5,
        compression_ratio_threshold=2.4,
        no_speech_threshold=0.6
    )
    segments = transcribe['segments']
    video = VideoFileClip(VIDEO_PATH)
    # Duck the original soundtrack (background music) under the dub.
    bg_audio = video.audio.volumex(0.12)
    audio_tracks = [bg_audio]
    temp_files = []
    print(f"🎙️ ចាប់ផ្ដើមផលិតសម្លេងតួអង្គចំនួន {len(segments)} ឃ្លា...")
    for i, s in enumerate(segments):
        # Must be awaited: generate_voice is a coroutine.
        aud, path = await generate_voice(s['text'], s['start'], s['end'] - s['start'], i)
        if aud:
            audio_tracks.append(aud)
            temp_files.append(path)
        if i % 25 == 0:
            print(f"⏳ ដំណើរការបាន {round((i/len(segments))*100)}% ...")
    print("🎬 កំពុងបូកបញ្ចូលគ្នា និង Export វីដេអូសម្រេច...")
    final_audio = CompositeAudioClip(audio_tracks)
    final_video = video.set_audio(final_audio)
    final_video.write_videofile(
        OUTPUT_PATH,
        codec="libx264",
        audio_codec="aac",
        fps=video.fps,
        threads=4,
        logger=None
    )
    # Close clips to release file handles and RAM.
    video.close()
    final_video.close()
    # Best-effort removal of per-segment temp audio files.
    for f in temp_files:
        try:
            if os.path.exists(f):
                os.remove(f)
        # Bug fix: was a bare `except: pass`; only filesystem errors
        # should be ignored here.
        except OSError:
            pass
    print(f"✅ សម្រេចមហាជោគជ័យ! ឮសំឡេងបកប្រែច្បាស់ហើយម្ចាស់គ្រូ៖ {OUTPUT_PATH}")
# Run the pipeline.
# Bug fix: a bare `await` at module level is a SyntaxError — it is only
# valid inside an `async def`. asyncio.run() (asyncio is imported above)
# creates an event loop and drives the coroutine to completion.
if __name__ == "__main__":
    asyncio.run(start_dubbing())
- Blogger Comment
- Facebook Comment
0 comments:
Post a Comment