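"""Carry an existing set of translations over to a new version of the source texts.

Each translation file is assumed to be a flat JSON object mapping a text id to a
string. For every file listed in ``translation_files`` the script reuses old
translations whose source text is unchanged, writes still-untranslated entries to
``NEW_<file>`` and collects translations whose source text disappeared in
``UNUSED_<file>``.
"""
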
import os
import json
import hashlib
from pathlib import Path


def text_to_id(text):
    """Derive a stable id for a text string (used for orphaned translations)."""
    return hashlib.md5(text.encode('utf-8')).hexdigest()


def load_json_file(path):
    """Load a JSON file as a dict, returning an empty dict if the file is missing."""
    if path.exists():
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)
    return {}


def update_translations(old_src_dir, new_src_dir, old_dst_dir, new_dst_dir):
    """Merge old translations into the new source texts, file by file."""
    os.makedirs(new_dst_dir, exist_ok=True)

    translation_files = [
        "StarText.json",
        "DescText.json",
        "OtherText.json",
        "BlockText.json",
        "Choices.json"
    ]

    for filename in translation_files:
        old_src_path = Path(old_src_dir) / filename
        old_dst_path = Path(old_dst_dir) / filename
        new_src_path = Path(new_src_dir) / filename

        # Skip files that do not exist in the new source version.
        if not new_src_path.exists():
            continue

        new_src = load_json_file(new_src_path)
        old_src = load_json_file(old_src_path)
        old_dst = load_json_file(old_dst_path)

        # Map each old source text to its existing translation (if any).
        translation_map = {}
        for key in old_src:
            original_text = old_src[key]
            translated_text = old_dst.get(key, "")
            if translated_text:
                translation_map[original_text] = translated_text

        new_dst = {}
        new_texts = {}
        unused_texts = {}

        # Reuse translations whose source text is unchanged; collect the rest
        # as new, still-untranslated entries.
        for new_id, new_text in new_src.items():
            if new_text in translation_map:
                new_dst[new_id] = translation_map[new_text]
            else:
                new_dst[new_id] = ""
                new_texts[new_id] = new_text

        # Translations whose source text no longer appears in the new version.
        for old_text, old_trans in translation_map.items():
            if old_text not in new_src.values():
                unused_id = text_to_id(old_text)
                unused_texts[unused_id] = old_trans

        output_path = Path(new_dst_dir) / filename
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(new_dst, f, ensure_ascii=False, indent=2)

        if new_texts:
            new_texts_path = Path(new_dst_dir) / f"NEW_{filename}"
            with open(new_texts_path, 'w', encoding='utf-8') as f:
                json.dump(new_texts, f, ensure_ascii=False, indent=2)

        if unused_texts:
            unused_texts_path = Path(new_dst_dir) / f"UNUSED_{filename}"
            with open(unused_texts_path, 'w', encoding='utf-8') as f:
                json.dump(unused_texts, f, ensure_ascii=False, indent=2)

        print(f"✅ Processed: {filename} | new: {len(new_texts)} | unused: {len(unused_texts)}")


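# A minimal sketch of the expected data flow, with hypothetical file contents
# (the real StarText/DescText/... files just need to be flat id -> text maps):
#
#   old_source/translate/Choices.json        {"a1": "Attack", "a2": "Retreat"}
#   old_translations/translate/Choices.json  {"a1": "<translated Attack>"}
#   new_source/translate/Choices.json        {"b1": "Attack", "b2": "Negotiate"}
#
# produces in new_project/translate/:
#
#   Choices.json      {"b1": "<translated Attack>", "b2": ""}
#   NEW_Choices.json  {"b2": "Negotiate"}
#   (no UNUSED_Choices.json, because every translated text still exists)
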
if __name__ == "__main__":
    update_translations(
        old_src_dir="old_source/translate",        # old version source text directory
        new_src_dir="new_source/translate",        # new version source text directory
        old_dst_dir="old_translations/translate",  # old version translation directory
        new_dst_dir="new_project/translate"        # output directory for the new version
    )