能跑就行 (as long as it runs)
This commit is contained in:
parent 0ebd4bc1aa
commit 099c9ff025
Data/Text_EnUS/Z-INPUTER.py (normal file, 164 lines added)
@@ -0,0 +1,164 @@
import os
import json
import traceback


def load_json(path):
    """Read a JSON file and report the load status on the console."""
    if not os.path.exists(path):
        print(f"⚠ File does not exist, skipped: {path}")
        return {}
    try:
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        print(f"✔ Loaded {len(data)} translation entries from {os.path.basename(path)}")
        return data
    except Exception as e:
        print(f"❌ Error while loading {path}: {str(e)}")
        return {}


def apply_translation(dirpath):
    """Apply the translated content back into the source files."""
    print(f"\n🔍 Processing directory: {dirpath}")

    # Paths of the three translation files
    trans_paths = {
        "normal": os.path.join(dirpath, "translate", "translation.json"),
        "special": os.path.join(dirpath, "translate", "translation_special.json"),
        "namedesc": os.path.join(dirpath, "translate", "translation_namedesc.json")
    }

    # Load the contents of all three translation files
    translations = {
        "normal": load_json(trans_paths["normal"]),
        "special": load_json(trans_paths["special"]),
        "namedesc": load_json(trans_paths["namedesc"])
    }

    # Merge the normal and special translations first
    combined = {}
    combined.update(translations["normal"])
    combined.update(translations["special"])
    print(f"📦 Merged normal + special translations: {len(combined)} entries")

    # Handle the prefixed entries (name:, desc: and the like)
    namedesc_count = 0
    for key, value in translations["namedesc"].items():
        try:
            # Split the key into file name and line number
            filename, line_no = key.split("__")
            line_no = int(line_no)
            src_path = os.path.join(dirpath, filename)

            # Check that the source file exists
            if not os.path.exists(src_path):
                print(f"⚠ Warning! Source file not found: {src_path}")
                continue

            # Read the entire source file
            with open(src_path, "r", encoding="utf-8") as f:
                lines = f.readlines()

            # Make sure the line number is within range
            if line_no >= len(lines):
                print(f"⚠ Line {line_no} is out of range in {filename} (max {len(lines)-1})")
                continue

            # Work out whether the original prefix is name:, desc: or menuDesc:
            original = lines[line_no].strip()
            prefix = ""
            for field in ["name:", "desc:", "menuDesc:"]:
                if original.startswith(field):
                    prefix = field
                    break

            if prefix:
                # Re-attach the prefix to the translated value
                combined[key] = prefix + value
                namedesc_count += 1
            else:
                print(f"⚠ No prefix found at line {line_no} of {filename}: {original}")
        except Exception as e:
            print(f"❌ Error while processing namedesc key '{key}': {str(e)}")

    print(f"📦 Added {namedesc_count} name/description translations")

    # Build the per-file modification map
    file_map = {}
    total_updates = 0
    for key, value in combined.items():
        try:
            # Split the key again to get the file information
            filename, line_no = key.split("__")
            line_no = int(line_no)

            # If this file has not been read yet, load its contents first
            if filename not in file_map:
                src_path = os.path.join(dirpath, filename)
                if not os.path.exists(src_path):
                    print(f"⚠ Warning! Target file not found: {src_path}")
                    continue
                with open(src_path, "r", encoding="utf-8") as f:
                    file_map[filename] = {
                        "path": src_path,
                        "lines": f.readlines(),
                        "modified": False  # Tracks whether the file was changed
                    }

            # Check the line number again
            if line_no >= len(file_map[filename]["lines"]):
                print(f"⚠ Line {line_no} is out of range in {filename} (max {len(file_map[filename]['lines'])-1})")
                continue

            # Prepare the replacement line
            original = file_map[filename]["lines"][line_no].strip()
            new_line = f"{value}\n"

            # Only modify when the content actually differs
            if file_map[filename]["lines"][line_no] != new_line:
                file_map[filename]["lines"][line_no] = new_line
                file_map[filename]["modified"] = True
                total_updates += 1
                print(f"✏ Updated line {line_no} of {filename}:")
                print(f"  Original: {original[:50]}...")
                print(f"  New: {new_line.strip()[:50]}...")
        except Exception as e:
            print(f"❌ Error while applying translation '{key}': {str(e)}")

    # Write the modifications to disk
    success_count = 0
    for filename, data in file_map.items():
        if data["modified"]:
            try:
                # Create a backup first, just to be safe
                backup_path = data["path"] + ".bak"
                os.replace(data["path"], backup_path)

                # Write the new content
                with open(data["path"], "w", encoding="utf-8") as f:
                    f.writelines(data["lines"])

                print(f"✔ Successfully updated file: {filename} (backup created)")
                success_count += 1
            except Exception as e:
                print(f"❌ Failed to write {filename}: {str(e)}")
                traceback.print_exc()

    print(f"\n🎯 Applied {total_updates} updates in total")
    print(f"✅ Successfully modified {success_count}/{len(file_map)} files")


def walk_and_apply(root="."):
    """Automatically scan the folder tree for directories that need processing."""
    print(f"🔎 Starting the translation pass from: {os.path.abspath(root)}")
    for dirpath, dirnames, filenames in os.walk(root):
        if "translate" in dirnames:
            print(f"\n{'='*50}")
            try:
                apply_translation(dirpath)
            except Exception as e:
                print(f"❌ Fatal error in {dirpath}: {str(e)}")
                traceback.print_exc()
            print(f"{'='*50}")


if __name__ == "__main__":
    walk_and_apply()
    print("\n🏁 Translation pass finished!")
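For reference, every key in the three JSON files read above follows the "filename__line_no" scheme produced by the collector script changed below, and apply_translation() splits it back apart. A minimal sketch of that round trip, with a hypothetical file name and line number:

source_file, source_line = "chapter01.txt", 12  # hypothetical source location
key = f"{source_file}__{source_line}"           # collector side builds "chapter01.txt__12"
filename, line_no = key.split("__")             # applier side splits the key again
line_no = int(line_no)                          # back to ("chapter01.txt", 12)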
@@ -2,61 +2,82 @@ import os
import json


def should_ignore(line):
    """Check whether this line should be skipped (e.g. comments or special markers)."""
    line = line.strip()
    return line.startswith(("@", ">", ".", "END")) or line == ""


def collect_texts_from_txt(path):
    """Collect the text that needs translation from every txt file in the directory."""
    normal_texts = {}    # Plain dialogue text
    special_texts = {}   # Special text starting with an asterisk
    namedesc_texts = {}  # Name and description text

    # Scan every txt file in the directory
    for filename in os.listdir(path):
        if filename.endswith(".txt"):
            file_path = os.path.join(path, filename)
            # Read the whole file
            with open(file_path, "r", encoding="utf-8") as f:
                lines = f.readlines()

            # Analyse the content line by line
            for i, line in enumerate(lines):
                stripped = line.strip()
                if should_ignore(stripped):
                    continue  # Skip lines that need no processing

                # Build a unique key (file name + line number)
                key = f"{filename}__{i}"

                # Name or description text (name:/desc:/menuDesc:)
                if stripped.startswith(("name:", "desc:", "menuDesc:")):
                    # Cut off the leading marker and keep only the text
                    idx = stripped.find(":")
                    namedesc_texts[key] = stripped[idx + 1:].strip()
                # Lines starting with an asterisk are special text
                elif stripped.startswith("*"):
                    special_texts[key] = stripped
                # Everything else is treated as plain text
                else:
                    normal_texts[key] = stripped

    return normal_texts, special_texts, namedesc_texts


def save_json(data, path, name):
    """Save the collected data as a JSON file."""
    # Create the translate directory first (auto-created if missing)
    translate_dir = os.path.join(path, "translate")
    os.makedirs(translate_dir, exist_ok=True)

    # Build the full file path
    json_path = os.path.join(translate_dir, name)
    # Write as JSON (keep non-ASCII characters as they are)
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)


def walk_and_process(root="."):
    """Walk the whole folder tree and find the directories that need processing."""
    # Visit every subdirectory
    for dirpath, dirnames, filenames in os.walk(root):
        # Check whether the directory contains any txt files
        txts_in_dir = [f for f in filenames if f.endswith(".txt")]
        if not txts_in_dir:
            continue  # Skip directories without txt files

        # Collect the three kinds of text
        normal, special, namedesc = collect_texts_from_txt(dirpath)

        # Save each kind to its own translation file
        if normal:
            save_json(normal, dirpath, "translation.json")
            print(f"✔ Generated translation.json in {dirpath}")  # progress note
        if special:
            save_json(special, dirpath, "translation_special.json")
            print(f"✔ Generated translation_special.json in {dirpath}")
        if namedesc:
            save_json(namedesc, dirpath, "translation_namedesc.json")
            print(f"✔ Generated translation_namedesc.json in {dirpath}")


if __name__ == "__main__":
    walk_and_process()
    print("\n🏁 Text collection finished!")  # closing message
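As a concrete illustration of the classification above, a hypothetical quests.txt with the four lines shown in the comments would be split into the three dictionaries like this (a sketch only; the file name and strings are invented):

# quests.txt, zero-based line numbers 0-3:
#   @label_start        -> dropped by should_ignore()
#   name: Old Well      -> namedesc (prefix stripped here, re-attached by Z-INPUTER.py)
#   *The rope creaks.   -> special (leading asterisk kept)
#   You peer inside.    -> normal
expected_namedesc = {"quests.txt__1": "Old Well"}
expected_special = {"quests.txt__2": "*The rope creaks."}
expected_normal = {"quests.txt__3": "You peer inside."}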
@@ -1,67 +0,0 @@
import os
import json


def load_json(path):
    if not os.path.exists(path):
        return {}
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def apply_translation(dirpath):
    normal_path = os.path.join(dirpath, "translate", "translation.json")
    special_path = os.path.join(dirpath, "translate", "translation_special.json")
    namedesc_path = os.path.join(dirpath, "translate", "translation_namedesc.json")

    normal = load_json(normal_path)
    special = load_json(special_path)
    namedesc = load_json(namedesc_path)

    combined = {}
    for key, value in normal.items():
        combined[key] = value
    for key, value in special.items():
        combined[key] = value
    for key, value in namedesc.items():
        # Decide whether this key's line carries a name: / desc: / menuDesc: prefix
        filename, line_no = key.split("__")
        line_no = int(line_no)
        with open(os.path.join(dirpath, filename), "r", encoding="utf-8") as f:
            lines = f.readlines()
        if line_no >= len(lines):
            continue
        original_line = lines[line_no].strip()
        if original_line.startswith("name:"):
            prefix = "name:"
        elif original_line.startswith("desc:"):
            prefix = "desc:"
        elif original_line.startswith("menuDesc:"):
            prefix = "menuDesc:"
        else:
            prefix = ""
        combined[key] = prefix + value

    file_line_map = {}
    for key, value in combined.items():
        filename, line_no = key.split("__")
        line_no = int(line_no)

        if filename not in file_line_map:
            file_path = os.path.join(dirpath, filename)
            with open(file_path, "r", encoding="utf-8") as f:
                file_line_map[filename] = f.readlines()

        file_line_map[filename][line_no] = value + "\n"

    for filename, lines in file_line_map.items():
        file_path = os.path.join(dirpath, filename)
        with open(file_path, "w", encoding="utf-8") as f:
            f.writelines(lines)
        print(f"✔ Updated: {file_path}")


def walk_and_apply(root="."):
    for dirpath, dirnames, filenames in os.walk(root):
        if "translate" in dirnames:
            apply_translation(dirpath)


if __name__ == "__main__":
    walk_and_apply()