
Add support for pfLocEditor round-trips.

Splits up MOUL/EoA localization files into per-language database files.
Also fixes a bug with the PotS encoding escape hatch.
Branch: pull/276/head
Adam Johnson committed 3 years ago
Commit: 4324735448
Signed by: Hoikas
GPG Key ID: 0B6515D6FF6F271E
  1. korman/exporter/locman.py (130 lines changed)
  2. korman/operators/op_export.py (8 lines changed)
  3. korman/ui/ui_world.py (1 line changed)
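
Before the diffs, a minimal sketch of the regrouping described in the commit message (an illustration, not code from the commit; the Age name "MyAge" and the sample strings are invented). Korman stores translations as set -> element -> language -> value; the new per-language database export pivots that into language -> set -> element -> value so each language round-trips through pfLocalizationEditor as its own <AgeName><Language>.loc file:

set_LUT = {
    "Journals": {"Notebook": {"English": "Hello", "French": "Bonjour"}},
}

database = {}
for set_name, elements in set_LUT.items():
    for element_name, translations in elements.items():
        for language_name, value in translations.items():
            database.setdefault(language_name, {}).setdefault(set_name, {})[element_name] = value

# database == {"English": {"Journals": {"Notebook": "Hello"}},
#              "French":  {"Journals": {"Notebook": "Bonjour"}}}
# ...which would be written out as MyAgeEnglish.loc and MyAgeFrench.loc.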

korman/exporter/locman.py (130 lines changed)

@@ -14,17 +14,25 @@
 # along with Korman. If not, see <http://www.gnu.org/licenses/>.
 
 import bpy
+from PyHSPlasma import *
+
 from contextlib import contextmanager
+import itertools
+from pathlib import Path
+import re
+from xml.sax.saxutils import escape as xml_escape
+import weakref
 
 from .explosions import NonfatalExportError
 from .. import korlib
 from . import logger
-from pathlib import Path
-from PyHSPlasma import *
-import weakref
-from xml.sax.saxutils import escape as xml_escape
 
 _SP_LANGUAGES = {"English", "French", "German", "Italian", "Spanish"}
 
+# Detects if there are any Plasma esHTML tags in the translated data. If so, we store
+# as CDATA instead of XML encoding the entry.
+_ESHTML_REGEX = re.compile("<.+>")
+
 class LocalizationConverter:
     def __init__(self, exporter=None, **kwargs):
         if exporter is not None:
@@ -46,11 +54,15 @@ class LocalizationConverter:
                               name, language, indent=indent)
         journal = self._journals.setdefault(name, {})
         journal[language] = text_id.as_string()
+        return True
 
     def add_string(self, set_name, element_name, language, value):
         trans_set = self._strings.setdefault(set_name, {})
         trans_element = trans_set.setdefault(element_name, {})
         trans_element[language] = value
+        if self._exporter is not None and self._exporter().mgr.getVer() <= pvPots:
+            return False
+        return True
 
     @contextmanager
     def _generate_file(self, filename, **kwargs):
@@ -68,21 +80,21 @@ class LocalizationConverter:
         finally:
             handle.close()
 
-    def _generate_journal_texts(self):
+    def _generate_text_files(self):
         age_name = self._age_name
 
-        def write_journal_file(language, file_name, contents):
-            try:
-                with self._generate_file(dirname="ageresources", filename=file_name) as stream:
+        def write_text_file(language, file_name, contents):
+            with self._generate_file(dirname="ageresources", filename=file_name) as stream:
+                try:
                     stream.write(contents.encode("windows-1252"))
-            except UnicodeEncodeError:
-                self._report.warn("Translation '{}': Contents contains characters that cannot be used in this version of Plasma. They will appear as a '?' in game.",
-                                  language, indent=2)
-                # Yes, there are illegal characters... As a stopgap, we will export the file with
-                # replacement characters ("?") just so it'll work dammit.
-                stream.write(contents.encode("windows-1252", "replace"))
+                except UnicodeEncodeError:
+                    self._report.warn("Translation '{}': Contents contains characters that cannot be used in this version of Plasma. They will appear as a '?' in game.",
+                                      language, indent=2)
+                    # Yes, there are illegal characters... As a stopgap, we will export the file with
+                    # replacement characters ("?") just so it'll work dammit.
+                    stream.write(contents.encode("windows-1252", "replace"))
             return True
 
         for journal_name, translations in self._journals.items():
             self._report.msg("Copying Journal '{}'", journal_name, indent=1)
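
The hunk above is the "PotS encoding escape hatch" fix from the commit message: previously the try wrapped the with block, so the UnicodeEncodeError handler only ran after the with had already closed the stream, leaving the replacement-character fallback with nothing open to write to. A distilled sketch of the corrected pattern (simplified names, plain open() instead of Korman's _generate_file()):

def write_text_file(path, contents):
    # Keep the except inside the with so the fallback write still has an open stream.
    with open(path, "wb") as stream:
        try:
            stream.write(contents.encode("windows-1252"))
        except UnicodeEncodeError:
            # Escape hatch: swap unencodable characters for "?" rather than failing the export.
            stream.write(contents.encode("windows-1252", "replace"))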
@@ -93,7 +105,7 @@
                 continue
             suffix = "_{}".format(language_name.lower()) if language_name != "English" else ""
             file_name = "{}--{}{}.txt".format(age_name, journal_name, suffix)
-            write_journal_file(language_name, file_name, value)
+            write_text_file(language_name, file_name, value)
 
         # Ensure that default (read: "English") journal is available
         if "English" not in translations:
@@ -102,55 +114,79 @@
                 if language_name is not None:
                     file_name = "{}--{}.txt".format(age_name, journal_name)
                     # If you manage to screw up this badly... Well, I am very sorry.
-                    if write_journal_file(language_name, file_name, value):
+                    if write_text_file(language_name, file_name, value):
                         self._report.warn("No 'English' translation available, so '{}' will be used as the default",
                                           language_name, indent=2)
                 else:
                     self._report.port("No 'English' nor any other suitable default translation available", indent=2)
 
-    def _generate_loc_file(self):
-        # Only generate this junk if needed
-        if not self._strings and not self._journals:
+    def _generate_loc_files(self):
+        set_LUT = {
+            "Journals": self._journals
+        }
+
+        # Merge in any manual strings, but error if dupe sets are encountered.
+        special_sets, string_sets = frozenset(set_LUT.keys()), frozenset(self._strings.keys())
+        intersection = special_sets & string_sets
+        assert not intersection, "Duplicate localization sets: {}".format(" ".join(intersection))
+        set_LUT.update(self._strings)
+
+        if not any(itertools.chain.from_iterable(set_LUT.values())):
             return
 
+        method = bpy.context.scene.world.plasma_age.localization_method
+        if method == "single_file":
+            self._generate_loc_file("{}.loc".format(self._age_name), set_LUT)
+        elif method in {"database", "database_back_compat"}:
+            # Where the strings are set -> element -> language: str, we want language -> set -> element: str
+            # This is so we can mimic pfLocalizationEditor's <agename>English.loc pathing.
+            database = {}
+            for set_name, elements in set_LUT.items():
+                for element_name, translations in elements.items():
+                    for language_name, value in translations.items():
+                        database.setdefault(language_name, {}).setdefault(set_name, {})[element_name] = value
+
+            for language_name, sets in database.items():
+                self._generate_loc_file("{}{}.loc".format(self._age_name, language_name), sets, language_name)
+
+            # Generate an empty localization file to defeat any old ones from Korman 0.11 (and lower)
+            if method == "database_back_compat":
+                self._generate_loc_file("{}.loc".format(self._age_name), {})
+        else:
+            raise RuntimeError("Unexpected localization method {}".format(method))
+
+    def _generate_loc_file(self, filename, sets, language_name=None):
         def write_line(value, *args, **kwargs):
             # tabs suck, then you die...
             whitespace = "    " * kwargs.pop("indent", 0)
             if args or kwargs:
                 value = value.format(*args, **kwargs)
             line = "".join((whitespace, value, "\n"))
-            stream.write(line.encode("utf-16_le"))
+            stream.write(line.encode("utf-8"))
 
-        age_name = self._age_name
-        enc = plEncryptedStream.kEncAes if self._version == pvEoa else None
-        file_name = "{}.loc".format(age_name)
-        with self._generate_file(file_name, enc=enc) as stream:
-            # UTF-16 little endian byte order mark
-            stream.write(b"\xFF\xFE")
-            write_line("<?xml version=\"1.0\" encoding=\"utf-16\"?>")
+        def iter_element(element):
+            if language_name is None:
+                yield from element.items()
+            else:
+                yield language_name, element
+
+        enc = plEncryptedStream.kEncAes if self._version == pvEoa else None
+        with self._generate_file(filename, enc=enc) as stream:
+            write_line("<?xml version=\"1.0\" encoding=\"utf-8\"?>")
             write_line("<localizations>")
-            write_line("<age name=\"{}\">", age_name, indent=1)
+            write_line("<age name=\"{}\">", self._age_name, indent=1)
 
-            # Arbitrary strings defined by something like a GUI or a node tree
-            for set_name, elements in self._strings.items():
+            for set_name, elements in sets.items():
                 write_line("<set name=\"{}\">", set_name, indent=2)
-                for element_name, translations in elements.items():
+                for element_name, value in elements.items():
                     write_line("<element name=\"{}\">", element_name, indent=3)
-                    for language_name, value in translations.items():
-                        write_line("<translation language=\"{language}\">{translation}</translation>",
-                                   language=language_name, translation=xml_escape(value), indent=4)
-                    write_line("</element>", indent=3)
-                write_line("</set>", indent=2)
-
-            # Journals
-            if self._journals:
-                write_line("<set name=\"Journals\">", indent=2)
-                for journal_name, translations in self._journals.items():
-                    write_line("<element name=\"{}\">", journal_name, indent=3)
-                    for language_name, value in translations.items():
+                    for translation_language, translation_value in iter_element(value):
+                        if _ESHTML_REGEX.search(translation_value):
+                            encoded_value = "<![CDATA[{}]]>".format(translation_value)
+                        else:
+                            encoded_value = xml_escape(translation_value)
                         write_line("<translation language=\"{language}\">{translation}</translation>",
-                                   language=language_name, translation=xml_escape(value), indent=4)
+                                   language=translation_language, translation=encoded_value, indent=4)
                     write_line("</element>", indent=3)
                 write_line("</set>", indent=2)
@@ -198,6 +234,6 @@ class LocalizationConverter:
 
     def save(self):
         if self._version > pvPots:
-            self._generate_loc_file()
+            self._generate_loc_files()
         else:
-            self._generate_journal_texts()
+            self._generate_text_files()
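
To illustrate the new CDATA handling in _generate_loc_file() (a sketch, not part of the diff; the sample strings are invented, with <pb> standing in for an esHTML tag): translations containing esHTML markup are emitted verbatim inside CDATA, while plain text is still XML-escaped.

import re
from xml.sax.saxutils import escape as xml_escape

_ESHTML_REGEX = re.compile("<.+>")

def encode_translation(value):
    # Mirrors the branch added in _generate_loc_file() above.
    if _ESHTML_REGEX.search(value):
        return "<![CDATA[{}]]>".format(value)
    return xml_escape(value)

print(encode_translation("Fish & chips"))      # Fish &amp; chips
print(encode_translation("<pb>Second page"))   # <![CDATA[<pb>Second page]]>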

korman/operators/op_export.py (8 lines changed)

@@ -96,6 +96,14 @@ class PlasmaAgeExportOperator(ExportOperator, bpy.types.Operator):
                                           "default": "as_requested",
                                           "options": set()}),
 
+        "localization_method": (EnumProperty, {"name": "Localization",
+                                               "description": "Specifies how localization data should be exported",
+                                               "items": [("database", "Localization Database", "A per-language database compatible with pfLocalizationEditor"),
+                                                         ("database_back_compat", "Localization Database (Compat Mode)", "A per-language database compatible with pfLocalizationEditor and Korman <=0.11"),
+                                                         ("single_file", "Single File", "A single file database, as in Korman <=0.11")],
+                                               "default": "database",
+                                               "options": set()}),
+
         "export_active": (BoolProperty, {"name": "INTERNAL: Export currently running",
                                          "default": False,
                                          "options": {"SKIP_SAVE"}}),

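For reference, a hypothetical summary of which files each of the three choices above is expected to produce, based on _generate_loc_files() in locman.py (the Age name "MyAge" and the language set are invented):

# Hypothetical Age "MyAge" with English and French translations.
expected_outputs = {
    "single_file": ["MyAge.loc"],
    "database": ["MyAgeEnglish.loc", "MyAgeFrench.loc"],
    # Compat mode also writes an empty MyAge.loc so it overrides any stale
    # single-file database left over from Korman <= 0.11.
    "database_back_compat": ["MyAgeEnglish.loc", "MyAgeFrench.loc", "MyAge.loc"],
}
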
korman/ui/ui_world.py (1 line changed)

@@ -276,6 +276,7 @@ class PlasmaAgePanel(AgeButtonsPanel, bpy.types.Panel):
         layout.separator()
         layout.prop(age, "envmap_method")
         layout.prop(age, "lighting_method")
+        layout.prop(age, "localization_method")
         layout.prop(age, "python_method")
         layout.prop(age, "texcache_method")
