Implement cubemap export
@@ -23,6 +23,7 @@ extern "C" {
 static PyMethodDef korlib_Methods[] = {
     { _pycs("create_bump_LUT"), (PyCFunction)create_bump_LUT, METH_VARARGS, NULL },
     { _pycs("inspect_vorbisfile"), (PyCFunction)inspect_vorbisfile, METH_VARARGS, NULL },
+    { _pycs("scale_image"), (PyCFunction)scale_image, METH_KEYWORDS | METH_VARARGS, NULL },
     { NULL, NULL, 0, NULL },
 };
 
@@ -28,6 +28,8 @@
 
 #define TEXTARGET_TEXTURE_2D 0
 
+// ===============================================================================================
+
 static inline void _ensure_copy_bytes(PyObject* parent, PyObject*& data) {
     // PyBytes objects are immutable and ought not to be changed once they are returned to Python
     // code. Therefore, this tests to see if the given bytes object is the same as one we're holding.
@@ -157,6 +159,33 @@ static void _scale_image(const uint8_t* srcBuf, const size_t srcW, const size_t
     }
 }
 
+// ===============================================================================================
+
+PyObject* scale_image(PyObject*, PyObject* args, PyObject* kwargs) {
+    static char* kwlist[] = { _pycs("buf"), _pycs("srcW"), _pycs("srcH"),
+                              _pycs("dstW"), _pycs("dstH"), NULL };
+    const uint8_t* srcBuf;
+    int srcBufSz;
+    uint32_t srcW, srcH, dstW, dstH;
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#IIII", kwlist, &srcBuf, &srcBufSz, &srcW, &srcH, &dstW, &dstH)) {
+        PyErr_SetString(PyExc_TypeError, "scale_image expects a bytes object, int, int, int int");
+        return NULL;
+    }
+
+    int expectedBufSz = srcW * srcH * sizeof(uint32_t);
+    if (srcBufSz != expectedBufSz) {
+        PyErr_Format(PyExc_ValueError, "buf size (%i bytes) incorrect (expected: %i bytes)", srcBufSz, expectedBufSz);
+        return NULL;
+    }
+
+    PyObject* dst = PyBytes_FromStringAndSize(NULL, dstW * dstH * sizeof(uint32_t));
+    uint8_t* dstBuf = reinterpret_cast<uint8_t*>(PyBytes_AS_STRING(dst));
+    _scale_image(srcBuf, srcW, srcH, dstBuf, dstW, dstH);
+    return dst;
+}
+
+// ===============================================================================================
+
 enum {
     TEX_DETAIL_ALPHA = 0,
     TEX_DETAIL_ADD = 1,
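Note: the new korlib.scale_image entry point takes a packed RGBA buffer plus source and destination dimensions and returns a new bytes object of dstW * dstH * 4 bytes. A minimal usage sketch (assuming the compiled korlib module is importable; the pixel values are made up):

    from korlib import scale_image

    # 2x2 RGBA source (4 bytes per pixel), scaled up to 4x4
    src = bytes([255, 0, 0, 255] * 4)        # four solid-red pixels
    dst = scale_image(src, 2, 2, 4, 4)       # buf, srcW, srcH, dstW, dstH
    assert len(dst) == 4 * 4 * 4             # dstW * dstH * 4 bytes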
@@ -180,6 +209,8 @@ typedef struct {
     bool fPyOwned;
 } pyMipmap;
 
+// ===============================================================================================
+
 static void pyGLTexture_dealloc(pyGLTexture* self) {
     Py_CLEAR(self->m_textureKey);
     Py_CLEAR(self->m_blenderImage);
@@ -445,6 +476,26 @@ static PyObject* pyGLTexture_get_has_alpha(pyGLTexture* self, void*) {
     return PyBool_FromLong(0);
 }
 
+static PyObject* pyGLTexture_get_image_data(pyGLTexture* self, void*) {
+    Py_XINCREF(self->m_imageData);
+    return Py_BuildValue("iiO", self->m_width, self->m_height, self->m_imageData);
+}
+
+static int pyGLTexture_set_image_data(pyGLTexture* self, PyObject* value, void*) {
+    PyObject* data;
+    // Requesting a Bytes object "S" instead of a buffer "y#" so we can just increment the reference
+    // count on a buffer that already exists, instead of doing a memcpy.
+    if (!PyArg_ParseTuple(value, "iiS", &self->m_width, &self->m_height, &data)) {
+        PyErr_SetString(PyExc_TypeError, "image_data should be a sequence of int, int, bytes");
+        return -1;
+    }
+
+    Py_XDECREF(self->m_imageData);
+    Py_XINCREF(data);
+    self->m_imageData = data;
+    return 0;
+}
+
 static PyObject* pyGLTexture_get_num_levels(pyGLTexture* self, void*) {
     return PyLong_FromLong(_get_num_levels(self->m_width, self->m_height));
 }
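Note: the image_data accessors let Python read back or replace the texture's pixel buffer without copying; the setter parses an (int, int, bytes) tuple with the "S" format so it only takes a new reference to the existing bytes object. A rough sketch of the intended round trip (glimage here is a hypothetical pyGLTexture instance, not code from the commit):

    # getter returns (width, height, bytes), as built by Py_BuildValue("iiO", ...)
    width, height, pixels = glimage.image_data

    # setter expects a 3-tuple of int, int, bytes; the bytes object is reference-counted,
    # not copied, so it should be treated as immutable afterwards
    glimage.image_data = (width, height, pixels)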
@@ -461,6 +512,7 @@ static PyObject* pyGLTexture_get_size_pot(pyGLTexture* self, void*) {
 
 static PyGetSetDef pyGLTexture_GetSet[] = {
     { _pycs("has_alpha"), (getter)pyGLTexture_get_has_alpha, NULL, NULL, NULL },
+    { _pycs("image_data"), (getter)pyGLTexture_get_image_data, (setter)pyGLTexture_set_image_data, NULL, NULL },
     { _pycs("num_levels"), (getter)pyGLTexture_get_num_levels, NULL, NULL, NULL },
     { _pycs("size_npot"), (getter)pyGLTexture_get_size_npot, NULL, NULL, NULL },
     { _pycs("size_pot"), (getter)pyGLTexture_get_size_pot, NULL, NULL, NULL },
@@ -21,6 +21,8 @@
 
 extern "C" {
 
+PyObject* scale_image(PyObject*, PyObject*, PyObject*);
+
 extern PyTypeObject pyGLTexture_Type;
 PyObject* Init_pyGLTexture_Type();
 
@@ -26,6 +26,11 @@ from . import utils
 
 _MAX_STENCILS = 6
 
+# Blender cube map mega image to libHSPlasma plCubicEnvironmap faces mapping...
+# See https://blender.stackexchange.com/questions/46891/how-to-render-an-environment-to-a-cube-map-in-cycles
+_BLENDER_CUBE_MAP = ("leftFace", "backFace", "rightFace",
+                     "bottomFace", "topFace", "frontFace")
+
 class _Texture:
     _DETAIL_BLEND = {
         TEX_DETAIL_ALPHA: "AL",
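Note: the tuple order follows Blender's 3x2 environment-map layout: the first row of the mega image holds the left, back, and right faces, and the second row holds the bottom, top, and front faces, so face index i lands in column i % 3 and row i // 3. A small illustrative sketch of that mapping (not part of the commit):

    _BLENDER_CUBE_MAP = ("leftFace", "backFace", "rightFace",
                         "bottomFace", "topFace", "frontFace")

    for i, face in enumerate(_BLENDER_CUBE_MAP):
        col, row = i % 3, i // 3
        print("{}: column {}, row {}".format(face, col, row))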
@@ -40,7 +45,7 @@ class _Texture:
         if texture is not None:
             if image is None:
                 image = texture.image
-            self.calc_alpha = texture.use_calculate_alpha
+            self.calc_alpha = getattr(texture, "use_calculate_alpha", False)
             self.mipmap = texture.use_mipmap
         else:
             self.layer = kwargs.get("layer")
@@ -58,6 +63,7 @@ class _Texture:
             self.calc_alpha = False
             self.use_alpha = True
             self.allowed_formats = {"DDS"}
+            self.is_cube_map = False
         else:
             self.is_detail_map = False
             use_alpha = kwargs.get("use_alpha")
@@ -70,6 +76,7 @@ class _Texture:
             self.use_alpha = use_alpha
             self.allowed_formats = kwargs.get("allowed_formats",
                                               {"DDS"} if self.mipmap else {"PNG", "JPG"})
+            self.is_cube_map = kwargs.get("is_cube_map", False)
 
         # Basic format sanity
         if self.mipmap:
@@ -489,13 +496,37 @@ class MaterialConverter:
             else:
                 pl_env = plDynamicEnvMap
             pl_env = self.export_dynamic_env(bo, layer, texture, pl_env)
+        elif bl_env.source == "IMAGE_FILE":
+            pl_env = self.export_cubic_env(bo, layer, texture)
         else:
-            # We should really export a CubicEnvMap here, but we have a good setup for DynamicEnvMaps
-            # that create themselves when the explorer links in, so really... who cares about CEMs?
-            self._exporter().report.warn("IMAGE EnvironmentMaps are not supported. '{}' will not be exported!".format(layer.key.name))
-            pl_env = None
+            raise NotImplementedError(bl_env.source)
         layer.state.shadeFlags |= hsGMatState.kShadeEnvironMap
-        layer.texture = pl_env.key
+        if pl_env is not None:
+            layer.texture = pl_env.key
+
+    def export_cubic_env(self, bo, layer, texture):
+        width, height = texture.image.size
+
+        # Sanity check: the image here should be 3x2 faces, so we should not have any
+        # dam remainder...
+        if width % 3 != 0:
+            raise ExportError("CubeMap '{}' width must be a multiple of 3".format(image.name))
+        if height % 2 != 0:
+            raise ExportError("CubeMap '{}' height must be a multiple of 2".format(image.name))
+
+        # According to PlasmaMAX, we don't give a rip about UVs...
+        layer.UVWSrc = plLayerInterface.kUVWReflect
+        layer.state.miscFlags |= hsGMatState.kMiscUseReflectionXform
+
+        # Well, this is kind of sad...
+        # Back before the texture cache existed, all the image work was offloaded
+        # to a big "finalize" save step to prevent races. The texture cache would
+        # prevent that as well, so we could theoretically slice-and-dice the single
+        # image here... but... meh. Offloading taim.
+        self.export_prepared_image(texture=texture, owner=layer, indent=3,
+                                   use_alpha=False, mipmap=True, allowed_formats={"DDS"},
+                                   is_cube_map=True, tag="cubemap")
+
 
     def export_dynamic_env(self, bo, layer, texture, pl_class):
         # To protect the user from themselves, let's check to make sure that a DEM/DCM matching this
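Note: because the source image is a 3x2 atlas of faces, export_cubic_env only checks divisibility here; each face works out to width // 3 by height // 2 pixels. For example, with the 3072x2048 atlas mentioned in the profiling note further down (illustrative numbers only):

    width, height = 3072, 2048                           # 3x2 atlas of 1024x1024 faces
    assert width % 3 == 0 and height % 2 == 0            # the sanity checks above
    face_width, face_height = width // 3, height // 2    # 1024, 1024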
@@ -695,6 +726,8 @@ class MaterialConverter:
            - ephemeral: (optional) never cache this image
            - tag: (optional) an optional identifier hint that allows multiple images with the
                   same name to coexist in the cache
+           - is_cube_map: (optional) indicates the provided image contains six cube faces
+                  that must be split into six separate images for Plasma
        """
        owner = kwargs.pop("owner", None)
        indent = kwargs.pop("indent", 2)
@@ -723,7 +756,8 @@ class MaterialConverter:
 
         for key, owners in self._pending.items():
             name = str(key)
-            self._report.msg("\n[Mipmap '{}']", name)
+            pClassName = "CubicEnvironmap" if key.is_cube_map else "Mipmap"
+            self._report.msg("\n[{} '{}']", pClassName, name)
 
             image = key.image
 
@@ -747,7 +781,10 @@ class MaterialConverter:
             cached_image = texcache.get_from_texture(key, compression)
 
             if cached_image is None:
-                numLevels, width, height, data = self._finalize_single_image(key, image, name, compression, dxt)
+                if key.is_cube_map:
+                    numLevels, width, height, data = self._finalize_cube_map(key, image, name, compression, dxt)
+                else:
+                    numLevels, width, height, data = self._finalize_single_image(key, image, name, compression, dxt)
                 texcache.add_texture(key, numLevels, (width, height), compression, data)
             else:
                 width, height = cached_image.export_size
@@ -768,28 +805,110 @@ class MaterialConverter:
                 # then we need to do that and stuff the level data. This is a little tedious, but we
                 # need to be careful to manage our resources correctly
                 if page not in pages:
-                    mipmap = plMipmap(name=name, width=width, height=height,
-                                      numLevels=numLevels, compType=compression,
-                                      format=plBitmap.kRGB8888, dxtLevel=dxt)
-                    for i in range(numLevels):
-                        mipmap.setLevel(i, data[0][i])
-                    mgr.AddObject(page, mipmap)
-                    pages[page] = mipmap
+                    mipmap = plMipmap(name=name, width=width, height=height, numLevels=numLevels,
+                                      compType=compression, format=plBitmap.kRGB8888, dxtLevel=dxt)
+                    if key.is_cube_map:
+                        assert len(data) == 6
+                        texture = plCubicEnvironmap(name)
+                        for face_name, face_data in zip(_BLENDER_CUBE_MAP, data):
+                            for i in range(numLevels):
+                                mipmap.setLevel(i, face_data[i])
+                            setattr(texture, face_name, mipmap)
+                    else:
+                        assert len(data) == 1
+                        for i in range(numLevels):
+                            mipmap.setLevel(i, data[0][i])
+                        texture = mipmap
+
+                    mgr.AddObject(page, texture)
+                    pages[page] = texture
                 else:
-                    mipmap = pages[page]
+                    texture = pages[page]
 
                 # The object that references this image can be either a layer (will appear
                 # in the 3d world) or an image library (will appear in a journal or in another
                 # dynamic manner in game)
                 if isinstance(owner, plLayerInterface):
-                    owner.texture = mipmap.key
+                    owner.texture = texture.key
                 elif isinstance(owner, plImageLibMod):
-                    owner.addImage(mipmap.key)
+                    owner.addImage(texture.key)
                 else:
                     raise RuntimeError(owner.ClassName())
 
             inc_progress()
 
+    def _finalize_cube_map(self, key, image, name, compression, dxt):
+        oWidth, oHeight = image.size
+        if oWidth == 0 and oHeight == 0:
+            raise ExportError("Image '{}' could not be loaded.".format(image.name))
+
+        # Non-DXT images are BGRA in Plasma
+        bgra = compression != plBitmap.kDirectXCompression
+
+        # Grab the cube map data from OpenGL and prepare to begin...
+        with GLTexture(key, bgra=bgra) as glimage:
+            cWidth, cHeight, data = glimage.image_data
+
+        # On some platforms, Blender will be "helpful" and scale the image to a POT.
+        # That's great, but we have 3 faces as a width, which will certainly be NPOT
+        # in the case of POT faces. So, we will scale the image AGAIN, if Blender did
+        # something funky.
+        if oWidth != cWidth or oHeight != cHeight:
+            self._report.warn("Image was resized by Blender to ({}x{})--resizing the resize to ({}x{})",
+                              cWidth, cHeight, oWidth, oHeight, indent=1)
+            data = scale_image(data, cWidth, cHeight, oWidth, oHeight)
+
+        # Face dimensions
+        fWidth, fHeight = oWidth // 3, oHeight // 2
+
+        # Copy each of the six faces into a separate image buffer.
+        # NOTE: At present, I am well pleased with the speed of this functionality.
+        #       According to my profiling, it takes roughly 0.7 seconds to process a
+        #       cube map whose faces are 1024x1024 (3072x2048 total). Maybe a later
+        #       commit will move this into korlib. We'll see.
+        face_num = len(_BLENDER_CUBE_MAP)
+        face_images = [None] * face_num
+        for i in range(face_num):
+            col_id = i if i < 3 else i - 3
+            row_start = 0 if i < 3 else fHeight
+            row_end = fHeight if i < 3 else oHeight
+
+            face_data = bytearray(fWidth * fHeight * 4)
+            for row_current in range(row_start, row_end, 1):
+                src_start_idx = (row_current * oWidth * 4) + (col_id * fWidth * 4)
+                src_end_idx = src_start_idx + (fWidth * 4)
+                dst_start_idx = (row_current - row_start) * fWidth * 4
+                dst_end_idx = dst_start_idx + (fWidth * 4)
+                face_data[dst_start_idx:dst_end_idx] = data[src_start_idx:src_end_idx]
+            face_images[i] = bytes(face_data)
+
+        # Now that we have our six faces, we'll toss them into the GLTexture helper
+        # to generate mipmaps, if needed...
+        for i, face_name in enumerate(_BLENDER_CUBE_MAP):
+            glimage = GLTexture(key)
+            glimage.image_data = fWidth, fHeight, face_images[i]
+            eWidth, eHeight = glimage.size_pot
+            name = face_name[:-4].upper()
+            if compression == plBitmap.kDirectXCompression:
+                numLevels = glimage.num_levels
+                self._report.msg("Generating mip levels for cube face '{}'", name, indent=1)
+
+                # If we're compressing this mofo, we'll need a temporary mipmap to do that here...
+                mipmap = plMipmap(name=name, width=eWidth, height=eHeight, numLevels=numLevels,
+                                  compType=compression, format=plBitmap.kRGB8888, dxtLevel=dxt)
+            else:
+                numLevels = 1
+                self._report.msg("Compressing single level for cube face '{}'", name, indent=1)
+
+            face_images[i] = [None] * numLevels
+            for j in range(numLevels):
+                level_data = glimage.get_level_data(j, key.calc_alpha, report=self._report)
+                if compression == plBitmap.kDirectXCompression:
+                    mipmap.CompressImage(j, level_data)
+                    level_data = mipmap.getLevel(j)
+                face_images[i][j] = level_data
+        return numLevels, eWidth, eHeight, face_images
+
     def _finalize_single_image(self, key, image, name, compression, dxt):
         oWidth, oHeight = image.size
         if oWidth == 0 and oHeight == 0:
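Note: the face copy in _finalize_cube_map walks the packed RGBA buffer row by row: a face row starts at (row * atlas_width + column * face_width) * 4 bytes in the source and lands at (row - row_start) * face_width * 4 in the face buffer. A standalone sketch of the same indexing (assumed helper name, not part of the commit):

    def extract_face(data, atlas_w, face_w, face_h, col, row_start):
        """Copy one face out of a 3x2 RGBA atlas (4 bytes per pixel)."""
        face = bytearray(face_w * face_h * 4)
        for row in range(row_start, row_start + face_h):
            src = (row * atlas_w + col * face_w) * 4
            dst = (row - row_start) * face_w * 4
            face[dst:dst + face_w * 4] = data[src:src + face_w * 4]
        return bytes(face)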
@@ -804,24 +923,24 @@ class MaterialConverter:
             if compression == plBitmap.kDirectXCompression:
                 numLevels = glimage.num_levels
                 self._report.msg("Generating mip levels", indent=1)
+
+                # If this is a DXT-compressed mipmap, we need to use a temporary mipmap
+                # to do the compression. We'll then steal the data from it.
+                mipmap = plMipmap(name=name, width=eWidth, height=eHeight, numLevels=numLevels,
+                                  compType=compression, format=plBitmap.kRGB8888, dxtLevel=dxt)
             else:
                 numLevels = 1
                 self._report.msg("Compressing single level", indent=1)
 
             # Hold the uncompressed level data for now. We may have to make multiple copies of
             # this mipmap for per-page textures :(
-            data = []
+            data = [None] * numLevels
             for i in range(numLevels):
-                data.append(glimage.get_level_data(i, key.calc_alpha, report=self._report))
-
-            # If this is a DXT-compressed mipmap, we need to use a temporary mipmap
-            # to do the compression. We'll then steal the data from it.
-            if compression == plBitmap.kDirectXCompression:
-                mipmap = plMipmap(name=name, width=eWidth, height=eHeight, numLevels=numLevels,
-                                  compType=compression, format=plBitmap.kRGB8888, dxtLevel=dxt)
-                for i in range(numLevels):
-                    mipmap.CompressImage(i, data[i])
-                    data[i] = mipmap.getLevel(i)
+                level_data = glimage.get_level_data(i, key.calc_alpha, report=self._report)
+                if compression == plBitmap.kDirectXCompression:
+                    mipmap.CompressImage(i, level_data)
+                    level_data = mipmap.getLevel(i)
+                data[i] = level_data
         return numLevels, eWidth, eHeight, [data,]
 
     def get_materials(self, bo):
@@ -27,7 +27,7 @@ TEX_DETAIL_ALPHA = 0
 TEX_DETAIL_ADD = 1
 TEX_DETAIL_MULTIPLY = 2
 
-def _scale_image(buf, srcW, srcH, dstW, dstH):
+def scale_image(buf, srcW, srcH, dstW, dstH):
     """Scales an RGBA image using the algorithm from CWE's plMipmap::ScaleNicely"""
     dst, dst_idx = bytearray(dstW * dstH * 4), 0
     scaleX, scaleY = (srcW / dstW), (srcH / dstH)
@@ -229,6 +229,12 @@ class GLTexture:
             return True
         return False
 
+    def _get_image_data(self):
+        return (self._width, self._height, self._image_data)
+    def _set_image_data(self, value):
+        self._width, self._height, self._image_data = value
+    image_data = property(_get_image_data, _set_image_data)
+
     def _invert_image(self, width, height, buf):
         size = width * height * 4
         finalBuf = bytearray(size)
|
Reference in New Issue
Block a user