
Use the area-weighted inverse normal for GUI cameras.

We now calculate the area-weighted normal of all polygons in the
GUI page, then flip it to get the direction the GUI camera should
face. This should, in theory, be more intuitive for artists: the
GUI camera always points at what they've modelled, instead of
imposing arbitrary rules on the coordinate system of GUI objects.
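
In code terms, the idea reduces to the following (a minimal sketch using
Blender's mathutils; the `polygons` argument stands in for the polygons of
every mesh in the GUI page and is not a name from the patch):

    import mathutils

    def area_weighted_inverse_normal(polygons) -> mathutils.Vector:
        # Sum the polygon normals, weighting each by its face area,
        # so larger faces dominate the average.
        total = mathutils.Vector()  # (0, 0, 0)
        for polygon in polygons:
            total += polygon.normal * polygon.area
        total.normalize()
        # Flip the result so it points *at* the GUI rather than out of it.
        return total * -1.0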
Adam Johnson committed 1 year ago (branch: pull/373/head)
commit 3ff64ff861
Signed by: Hoikas (GPG Key ID: 0B6515D6FF6F271E)
1 changed file, 61 lines changed:

korman/exporter/gui.py

@@ -20,6 +20,7 @@ import mathutils
 from contextlib import contextmanager, ExitStack
 import itertools
+import math
 from PyHSPlasma import *
 from typing import *
 import weakref
@@ -84,27 +85,28 @@ class GuiConverter:
         if not objects:
             raise ExportError("No objects specified for GUI Camera generation.")
 
-        class ObjArea(NamedTuple):
-            obj: bpy.types.Object
-            area: float
-
+        # Generally, GUIs are flat planes. However, we are not Cyan, so artists cannot walk down
+        # the hallway to get smacked on the knuckles by programmers. This means that they might
+        # give us some three dimensional crap as a GUI. Therefore, to come up with a camera matrix,
+        # we'll use the average area-weighted inverse normal of all the polygons they give us. That
+        # way, the camera *always* should face the GUI as would be expected.
         remove_mesh = bpy.data.meshes.remove
-        obj_areas: List[ObjArea] = []
+        avg_normal = mathutils.Vector()
         for i in objects:
             mesh = i.to_mesh(bpy.context.scene, True, "RENDER", calc_tessface=False)
             with helpers.TemporaryObject(mesh, remove_mesh):
                 utils.transform_mesh(mesh, i.matrix_world)
-                obj_areas.append(
-                    ObjArea(i, sum((polygon.area for polygon in mesh.polygons)))
-                )
-        largest_obj = max(obj_areas, key=lambda x: x.area)
+                for polygon in mesh.polygons:
+                    avg_normal += (polygon.normal * polygon.area)
+        avg_normal.normalize()
+        avg_normal *= -1.0
 
-        # GUIs are generally flat planes, which, by default in Blender, have their normal pointing
-        # in the localspace z axis. Therefore, what we would like to do is have a camera that points
-        # at the localspace -Z axis. Where up is the localspace +Y axis. We are already pointing at
-        # -Z with +Y being up from what I can tel. So, just use this matrix.
-        mat = largest_obj.obj.matrix_world.to_3x3()
+        # From the inverse area weighted normal we found above, get the rotation from the up axis
+        # (that is to say, the +Z axis) and create our rotation matrix.
+        axis = mathutils.Vector((avg_normal.x, avg_normal.y, 0.0))
+        axis.normalize()
+        angle = math.acos(avg_normal.z)
+        mat = mathutils.Matrix.Rotation(angle, 3, axis)
 
         # Now, we know the rotation of the camera. Great! What we need to do now is ensure that all
         # of the objects in question fit within the view of a 4:3 camera rotated as above. Blender
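
As an aside on the rotation construction above: mathutils can also derive a
rotation that carries +Z onto the averaged normal directly, via
Vector.rotation_difference(). A quick sketch of that alternative (not what
the patch does; the avg_normal value here is made up, and Blender 2.7x's `*`
operator is assumed for matrix-vector multiplication):

    import mathutils

    up = mathutils.Vector((0.0, 0.0, 1.0))
    avg_normal = mathutils.Vector((0.3, -0.2, -0.9)).normalized()  # example value only

    # rotation_difference() returns the quaternion rotating `up` onto
    # `avg_normal`; to_matrix() converts it to a 3x3 rotation matrix.
    mat = up.rotation_difference(avg_normal).to_matrix()
    assert (mat * up - avg_normal).length < 1e-5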
@@ -120,10 +122,10 @@ class GuiConverter:
             camera.data.lens_unit = "FOV"
 
         # Get all of the bounding points and make sure they all fit inside the camera's view frame.
-        bound_boxes = (
+        bound_boxes = [
             obj.matrix_world * mathutils.Vector(bbox)
             for obj in objects for bbox in obj.bound_box
-        )
+        ]
         co, _ = camera.camera_fit_coords(
             scene,
             # bound_box is a list of vectors of each corner of all the objects' bounding boxes;
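
A note on the camera_fit_coords() call being assembled here: it expects one
flat sequence of floats, not a list of vectors, which is what the
itertools.chain.from_iterable() call in the next hunk accomplishes. A tiny
sketch of that flattening, with made-up points:

    import itertools
    import mathutils

    points = [mathutils.Vector((0.0, 0.0, 0.0)), mathutils.Vector((1.0, 2.0, 3.0))]
    # Iterating a Vector yields its components, so chaining flattens
    # [Vector, Vector, ...] into [x, y, z, x, y, z, ...].
    flat = list(itertools.chain.from_iterable(points))
    assert flat == [0.0, 0.0, 0.0, 1.0, 2.0, 3.0]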
@@ -132,11 +134,24 @@ class GuiConverter:
             list(itertools.chain.from_iterable(bound_boxes))
         )
 
-        # Calculate the distance from the largest object to the camera. The scale we were given
-        # will be used to push the camera back in the Z+ direction of that object by scale times.
-        bvh = mathutils.bvhtree.BVHTree.FromObject(largest_obj.obj, scene)
-        loc, normal, index, distance = bvh.find_nearest(co)
-        co += normal * distance * (scale - 1.0)
+        # This generates a list of 6 faces per bounding box, which we then flatten out and pass
+        # into the BVHTree constructor. This is to calculate the distance from the camera to the
+        # "entire GUI" - which we can then use to apply the scale given to us.
+        if scale != 1.0:
+            bvh = mathutils.bvhtree.BVHTree.FromPolygons(
+                bound_boxes,
+                list(itertools.chain.from_iterable(
+                    [(i + 0, i + 1, i + 5, i + 4),
+                     (i + 1, i + 2, i + 5, i + 6),
+                     (i + 3, i + 2, i + 6, i + 7),
+                     (i + 0, i + 1, i + 2, i + 3),
+                     (i + 0, i + 3, i + 7, i + 4),
+                     (i + 4, i + 5, i + 6, i + 7),
+                    ] for i in range(0, len(bound_boxes), 8)
+                ))
+            )
+            loc, normal, index, distance = bvh.find_nearest(co)
+            co += normal * distance * (scale - 1.0)
 
         # ...
         mat.resize_4x4()
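
The face index tuples in the last hunk are easier to read with Blender's
bound_box corner ordering in mind: indices 0-3 are the corners on the -X
side, 4-7 the matching corners on the +X side, so each group of six tuples
stitches one box's eight corners into six quads. Below is a small
self-contained sketch of the same construction on a unit cube (it assumes
that documented corner ordering; the query point is made up):

    import itertools
    import mathutils
    import mathutils.bvhtree

    # Corners of a unit cube in Object.bound_box order:
    # 0:(-,-,-) 1:(-,-,+) 2:(-,+,+) 3:(-,+,-), then 4-7 repeat with +X.
    corners = [mathutils.Vector((x, y, z))
               for x in (0.0, 1.0)
               for y, z in ((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0))]

    # The same six quads per 8-corner box as in the patch above.
    faces = list(itertools.chain.from_iterable(
        [(i + 0, i + 1, i + 5, i + 4),
         (i + 1, i + 2, i + 5, i + 6),
         (i + 3, i + 2, i + 6, i + 7),
         (i + 0, i + 1, i + 2, i + 3),
         (i + 0, i + 3, i + 7, i + 4),
         (i + 4, i + 5, i + 6, i + 7),
        ] for i in range(0, len(corners), 8)
    ))

    bvh = mathutils.bvhtree.BVHTree.FromPolygons(corners, faces)
    # Nearest point on the cube to a point 2 units above its top face:
    loc, normal, index, distance = bvh.find_nearest(mathutils.Vector((0.25, 0.25, 3.0)))
    assert abs(distance - 2.0) < 1e-5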
