Update Files

This commit is contained in:
2025-01-22 16:18:30 +01:00
parent ed4603cf95
commit a36294b518
16718 changed files with 2960346 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,124 @@
import bpy
#Todo - Check if already exists, in case multiple objects has the same material
def backup_material_copy(slot):
    """Create a hidden, fake-user-protected backup copy of the slot's material.

    The backup is named ".<material>_Original". If a backup with that name
    already exists (e.g. when multiple objects share the same material), it
    is reused instead of creating a duplicate ".001"-suffixed copy.
    """
    material = slot.material
    backup_name = "." + material.name + "_Original"
    # Already backed up by another object sharing this material — nothing to do.
    if backup_name in bpy.data.materials:
        return
    dup = material.copy()
    dup.name = backup_name
    # Fake user keeps the zero-user backup datablock alive across saves.
    dup.use_fake_user = True
def backup_material_cache(slot, path):
    """Write a copy of the current .blend file to *path* as a cache backup.

    The *slot* argument is accepted for API symmetry with the other backup
    helpers; only the file copy is performed here.
    """
    bpy.ops.wm.save_as_mainfile(copy=True, filepath=path)
def backup_material_cache_restore(slot, path):
    """Placeholder for restoring a cached material backup (currently logs only)."""
    verbose = bpy.context.scene.TLM_SceneProperties.tlm_verbose
    if verbose:
        print("Restore cache")
# def backup_material_restore(obj): #??
# if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
# print("Restoring material for: " + obj.name)
#Check if object has TLM_PrevMatArray
# if yes
# - check if array.len is bigger than 0:
# if yes:
# for slot in object:
# originalMaterial = TLM_PrevMatArray[index]
#
#
# if no:
# - In which cases are these?
# if no:
# - In which cases are there not?
# - If a lightmapped material was applied to a non-lightmap object?
# if bpy.data.materials[originalMaterial].users > 0: #TODO - Check if all lightmapped
# print("Material has multiple users")
# if originalMaterial in bpy.data.materials:
# slot.material = bpy.data.materials[originalMaterial]
# slot.material.use_fake_user = False
# elif "." + originalMaterial + "_Original" in bpy.data.materials:
# slot.material = bpy.data.materials["." + originalMaterial + "_Original"]
# slot.material.use_fake_user = False
# else:
# print("Material has one user")
# if "." + originalMaterial + "_Original" in bpy.data.materials:
# slot.material = bpy.data.materials["." + originalMaterial + "_Original"]
# slot.material.use_fake_user = False
# elif originalMaterial in bpy.data.materials:
# slot.material = bpy.data.materials[originalMaterial]
# slot.material.use_fake_user = False
def backup_material_restore(obj):
    """Restore the original materials of *obj* from its "TLM_PrevMatArray" property.

    Each material-slot index maps to an entry in the stored name array; the
    slot material is swapped back to its ".<name>_Original" backup copy when
    that backup exists in bpy.data.materials.
    """
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print("Restoring material for: " + obj.name)
    if "TLM_PrevMatArray" in obj:
        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("Material restore array found: " + str(obj["TLM_PrevMatArray"]))
        prevMatArray = obj["TLM_PrevMatArray"]
        if len(prevMatArray) > 0:
            #Running through the slots; the slot index corresponds to the array index
            for idx, slot in enumerate(obj.material_slots):
                try:
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Attempting to set material")
                    originalMaterial = prevMatArray[idx]
                except IndexError:
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Material restore failed - Resetting")
                    originalMaterial = ""
                # Guard: an empty name (IndexError fallback) would otherwise
                # produce a bogus "._Original" lookup — skip the slot instead.
                if not originalMaterial:
                    continue
                if slot.material is not None:
                    backup_name = "." + originalMaterial + "_Original"
                    if backup_name in bpy.data.materials:
                        slot.material = bpy.data.materials[backup_name]
                        slot.material.use_fake_user = False
        else:
            print("No previous material for " + obj.name)
    else:
        print("No previous material for " + obj.name)
def backup_material_rename(obj): #??
    """Rename restored backup materials back to their original names.

    Strips the leading "." and trailing "_Original" that backup_material_copy
    added, then deletes the object's "TLM_PrevMatArray" bookkeeping property.
    """
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print("Renaming material for: " + obj.name)
    if "TLM_PrevMatArray" in obj:
        for slot in obj.material_slots:
            if slot.material is not None:
                if slot.material.name.endswith("_Original"):
                    # ".<name>_Original" -> "<name>": drop 1 leading + 9 trailing chars.
                    newname = slot.material.name[1:-9]
                    if newname in bpy.data.materials:
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print("Removing material: " + bpy.data.materials[newname].name)
                        #if bpy.data.materials[newname].users < 2:
                            #bpy.data.materials.remove(bpy.data.materials[newname]) #TODO - Maybe remove this
                    # NOTE(review): if a material named `newname` still exists,
                    # Blender will auto-suffix this rename (".001") — confirm intended.
                    slot.material.name = newname
        # Bookkeeping no longer needed once names are restored.
        del obj["TLM_PrevMatArray"]
    else:
        print("No Previous material array for: " + obj.name)

View File

@ -0,0 +1,179 @@
import bpy, os
from .. import build
from time import time, sleep
def bake(plus_pass=0):
    """Bake lightmaps for every enabled, visible mesh object in the scene.

    Iterates the scene twice: first to count the bakeable objects (for
    progress/ETA reporting), then to select and bake each object with the
    Cycles bake pass matching the configured lighting mode. Lightmaps are
    saved to disk as .hdr files, optionally between each bake.
    """
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print("Initializing lightmap baking.")
    # Start from a clean selection state.
    for obj in bpy.context.scene.objects:
        bpy.ops.object.select_all(action='DESELECT')
        obj.select_set(False)
    iterNum = 0          # total number of objects that will be baked
    currentIterNum = 0   # objects baked so far
    # Pass 1: count the objects that will actually be baked.
    for obj in bpy.context.scene.objects:
        if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
            hidden = False
            #We check if the object is hidden
            if obj.hide_get():
                hidden = True
            if obj.hide_viewport:
                hidden = True
            if obj.hide_render:
                hidden = True
            #We check if the object's collection is hidden
            collections = obj.users_collection
            for collection in collections:
                if collection.hide_viewport:
                    hidden = True
                if collection.hide_render:
                    hidden = True
                try:
                    if collection.name in bpy.context.scene.view_layers[0].layer_collection.children:
                        if bpy.context.scene.view_layers[0].layer_collection.children[collection.name].hide_viewport:
                            hidden = True
                except:
                    print("Error: Could not find collection: " + collection.name)
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use and not hidden:
                iterNum = iterNum + 1
    # NOTE(review): decrements the count when more than one object bakes —
    # presumably so the final progress print reaches 100%; confirm.
    if iterNum > 1:
        iterNum = iterNum - 1
    # Pass 2: bake each enabled, visible object.
    for obj in bpy.context.scene.objects:
        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("Checking visibility status for object and collections: " + obj.name)
        hidden = False
        #We check if the object is hidden
        if obj.hide_get():
            hidden = True
        if obj.hide_viewport:
            hidden = True
        if obj.hide_render:
            hidden = True
        #We check if the object's collection is hidden
        collections = obj.users_collection
        for collection in collections:
            if collection.hide_viewport:
                hidden = True
            if collection.hide_render:
                hidden = True
            try:
                if collection.name in bpy.context.scene.view_layers[0].layer_collection.children:
                    if bpy.context.scene.view_layers[0].layer_collection.children[collection.name].hide_viewport:
                        hidden = True
            except:
                print("Error: Could not find collection: " + collection.name)
        if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use and not hidden:
                scene = bpy.context.scene
                # Make this object the sole selected, active object for the bake op.
                bpy.ops.object.select_all(action='DESELECT')
                bpy.context.view_layer.objects.active = obj
                obj.select_set(True)
                obs = bpy.context.view_layer.objects
                active = obs.active
                obj.hide_render = False
                # Accumulate into the existing lightmap instead of clearing it.
                scene.render.bake.use_clear = False
                #os.system("cls")
                #if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Baking " + str(currentIterNum) + "/" + str(iterNum) + " (" + str(round(currentIterNum/iterNum*100, 2)) + "%) : " + obj.name)
                #elapsed = build.sec_to_hours((time() - bpy.app.driver_namespace["tlm_start_time"]))
                #print("Baked: " + str(currentIterNum) + " | Left: " + str(iterNum-currentIterNum))
                # Simple ETA: average seconds per finished bake times bakes left.
                elapsedSeconds = time() - bpy.app.driver_namespace["tlm_start_time"]
                bakedObjects = currentIterNum
                bakedLeft = iterNum-currentIterNum
                if bakedObjects == 0:
                    bakedObjects = 1  # avoid division by zero on the first bake
                averagePrBake = elapsedSeconds / bakedObjects
                remaining = averagePrBake * bakedLeft
                #print(time() - bpy.app.driver_namespace["tlm_start_time"])
                print("Elapsed time: " + str(round(elapsedSeconds, 2)) + "s | ETA remaining: " + str(round(remaining, 2)) + "s") #str(elapsed[0])
                #print("Averaged: " + str(averagePrBake))
                #print("Remaining: " + str(remaining))
                if scene.TLM_EngineProperties.tlm_target == "vertex":
                    scene.render.bake.target = "VERTEX_COLORS"
                # Choose the Cycles bake pass based on the configured lighting mode.
                if scene.TLM_EngineProperties.tlm_lighting_mode == "combined":
                    print("Baking combined: Direct + Indirect")
                    bpy.ops.object.bake(type="DIFFUSE", pass_filter={"DIRECT","INDIRECT"}, margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                elif scene.TLM_EngineProperties.tlm_lighting_mode == "indirect":
                    print("Baking combined: Indirect")
                    bpy.ops.object.bake(type="DIFFUSE", pass_filter={"INDIRECT"}, margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                elif scene.TLM_EngineProperties.tlm_lighting_mode == "ao":
                    print("Baking combined: AO")
                    bpy.ops.object.bake(type="AO", margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                elif scene.TLM_EngineProperties.tlm_lighting_mode == "combinedao":
                    # Two-pass mode: "tlm_plus_mode" 1 bakes lighting, 2 bakes AO.
                    if bpy.app.driver_namespace["tlm_plus_mode"] == 1:
                        bpy.ops.object.bake(type="DIFFUSE", pass_filter={"DIRECT","INDIRECT"}, margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                    elif bpy.app.driver_namespace["tlm_plus_mode"] == 2:
                        bpy.ops.object.bake(type="AO", margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                elif scene.TLM_EngineProperties.tlm_lighting_mode == "indirectao":
                    print("IndirAO")
                    if bpy.app.driver_namespace["tlm_plus_mode"] == 1:
                        print("IndirAO: 1")
                        bpy.ops.object.bake(type="DIFFUSE", pass_filter={"INDIRECT"}, margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                    elif bpy.app.driver_namespace["tlm_plus_mode"] == 2:
                        print("IndirAO: 2")
                        bpy.ops.object.bake(type="AO", margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                elif scene.TLM_EngineProperties.tlm_lighting_mode == "complete":
                    bpy.ops.object.bake(type="COMBINED", margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                else:
                    bpy.ops.object.bake(type="DIFFUSE", pass_filter={"DIRECT","INDIRECT"}, margin=scene.TLM_EngineProperties.tlm_dilation_margin, use_clear=False)
                #Save image between
                if scene.TLM_SceneProperties.tlm_save_preprocess_lightmaps:
                    for image in bpy.data.images:
                        if image.name.endswith("_baked"):
                            saveDir = os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_EngineProperties.tlm_lightmap_savedir)
                            bakemap_path = os.path.join(saveDir, image.name)
                            filepath_ext = ".hdr"
                            image.filepath_raw = bakemap_path + filepath_ext
                            image.file_format = "HDR"
                            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                print("Saving to: " + image.filepath_raw)
                            image.save()
                bpy.ops.object.select_all(action='DESELECT')
                currentIterNum = currentIterNum + 1
    # Final save of every baked lightmap to the lightmap directory.
    for image in bpy.data.images:
        if image.name.endswith("_baked"):
            saveDir = os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_EngineProperties.tlm_lightmap_savedir)
            bakemap_path = os.path.join(saveDir, image.name)
            filepath_ext = ".hdr"
            image.filepath_raw = bakemap_path + filepath_ext
            image.file_format = "HDR"
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Saving to: " + image.filepath_raw)
            image.save()

View File

@ -0,0 +1,527 @@
import bpy, os
def apply_lightmaps():
    """Point each material's "Baked Image" node at the final lightmap file.

    The filename postfix depends on which postprocess stages are enabled
    ("_baked" -> "_denoised" -> "_filtered"), and on whether the object
    belongs to an atlas group or is postpacked.
    """
    for obj in bpy.context.scene.objects:
        if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                hidden = False
                if obj.hide_get():
                    hidden = True
                if obj.hide_viewport:
                    hidden = True
                if obj.hide_render:
                    hidden = True
                if not hidden:
                    for slot in obj.material_slots:
                        mat = slot.material
                        node_tree = mat.node_tree
                        nodes = mat.node_tree.nodes
                        scene = bpy.context.scene
                        dirpath = os.path.join(os.path.dirname(bpy.data.filepath), scene.TLM_EngineProperties.tlm_lightmap_savedir)
                        #Find nodes
                        for node in nodes:
                            if node.name == "Baked Image":
                                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                    print("Finding node source for material: " + mat.name + " @ " + obj.name)
                                extension = ".hdr"
                                # Later postprocess stages override the postfix.
                                postfix = "_baked"
                                if scene.TLM_SceneProperties.tlm_denoise_use:
                                    postfix = "_denoised"
                                if scene.TLM_SceneProperties.tlm_filtering_use:
                                    postfix = "_filtered"
                                if node.image:
                                    node.image.source = "FILE"
                                    # Atlas-group and postpacked objects share an atlas image.
                                    if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
                                        print("Atlas object image")
                                        image_name = obj.TLM_ObjectProperties.tlm_atlas_pointer + postfix + extension #TODO FIX EXTENSION
                                    elif obj.TLM_ObjectProperties.tlm_postpack_object:
                                        print("Atlas object image (postpack)")
                                        image_name = obj.TLM_ObjectProperties.tlm_postatlas_pointer + postfix + extension #TODO FIX EXTENSION
                                    else:
                                        print("Baked object image")
                                        image_name = obj.name + postfix + extension #TODO FIX EXTENSION
                                    node.image.filepath_raw = os.path.join(dirpath, image_name)
def apply_materials(load_atlas=0):
    """Wire the baked lightmap into every lightmapped object's material node graph.

    For each enabled, visible mesh object: restores "_temp" material names,
    loads any required decode/exposure node groups from the asset library,
    then for each non-ignored material inserts (or reuses) a lightmap texture
    node, a multiply MixRGB node, a lightmap UV node and optional decode /
    exposure nodes, and links them into the main shader's base-color input.
    """
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print("Applying materials")
    if load_atlas:
        print("- In load Atlas mode")
    for obj in bpy.context.scene.objects:
        if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                hidden = False
                if obj.hide_get():
                    hidden = True
                if obj.hide_viewport:
                    hidden = True
                if obj.hide_render:
                    hidden = True
                if not hidden:
                    uv_layers = obj.data.uv_layers
                    uv_layers.active_index = 0
                    scene = bpy.context.scene
                    decoding = False
                    #Sort name
                    for slot in obj.material_slots:
                        mat = slot.material
                        if mat.name.endswith('_temp'):
                            old = slot.material
                            slot.material = bpy.data.materials[old.name.split('_' + obj.name)[0]]
                    # Make sure the decode node groups exist before wiring them in.
                    if(scene.TLM_SceneProperties.tlm_decoder_setup):
                        tlm_rgbm = bpy.data.node_groups.get('RGBM Decode')
                        tlm_rgbd = bpy.data.node_groups.get('RGBD Decode')
                        tlm_logluv = bpy.data.node_groups.get('LogLuv Decode')
                        if tlm_rgbm == None:
                            load_library('RGBM Decode')
                        if tlm_rgbd == None:
                            load_library('RGBD Decode')
                        if tlm_logluv == None:
                            load_library('LogLuv Decode')
                    if(scene.TLM_EngineProperties.tlm_exposure_multiplier > 0):
                        tlm_exposure = bpy.data.node_groups.get("Exposure")
                        if tlm_exposure == None:
                            load_library("Exposure")
                    #Apply materials
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print(obj.name)
                    for slot in obj.material_slots:
                        mat = slot.material
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print(slot.material)
                        if not mat.TLM_ignore:
                            node_tree = mat.node_tree
                            nodes = mat.node_tree.nodes
                            foundBakedNode = False
                            #Find nodes: reuse an existing "Baked Image" node if present.
                            for node in nodes:
                                if node.name == "Baked Image":
                                    lightmapNode = node
                                    lightmapNode.location = -1200, 300
                                    lightmapNode.name = "TLM_Lightmap"
                                    foundBakedNode = True
                            # if load_atlas:
                            #     print("Load Atlas for: " + obj.name)
                            #     img_name = obj.TLM_ObjectProperties.tlm_atlas_pointer + '_baked'
                            #     print("Src: " + img_name)
                            # else:
                            #     img_name = obj.name + '_baked'
                            img_name = obj.name + '_baked'
                            if not foundBakedNode:
                                if scene.TLM_EngineProperties.tlm_target == "vertex":
                                    lightmapNode = node_tree.nodes.new(type="ShaderNodeVertexColor")
                                    lightmapNode.location = -1200, 300
                                    lightmapNode.name = "TLM_Lightmap"
                                else:
                                    lightmapNode = node_tree.nodes.new(type="ShaderNodeTexImage")
                                    lightmapNode.location = -1200, 300
                                    lightmapNode.name = "TLM_Lightmap"
                                    lightmapNode.interpolation = bpy.context.scene.TLM_SceneProperties.tlm_texture_interpolation
                                    lightmapNode.extension = bpy.context.scene.TLM_SceneProperties.tlm_texture_extrapolation
                                    if (obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA" and obj.TLM_ObjectProperties.tlm_atlas_pointer != ""):
                                        lightmapNode.image = bpy.data.images[obj.TLM_ObjectProperties.tlm_atlas_pointer + "_baked"]
                                    else:
                                        lightmapNode.image = bpy.data.images[img_name]
                            #Find output node (fall back to scanning if nodes[0] isn't it)
                            outputNode = nodes[0]
                            if(outputNode.type != "OUTPUT_MATERIAL"):
                                for node in node_tree.nodes:
                                    if node.type == "OUTPUT_MATERIAL":
                                        outputNode = node
                                        break
                            #Find mainnode: the shader feeding the output, unwrapping mix/add shaders.
                            mainNode = outputNode.inputs[0].links[0].from_node
                            if (mainNode.type == "MIX_SHADER"):
                                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                    print("Mix shader found")
                                #TODO SHIFT BETWEEN from node input 1 or 2 based on which type
                                mainNode = outputNode.inputs[0].links[0].from_node.inputs[1].links[0].from_node
                            if (mainNode.type == "ADD_SHADER"):
                                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                    print("Mix shader found")
                                mainNode = outputNode.inputs[0].links[0].from_node.inputs[0].links[0].from_node
                            if (mainNode.type == "ShaderNodeMixRGB"):
                                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                    print("Mix RGB shader found")
                                mainNode = outputNode.inputs[0].links[0].from_node.inputs[0].links[0].from_node
                            #Add all nodes first
                            #Add lightmap multipliction texture
                            mixNode = node_tree.nodes.new(type="ShaderNodeMixRGB")
                            mixNode.name = "Lightmap_Multiplication"
                            mixNode.location = -800, 300
                            # NOTE(review): both branches set MULTIPLY — the mode split
                            # currently has no effect; confirm intent.
                            if scene.TLM_EngineProperties.tlm_lighting_mode == "indirect" or scene.TLM_EngineProperties.tlm_lighting_mode == "indirectAO":
                                mixNode.blend_type = 'MULTIPLY'
                            else:
                                mixNode.blend_type = 'MULTIPLY'
                            # "complete" bakes already include albedo, so the mix factor is zeroed.
                            if scene.TLM_EngineProperties.tlm_lighting_mode == "complete":
                                mixNode.inputs[0].default_value = 0.0
                            else:
                                mixNode.inputs[0].default_value = 1.0
                            UVLightmap = node_tree.nodes.new(type="ShaderNodeUVMap")
                            if not obj.TLM_ObjectProperties.tlm_use_default_channel:
                                uv_channel = obj.TLM_ObjectProperties.tlm_uv_channel
                            else:
                                uv_channel = "UVMap_Lightmap"
                            UVLightmap.uv_map = uv_channel
                            UVLightmap.name = "Lightmap_UV"
                            UVLightmap.location = -1500, 300
                            if(scene.TLM_SceneProperties.tlm_decoder_setup):
                                if scene.TLM_SceneProperties.tlm_encoding_device == "CPU":
                                    if scene.TLM_SceneProperties.tlm_encoding_mode_a == 'RGBM':
                                        DecodeNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                        DecodeNode.node_tree = bpy.data.node_groups["RGBM Decode"]
                                        DecodeNode.location = -400, 300
                                        DecodeNode.name = "Lightmap_RGBM_Decode"
                                        decoding = True
                                    # NOTE(review): checks mode_b inside the CPU branch while
                                    # RGBM above checks mode_a — possibly should be mode_a; confirm.
                                    if scene.TLM_SceneProperties.tlm_encoding_mode_b == "RGBD":
                                        DecodeNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                        DecodeNode.node_tree = bpy.data.node_groups["RGBD Decode"]
                                        DecodeNode.location = -400, 300
                                        DecodeNode.name = "Lightmap_RGBD_Decode"
                                        decoding = True
                                else:
                                    if scene.TLM_SceneProperties.tlm_encoding_mode_b == 'RGBM':
                                        DecodeNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                        DecodeNode.node_tree = bpy.data.node_groups["RGBM Decode"]
                                        DecodeNode.location = -400, 300
                                        DecodeNode.name = "Lightmap_RGBM_Decode"
                                        decoding = True
                                    if scene.TLM_SceneProperties.tlm_encoding_mode_b == "RGBD":
                                        DecodeNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                        DecodeNode.node_tree = bpy.data.node_groups["RGBD Decode"]
                                        DecodeNode.location = -400, 300
                                        DecodeNode.name = "Lightmap_RGBD_Decode"
                                        decoding = True
                                    if scene.TLM_SceneProperties.tlm_encoding_mode_b == "LogLuv":
                                        DecodeNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                        DecodeNode.node_tree = bpy.data.node_groups["LogLuv Decode"]
                                        DecodeNode.location = -400, 300
                                        DecodeNode.name = "Lightmap_LogLuv_Decode"
                                        decoding = True
                                    # Split-premultiplied lightmaps use a second image node (_W part).
                                    if scene.TLM_SceneProperties.tlm_split_premultiplied:
                                        lightmapNodeExtra = node_tree.nodes.new(type="ShaderNodeTexImage")
                                        lightmapNodeExtra.location = -1200, 800
                                        lightmapNodeExtra.name = "TLM_Lightmap_Extra"
                                        lightmapNodeExtra.interpolation = bpy.context.scene.TLM_SceneProperties.tlm_texture_interpolation
                                        lightmapNodeExtra.extension = bpy.context.scene.TLM_SceneProperties.tlm_texture_extrapolation
                                        lightmapNodeExtra.image = lightmapNode.image
                                        # #IF OBJ IS USING ATLAS?
                                        # if (obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA" and obj.TLM_ObjectProperties.tlm_atlas_pointer != ""):
                                        #     #lightmapNode.image = bpy.data.images[obj.TLM_ObjectProperties.tlm_atlas_pointer + "_baked"]
                                        #     #print("OBS! OBJ IS USING ATLAS, RESULT WILL BE WRONG!")
                                        #     #bpy.app.driver_namespace["logman"].append("OBS! OBJ IS USING ATLAS, RESULT WILL BE WRONG!")
                                        #     pass
                                        # if (obj.TLM_ObjectProperties.tlm_postpack_object and obj.TLM_ObjectProperties.tlm_postatlas_pointer != ""):
                                        #     #print("OBS! OBJ IS USING ATLAS, RESULT WILL BE WRONG!")
                                        #     #bpy.app.driver_namespace["logman"].append("OBS! OBJ IS USING ATLAS, RESULT WILL BE WRONG!")
                                        #     print()
                                        #     lightmapNodeExtra.image = lightmapNode.image
                                        #lightmapPath = lightmapNode.image.filepath_raw
                                        #print("PREM: " + lightmapPath)
                            if(scene.TLM_EngineProperties.tlm_exposure_multiplier > 0):
                                ExposureNode = node_tree.nodes.new(type="ShaderNodeGroup")
                                ExposureNode.node_tree = bpy.data.node_groups["Exposure"]
                                ExposureNode.inputs[1].default_value = scene.TLM_EngineProperties.tlm_exposure_multiplier
                                ExposureNode.location = -500, 300
                                ExposureNode.name = "Lightmap_Exposure"
                            #Add Basecolor node: create one if base color was a plain value,
                            #otherwise reuse whatever node already feeds it.
                            if len(mainNode.inputs[0].links) == 0:
                                baseColorValue = mainNode.inputs[0].default_value
                                baseColorNode = node_tree.nodes.new(type="ShaderNodeRGB")
                                baseColorNode.outputs[0].default_value = baseColorValue
                                baseColorNode.location = ((mainNode.location[0] - 1100, mainNode.location[1] - 300))
                                baseColorNode.name = "Lightmap_BasecolorNode_A"
                            else:
                                baseColorNode = mainNode.inputs[0].links[0].from_node
                                baseColorNode.name = "LM_P"
                            #Linking
                            if decoding and scene.TLM_SceneProperties.tlm_encoding_use:
                                if(scene.TLM_EngineProperties.tlm_exposure_multiplier > 0):
                                    mat.node_tree.links.new(lightmapNode.outputs[0], DecodeNode.inputs[0]) #Connect lightmap node to decodenode
                                    if scene.TLM_SceneProperties.tlm_split_premultiplied:
                                        mat.node_tree.links.new(lightmapNodeExtra.outputs[0], DecodeNode.inputs[1]) #Connect lightmap node to decodenode
                                    else:
                                        mat.node_tree.links.new(lightmapNode.outputs[1], DecodeNode.inputs[1]) #Connect lightmap node to decodenode
                                    mat.node_tree.links.new(DecodeNode.outputs[0], mixNode.inputs[1]) #Connect decode node to mixnode
                                    mat.node_tree.links.new(ExposureNode.outputs[0], mixNode.inputs[1]) #Connect exposure node to mixnode
                                else:
                                    mat.node_tree.links.new(lightmapNode.outputs[0], DecodeNode.inputs[0]) #Connect lightmap node to decodenode
                                    if scene.TLM_SceneProperties.tlm_split_premultiplied:
                                        mat.node_tree.links.new(lightmapNodeExtra.outputs[0], DecodeNode.inputs[1]) #Connect lightmap node to decodenode
                                    else:
                                        mat.node_tree.links.new(lightmapNode.outputs[1], DecodeNode.inputs[1]) #Connect lightmap node to decodenode
                                    mat.node_tree.links.new(DecodeNode.outputs[0], mixNode.inputs[1]) #Connect lightmap node to mixnode
                                mat.node_tree.links.new(baseColorNode.outputs[0], mixNode.inputs[2]) #Connect basecolor to pbr node
                                mat.node_tree.links.new(mixNode.outputs[0], mainNode.inputs[0]) #Connect mixnode to pbr node
                                if not scene.TLM_EngineProperties.tlm_target == "vertex":
                                    mat.node_tree.links.new(UVLightmap.outputs[0], lightmapNode.inputs[0]) #Connect uvnode to lightmapnode
                                    if scene.TLM_SceneProperties.tlm_split_premultiplied:
                                        mat.node_tree.links.new(UVLightmap.outputs[0], lightmapNodeExtra.inputs[0]) #Connect uvnode to lightmapnode
                            else:
                                if(scene.TLM_EngineProperties.tlm_exposure_multiplier > 0):
                                    mat.node_tree.links.new(lightmapNode.outputs[0], ExposureNode.inputs[0]) #Connect lightmap node to mixnode
                                    mat.node_tree.links.new(ExposureNode.outputs[0], mixNode.inputs[1]) #Connect lightmap node to mixnode
                                else:
                                    mat.node_tree.links.new(lightmapNode.outputs[0], mixNode.inputs[1]) #Connect lightmap node to mixnode
                                mat.node_tree.links.new(baseColorNode.outputs[0], mixNode.inputs[2]) #Connect basecolor to pbr node
                                mat.node_tree.links.new(mixNode.outputs[0], mainNode.inputs[0]) #Connect mixnode to pbr node
                                if not scene.TLM_EngineProperties.tlm_target == "vertex":
                                    mat.node_tree.links.new(UVLightmap.outputs[0], lightmapNode.inputs[0]) #Connect uvnode to lightmapnode
                            #If skip metallic
                            if scene.TLM_SceneProperties.tlm_metallic_clamp == "skip":
                                if mainNode.inputs[4].default_value > 0.1: #DELIMITER
                                    moutput = mainNode.inputs[0].links[0].from_node
                                    mat.node_tree.links.remove(moutput.outputs[0].links[0])
def exchangeLightmapsToPostfix(ext_postfix, new_postfix, formatHDR=".hdr"):
    """Rewrite lightmap image filepaths, swapping *ext_postfix* for *new_postfix*.

    Used when switching pipeline stages (e.g. "_baked" -> "_denoised" ->
    "_filtered" -> "_encoded"). *formatHDR* selects the target extension;
    for non-HDR targets the old ".hdr" suffix length is used when cutting.
    All images are reloaded afterwards. No-op for vertex-color targets.
    """
    if not bpy.context.scene.TLM_EngineProperties.tlm_target == "vertex":
        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print(ext_postfix, new_postfix, formatHDR)
        for obj in bpy.context.scene.objects:
            if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    #Here
                    #If the object is part of atlas
                    print("CHECKING FOR REPART")
                    if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA": #TODO, ALSO CONFIGURE FOR POSTATLAS
                        if bpy.context.scene.TLM_AtlasList[obj.TLM_ObjectProperties.tlm_atlas_pointer].tlm_atlas_merge_samemat:
                            #For each material we check if it ends with a number
                            for slot in obj.material_slots:
                                part = slot.name.rpartition('.')
                                if part[2].isnumeric() and part[0] in bpy.data.materials:
                                    print("Material for obj: " + obj.name + " was numeric, and the material: " + part[0] + " was found.")
                                    slot.material = bpy.data.materials.get(part[0])
                    # for slot in obj.material_slots:
                    #     mat = slot.material
                    #     node_tree = mat.node_tree
                    #     nodes = mat.node_tree.nodes
                    try:
                        hidden = False
                        if obj.hide_get():
                            hidden = True
                        if obj.hide_viewport:
                            hidden = True
                        if obj.hide_render:
                            hidden = True
                        if not hidden:
                            for slot in obj.material_slots:
                                mat = slot.material
                                node_tree = mat.node_tree
                                nodes = mat.node_tree.nodes
                                # First pass: swap the postfix on each lightmap image path.
                                for node in nodes:
                                    if node.name == "Baked Image" or node.name == "TLM_Lightmap":
                                        img_name = node.image.filepath_raw
                                        cutLen = len(ext_postfix + formatHDR)
                                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                            print("Len:" + str(len(ext_postfix + formatHDR)) + "|" + ext_postfix + ".." + formatHDR)
                                        #Simple way to sort out objects with multiple materials
                                        if formatHDR == ".hdr" or formatHDR == ".exr":
                                            if not node.image.filepath_raw.endswith(new_postfix + formatHDR):
                                                print("Node1: " + node.image.filepath_raw + " => " + img_name[:-cutLen] + new_postfix + formatHDR)
                                                node.image.filepath_raw = img_name[:-cutLen] + new_postfix + formatHDR
                                        else:
                                            # Non-HDR target: the current path still ends in ".hdr".
                                            cutLen = len(ext_postfix + ".hdr")
                                            if not node.image.filepath_raw.endswith(new_postfix + formatHDR):
                                                if not node.image.filepath_raw.endswith("_XYZ.png"):
                                                    print("Node2: " + node.image.filepath_raw + " => " + img_name[:-cutLen] + new_postfix + formatHDR)
                                                    node.image.filepath_raw = img_name[:-cutLen] + new_postfix + formatHDR
                                # Second pass: LogLuv split-premultiplied uses two images (_XYZ and _W).
                                for node in nodes:
                                    if bpy.context.scene.TLM_SceneProperties.tlm_encoding_use and bpy.context.scene.TLM_SceneProperties.tlm_encoding_mode_b == "LogLuv":
                                        if bpy.context.scene.TLM_SceneProperties.tlm_split_premultiplied:
                                            if node.name == "TLM_Lightmap":
                                                img_name = node.image.filepath_raw
                                                print("PREM Main: " + img_name)
                                                if node.image.filepath_raw.endswith("_encoded.png"):
                                                    print(node.image.filepath_raw + " => " + node.image.filepath_raw[:-4] + "_XYZ.png")
                                                    if not node.image.filepath_raw.endswith("_XYZ.png"):
                                                        node.image.filepath_raw = node.image.filepath_raw[:-4] + "_XYZ.png"
                                            if node.name == "TLM_Lightmap_Extra":
                                                img_path = node.image.filepath_raw[:-8] + "_W.png"
                                                img = bpy.data.images.load(img_path)
                                                node.image = img
                                                bpy.data.images.load(img_path)
                                                print("PREM Extra: " + img_path)
                                                node.image.filepath_raw = img_path
                                                node.image.colorspace_settings.name = "Linear"
                    except:
                        print("Error occured with postfix change for obj: " + obj.name)
    # Refresh every image datablock so the new paths take effect.
    for image in bpy.data.images:
        image.reload()
def applyAOPass():
    """Composite the separately baked AO map on top of each lightmapped material.

    For every lightmapped, visible object: loads the "<lightmap>_ao" image and
    inserts a multiply MixRGB node between the existing lightmap multiplication
    node and the Principled BSDF base-color input.
    """
    for obj in bpy.context.scene.objects:
        if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                hidden = False
                if obj.hide_get():
                    hidden = True
                if obj.hide_viewport:
                    hidden = True
                if obj.hide_render:
                    hidden = True
                if not hidden:
                    for slot in obj.material_slots:
                        mat = slot.material
                        node_tree = mat.node_tree
                        nodes = mat.node_tree.nodes
                        for node in nodes:
                            if node.name == "Baked Image" or node.name == "TLM_Lightmap":
                                filepath = bpy.data.filepath
                                dirpath = os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_EngineProperties.tlm_lightmap_savedir)
                                LightmapPath = node.image.filepath_raw
                                filebase = os.path.basename(LightmapPath)
                                filename = os.path.splitext(filebase)[0]
                                extension = os.path.splitext(filebase)[1]
                                # NOTE(review): assumes the filename ends with a 4-char
                                # stage tag that is replaced by "_ao" — confirm.
                                AOImagefile = filename[:-4] + "_ao"
                                AOImagePath = os.path.join(dirpath, AOImagefile + extension)
                                AOMap = nodes.new('ShaderNodeTexImage')
                                AOMap.name = "TLM_AOMap"
                                AOImage = bpy.data.images.load(AOImagePath)
                                AOMap.image = AOImage
                                AOMap.location = -800, 0
                                AOMult = nodes.new(type="ShaderNodeMixRGB")
                                AOMult.name = "TLM_AOMult"
                                AOMult.blend_type = 'MULTIPLY'
                                AOMult.inputs[0].default_value = 1.0
                                AOMult.location = -300, 300
                                multyNode = nodes["Lightmap_Multiplication"]
                                mainNode = nodes["Principled BSDF"]
                                UVMapNode = nodes["Lightmap_UV"]
                                # Rewire: lightmap multiplication -> AO multiply -> BSDF base color.
                                node_tree.links.remove(multyNode.outputs[0].links[0])
                                node_tree.links.new(multyNode.outputs[0], AOMult.inputs[1])
                                node_tree.links.new(AOMap.outputs[0], AOMult.inputs[2])
                                node_tree.links.new(AOMult.outputs[0], mainNode.inputs[0])
                                node_tree.links.new(UVMapNode.outputs[0], AOMap.inputs[0])
def load_library(asset_name):
    """Append the named node group from the bundled tlm_data.blend asset file.

    The appended group is given a fake user so it survives saves with no users.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # Prevent load in library itself.
    if bpy.data.filepath.endswith('tlm_data.blend'):
        return
    blend_path = os.path.abspath(os.path.join(script_dir, '..', '..', 'assets/tlm_data.blend'))
    # Import: libraries.load replaces the requested names with the datablocks.
    requested = [asset_name]
    with bpy.data.libraries.load(blend_path, link=False) as (data_from, data_to):
        data_to.node_groups = requested
    for group in requested:
        group.use_fake_user = True

View File

@ -0,0 +1,947 @@
import bpy, math, time
from . import cache
from .. utility import *
def assemble():
    """Run the world, light and mesh configuration steps in order."""
    configure_world()
    configure_lights()
    # NOTE(review): configure_meshes in this module is defined with a parameter
    # (self) but is called here without one — confirm which signature is intended.
    configure_meshes()
def init(self, prev_container):
    """Initialize the Cycles lightmapping pass.

    Stores existing scene state into *prev_container*, applies bake settings,
    then configures the world, lights and meshes. Mesh-configuration errors
    are caught and reported instead of aborting the whole setup.
    """
    store_existing(prev_container)
    set_settings()
    configure_world()
    configure_lights()
    # Error guard re-enabled — it had been commented out for debugging, with a
    # reminder in the source to restore it.
    try:
        configure_meshes(self)
    except Exception as e:
        print("An error occured during mesh configuration. See error below:")
        print(f"{type(e).__name__} at line {e.__traceback__.tb_lineno} of {__file__}: {e}")
        if not bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("Turn on verbose mode to get more detail.")
def configure_world():
    """Placeholder: world configuration for Cycles baking (not yet implemented)."""
    pass
def configure_lights():
    """Placeholder: light configuration for Cycles baking (not yet implemented)."""
    pass
def configure_meshes(self):
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Configuring meshes: Start")
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Configuring meshes: Material restore")
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
cache.backup_material_restore(obj)
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Configuring meshes: Material rename check")
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
cache.backup_material_rename(obj)
for mat in bpy.data.materials:
if mat.users < 1:
bpy.data.materials.remove(mat)
for mat in bpy.data.materials:
if mat.name.startswith("."):
if "_Original" in mat.name:
bpy.data.materials.remove(mat)
#for image in bpy.data.images:
# if image.name.endswith("_baked"):
# bpy.data.images.remove(image, do_unlink=True)
iterNum = 0
currentIterNum = 0
scene = bpy.context.scene
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Object: Setting UV, converting modifiers and prepare channels")
#OBJECT: Set UV, CONVERT AND PREPARE
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
hidden = False
#We check if the object is hidden
if obj.hide_get():
hidden = True
if obj.hide_viewport:
hidden = True
if obj.hide_render:
hidden = True
#We check if the object's collection is hidden
collections = obj.users_collection
for collection in collections:
if collection.hide_viewport:
hidden = True
if collection.hide_render:
hidden = True
try:
if collection.name in bpy.context.scene.view_layers[0].layer_collection.children:
if bpy.context.scene.view_layers[0].layer_collection.children[collection.name].hide_viewport:
hidden = True
except:
print("Error: Could not find collection: " + collection.name)
#Additional check for zero poly meshes
mesh = obj.data
if (len(mesh.polygons)) < 1:
print("Found an object with zero polygons. Skipping object: " + obj.name)
obj.TLM_ObjectProperties.tlm_mesh_lightmap_use = False
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use and not hidden:
print("Preparing: UV initiation for object: " + obj.name)
if len(obj.data.vertex_colors) < 1:
obj.data.vertex_colors.new(name="TLM")
if scene.TLM_SceneProperties.tlm_reset_uv:
uv_layers = obj.data.uv_layers
uv_channel = "UVMap_Lightmap"
for uvlayer in uv_layers:
if uvlayer.name == uv_channel:
uv_layers.remove(uvlayer)
if scene.TLM_SceneProperties.tlm_apply_on_unwrap:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Applying transform to: " + obj.name)
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
if scene.TLM_SceneProperties.tlm_apply_modifiers:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Applying modifiers to: " + obj.name)
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
bpy.ops.object.convert(target='MESH')
for slot in obj.material_slots:
material = slot.material
skipIncompatibleMaterials(material)
obj.hide_select = False #Remember to toggle this back
for slot in obj.material_slots:
if "." + slot.name + '_Original' in bpy.data.materials:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("The material: " + slot.name + " shifted to " + "." + slot.name + '_Original')
slot.material = bpy.data.materials["." + slot.name + '_Original']
#ATLAS UV PROJECTING
print("PREPARE: ATLAS")
for atlasgroup in scene.TLM_AtlasList:
print("Adding UV Projection for Atlas group: " + atlasgroup.name)
atlas = atlasgroup.name
atlas_items = []
bpy.ops.object.select_all(action='DESELECT')
#Atlas: Set UV, CONVERT AND PREPARE
for obj in bpy.context.scene.objects:
if obj.TLM_ObjectProperties.tlm_atlas_pointer == atlasgroup.name:
hidden = False
#We check if the object is hidden
if obj.hide_get():
hidden = True
if obj.hide_viewport:
hidden = True
if obj.hide_render:
hidden = True
#We check if the object's collection is hidden
collections = obj.users_collection
for collection in collections:
if collection.hide_viewport:
hidden = True
if collection.hide_render:
hidden = True
try:
if collection.name in bpy.context.scene.view_layers[0].layer_collection.children:
if bpy.context.scene.view_layers[0].layer_collection.children[collection.name].hide_viewport:
hidden = True
except:
print("Error: Could not find collection: " + collection.name)
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA" and not hidden:
uv_layers = obj.data.uv_layers
if not obj.TLM_ObjectProperties.tlm_use_default_channel:
uv_channel = obj.TLM_ObjectProperties.tlm_uv_channel
else:
uv_channel = "UVMap_Lightmap"
if not uv_channel in uv_layers:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("UV map created for object: " + obj.name)
uvmap = uv_layers.new(name=uv_channel)
uv_layers.active_index = len(uv_layers) - 1
else:
print("Existing UV map found for object: " + obj.name)
for i in range(0, len(uv_layers)):
if uv_layers[i].name == 'UVMap_Lightmap':
uv_layers.active_index = i
break
atlas_items.append(obj)
obj.select_set(True)
if atlasgroup.tlm_atlas_lightmap_unwrap_mode == "SmartProject":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Atlasgroup Smart Project for: " + str(atlas_items))
for obj in atlas_items:
print("Applying Smart Project to: ")
print(obj.name + ": Active UV: " + obj.data.uv_layers[obj.data.uv_layers.active_index].name)
if len(atlas_items) > 0:
bpy.context.view_layer.objects.active = atlas_items[0]
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
#API changes in 2.91 causes errors:
if (2, 91, 0) > bpy.app.version:
bpy.ops.uv.smart_project(angle_limit=45.0, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, user_area_weight=1.0, use_aspect=True, stretch_to_bounds=False)
else:
angle = math.radians(45.0)
bpy.ops.uv.smart_project(angle_limit=angle, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, area_weight=1.0, correct_aspect=True, scale_to_bounds=False)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
print("Smart project done.")
elif atlasgroup.tlm_atlas_lightmap_unwrap_mode == "Lightmap":
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.uv.lightmap_pack('EXEC_SCREEN', PREF_CONTEXT='ALL_FACES', PREF_MARGIN_DIV=atlasgroup.tlm_atlas_unwrap_margin)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
elif atlasgroup.tlm_atlas_lightmap_unwrap_mode == "Xatlas":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Using Xatlas on Atlas Group: " + atlas)
for obj in atlas_items:
obj.select_set(True)
if len(atlas_items) > 0:
bpy.context.view_layer.objects.active = atlas_items[0]
bpy.ops.object.mode_set(mode='EDIT')
Unwrap_Lightmap_Group_Xatlas_2_headless_call(obj)
bpy.ops.object.mode_set(mode='OBJECT')
else:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Copied Existing UV Map for Atlas Group: " + atlas)
if atlasgroup.tlm_use_uv_packer:
bpy.ops.object.select_all(action='DESELECT')
for obj in atlas_items:
obj.select_set(True)
if len(atlas_items) > 0:
bpy.context.view_layer.objects.active = atlas_items[0]
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.context.scene.UVPackerProps.uvp_padding = atlasgroup.tlm_uv_packer_padding
bpy.context.scene.UVPackerProps.uvp_engine = atlasgroup.tlm_uv_packer_packing_engine
#print(x)
print("!!!!!!!!!!!!!!!!!!!!! Using UV Packer on: " + obj.name)
if uv_layers.active == "UVMap_Lightmap":
print("YES")
else:
print("NO")
uv_layers.active_index = len(uv_layers) - 1
if uv_layers.active == "UVMap_Lightmap":
print("YES")
else:
print("NO")
uv_layers.active_index = len(uv_layers) - 1
bpy.ops.uvpackeroperator.packbtn()
# if bpy.context.scene.UVPackerProps.uvp_engine == "OP0":
# time.sleep(1)
# else:
# time.sleep(2)
time.sleep(2)
#FIX THIS! MAKE A SEPARATE CALL. THIS IS A THREADED ASYNC
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
#print(x)
for obj in bpy.context.scene.objects:
if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
iterNum = iterNum + 1
#OBJECT UV PROJECTING
print("PREPARE: OBJECTS")
for obj in bpy.context.scene.objects:
if obj.name in bpy.context.view_layer.objects: #Possible fix for view layer error
if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
hidden = False
#We check if the object is hidden
if obj.hide_get():
hidden = True
if obj.hide_viewport:
hidden = True
if obj.hide_render:
hidden = True
#We check if the object's collection is hidden
collections = obj.users_collection
for collection in collections:
if collection.hide_viewport:
hidden = True
if collection.hide_render:
hidden = True
try:
if collection.name in bpy.context.scene.view_layers[0].layer_collection.children:
if bpy.context.scene.view_layers[0].layer_collection.children[collection.name].hide_viewport:
hidden = True
except:
print("Error: Could not find collection: " + collection.name)
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use and not hidden:
objWasHidden = False
#For some reason, a Blender bug might prevent invisible objects from being smart projected
#We will turn the object temporarily visible
obj.hide_viewport = False
obj.hide_set(False)
currentIterNum = currentIterNum + 1
#Configure selection
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
obs = bpy.context.view_layer.objects
active = obs.active
#Provide material if none exists
print("Preprocessing material for: " + obj.name)
preprocess_material(obj, scene)
#UV Layer management here
if not obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
print("Managing layer for Obj: " + obj.name)
uv_layers = obj.data.uv_layers
if not obj.TLM_ObjectProperties.tlm_use_default_channel:
uv_channel = obj.TLM_ObjectProperties.tlm_uv_channel
else:
uv_channel = "UVMap_Lightmap"
if not uv_channel in uv_layers:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("UV map created for obj: " + obj.name)
uvmap = uv_layers.new(name=uv_channel)
uv_layers.active_index = len(uv_layers) - 1
#If lightmap
if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "Lightmap":
bpy.ops.uv.lightmap_pack('EXEC_SCREEN', PREF_CONTEXT='ALL_FACES', PREF_MARGIN_DIV=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin)
#If smart project
elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "SmartProject":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Smart Project B")
bpy.ops.object.select_all(action='DESELECT')
obj.select_set(True)
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
#API changes in 2.91 causes errors:
if (2, 91, 0) > bpy.app.version:
bpy.ops.uv.smart_project(angle_limit=45.0, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, user_area_weight=1.0, use_aspect=True, stretch_to_bounds=False)
else:
angle = math.radians(45.0)
bpy.ops.uv.smart_project(angle_limit=angle, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, area_weight=1.0, correct_aspect=True, scale_to_bounds=False)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "Xatlas":
Unwrap_Lightmap_Group_Xatlas_2_headless_call(obj)
elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("ATLAS GROUP: " + obj.TLM_ObjectProperties.tlm_atlas_pointer)
else: #if copy existing
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Copied Existing UV Map for object: " + obj.name)
if obj.TLM_ObjectProperties.tlm_use_uv_packer:
bpy.ops.object.select_all(action='DESELECT')
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.context.scene.UVPackerProps.uvp_padding = obj.TLM_ObjectProperties.tlm_uv_packer_padding
bpy.context.scene.UVPackerProps.uvp_engine = obj.TLM_ObjectProperties.tlm_uv_packer_packing_engine
#print(x)
print("!!!!!!!!!!!!!!!!!!!!! Using UV Packer on: " + obj.name)
if uv_layers.active == "UVMap_Lightmap":
print("YES")
else:
print("NO")
uv_layers.active_index = len(uv_layers) - 1
if uv_layers.active == "UVMap_Lightmap":
print("YES")
else:
print("NO")
uv_layers.active_index = len(uv_layers) - 1
bpy.ops.uvpackeroperator.packbtn()
if bpy.context.scene.UVPackerProps.uvp_engine == "OP0":
time.sleep(1)
else:
time.sleep(2)
#FIX THIS! MAKE A SEPARATE CALL. THIS IS A THREADED ASYNC
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
#print(x)
else:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Existing UV map found for obj: " + obj.name)
for i in range(0, len(uv_layers)):
if uv_layers[i].name == uv_channel:
uv_layers.active_index = i
break
#print(x)
#Sort out nodes
for slot in obj.material_slots:
nodetree = slot.material.node_tree
outputNode = nodetree.nodes[0] #Presumed to be material output node
if(outputNode.type != "OUTPUT_MATERIAL"):
for node in nodetree.nodes:
if node.type == "OUTPUT_MATERIAL":
outputNode = node
break
mainNode = outputNode.inputs[0].links[0].from_node
if mainNode.type not in ['BSDF_PRINCIPLED','BSDF_DIFFUSE','GROUP']:
#TODO! FIND THE PRINCIPLED PBR
self.report({'INFO'}, "The primary material node is not supported. Seeking first principled.")
if len(find_node_by_type(nodetree.nodes, Node_Types.pbr_node)) > 0:
mainNode = find_node_by_type(nodetree.nodes, Node_Types.pbr_node)[0]
else:
self.report({'INFO'}, "No principled found. Seeking diffuse")
if len(find_node_by_type(nodetree.nodes, Node_Types.diffuse)) > 0:
mainNode = find_node_by_type(nodetree.nodes, Node_Types.diffuse)[0]
else:
self.report({'INFO'}, "No supported nodes. Continuing anyway.")
if mainNode.type == 'GROUP':
if mainNode.node_tree != "Leenkx PBR":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("The material group is not supported!")
if (mainNode.type == "ShaderNodeMixRGB"):
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Mix shader found")
#Skip for now
slot.material.TLM_ignore = True
if (mainNode.type == "BSDF_PRINCIPLED"):
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("BSDF_Principled")
if scene.TLM_EngineProperties.tlm_directional_mode == "None":
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Directional mode")
if not len(mainNode.inputs[22].links) == 0:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("NOT LEN 0")
ninput = mainNode.inputs[22].links[0]
noutput = mainNode.inputs[22].links[0].from_node
nodetree.links.remove(noutput.outputs[0].links[0])
#Clamp metallic
if bpy.context.scene.TLM_SceneProperties.tlm_metallic_clamp == "limit":
MainMetNodeSocket = mainNode.inputs.get("Metallic")
if not len(MainMetNodeSocket.links) == 0:
print("Creating new clamp node")
nodes = nodetree.nodes
MetClampNode = nodes.new('ShaderNodeClamp')
MetClampNode.location = (-200,150)
MetClampNode.inputs[2].default_value = 0.9
minput = mainNode.inputs.get("Metallic").links[0] #Metal input socket
moutput = mainNode.inputs.get("Metallic").links[0].from_socket #Output socket
nodetree.links.remove(minput)
nodetree.links.new(moutput, MetClampNode.inputs[0]) #minput node to clamp node
nodetree.links.new(MetClampNode.outputs[0], MainMetNodeSocket) #clamp node to metinput
elif mainNode.type == "PRINCIPLED_BSDF" and MainMetNodeSocket.links[0].from_node.type == "CLAMP":
pass
else:
print("New clamp node NOT made")
if mainNode.inputs[4].default_value > 0.9:
mainNode.inputs[4].default_value = 0.9
elif bpy.context.scene.TLM_SceneProperties.tlm_metallic_clamp == "zero":
MainMetNodeSocket = mainNode.inputs[4]
if not len(MainMetNodeSocket.links) == 0:
nodes = nodetree.nodes
MetClampNode = nodes.new('ShaderNodeClamp')
MetClampNode.location = (-200,150)
MetClampNode.inputs[2].default_value = 0.0
minput = mainNode.inputs[4].links[0] #Metal input socket
moutput = mainNode.inputs[4].links[0].from_socket #Output socket
nodetree.links.remove(minput)
nodetree.links.new(moutput, MetClampNode.inputs[0]) #minput node to clamp node
nodetree.links.new(MetClampNode.outputs[0], MainMetNodeSocket) #clamp node to metinput
else:
mainNode.inputs[4].default_value = 0.0
else: #Skip
pass
if (mainNode.type == "BSDF_DIFFUSE"):
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("BSDF_Diffuse")
# if (mainNode.type == "BSDF_DIFFUSE"):
# if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
# print("BSDF_Diffuse")
#TODO FIX THIS PART!
#THIS IS USED IN CASES WHERE FOR SOME REASON THE USER FORGETS TO CONNECT SOMETHING INTO THE OUTPUT MATERIAL
for slot in obj.material_slots:
nodetree = bpy.data.materials[slot.name].node_tree
nodes = nodetree.nodes
#First search to get the first output material type
for node in nodetree.nodes:
if node.type == "OUTPUT_MATERIAL":
mainNode = node
break
#Fallback to get search
if not mainNode.type == "OUTPUT_MATERIAL":
mainNode = nodetree.nodes.get("Material Output")
#Last resort to first node in list
if not mainNode.type == "OUTPUT_MATERIAL":
mainNode = nodetree.nodes[0].inputs[0].links[0].from_node
# for node in nodes:
# if "LM" in node.name:
# nodetree.links.new(node.outputs[0], mainNode.inputs[0])
# for node in nodes:
# if "Lightmap" in node.name:
# nodes.remove(node)
def _tlm_setup_bake_image(obj, scene, create_name, final_name, res):
    # Create (or reuse) the bake target image named *final_name* at res x res,
    # wire it into every material slot as the active 'Baked Image' texture
    # node, and save it to the configured lightmap directory as .hdr.
    # A new image is made when none exists yet or when the resolution changed.
    if final_name not in bpy.data.images or bpy.data.images[final_name].size[0] != res or bpy.data.images[final_name].size[1] != res:
        img = bpy.data.images.new(create_name, int(res), int(res), alpha=True, float_buffer=True)
        # Initialize the background to the override color (if enabled) or
        # black, with full alpha.
        num_pixels = len(img.pixels)
        result_pixel = list(img.pixels)
        for i in range(0, num_pixels, 4):
            if scene.TLM_SceneProperties.tlm_override_bg_color:
                result_pixel[i + 0] = scene.TLM_SceneProperties.tlm_override_color[0]
                result_pixel[i + 1] = scene.TLM_SceneProperties.tlm_override_color[1]
                result_pixel[i + 2] = scene.TLM_SceneProperties.tlm_override_color[2]
            else:
                result_pixel[i + 0] = 0.0
                result_pixel[i + 1] = 0.0
                result_pixel[i + 2] = 0.0
            result_pixel[i + 3] = 1.0
        img.pixels = result_pixel
        img.name = final_name
    else:
        img = bpy.data.images[final_name]
    for slot in obj.material_slots:
        mat = slot.material
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        if "Baked Image" in nodes:
            img_node = nodes["Baked Image"]
        else:
            img_node = nodes.new('ShaderNodeTexImage')
            img_node.name = 'Baked Image'
            img_node.location = (100, 100)
        img_node.image = img
        img_node.select = True
        nodes.active = img_node
    #We need to save this file first in Blender 3.3 due to new filmic option?
    image = img
    saveDir = os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_EngineProperties.tlm_lightmap_savedir)
    bakemap_path = os.path.join(saveDir, image.name)
    filepath_ext = ".hdr"
    image.filepath_raw = bakemap_path + filepath_ext
    image.file_format = "HDR"
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print("Saving to: " + image.filepath_raw)
    image.save()

def preprocess_material(obj, scene):
    """Prepare *obj*'s materials for lightmap baking.

    Ensures the object has at least one material, records the original
    slot order on the object, replaces None/shared materials with private
    copies, creates per-object '_temp' material copies, and assigns the
    bake target image (atlas-group sized for atlas members, per-object
    resolution otherwise).
    """
    if len(obj.material_slots) == 0:
        # No materials at all: create a uniquely named default material.
        single = False
        number = 0
        while single == False:
            matname = obj.name + ".00" + str(number)
            if matname in bpy.data.materials:
                single = False
                number = number + 1
            else:
                mat = bpy.data.materials.new(name=matname)
                mat.use_nodes = True
                obj.data.materials.append(mat)
                single = True
    #We copy the existing material slots to an ordered array, which corresponds to the slot index
    matArray = []
    for slot in obj.material_slots:
        matArray.append(slot.name)
    obj["TLM_PrevMatArray"] = matArray
    #We check and safeguard against NoneType
    for slot in obj.material_slots:
        if slot.material is None:
            matName = obj.name + ".00" + str(0)
            bpy.data.materials.new(name=matName)
            slot.material = bpy.data.materials[matName]
            slot.material.use_nodes = True
    for slot in obj.material_slots:
        cache.backup_material_copy(slot)
        mat = slot.material
        if mat.users > 1:
            # Material is shared with other objects: bake into a private copy.
            copymat = mat.copy()
            slot.material = copymat
    #SOME ATLAS EXCLUSION HERE?
    ob = obj
    for slot in ob.material_slots:
        #If temporary material already exists
        if slot.material.name.endswith('_temp'):
            continue
        n = slot.material.name + '_' + ob.name + '_temp'
        if not n in bpy.data.materials:
            slot.material = slot.material.copy()
            slot.material.name = n
    #Add images for baking
    img_name = obj.name + '_baked'
    #Resolution is object lightmap resolution divided by global scaler
    if scene.TLM_EngineProperties.tlm_setting_supersample == "2x":
        supersampling_scale = 2
    elif scene.TLM_EngineProperties.tlm_setting_supersample == "4x":
        supersampling_scale = 4
    else:
        supersampling_scale = 1
    if (obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA" and obj.TLM_ObjectProperties.tlm_atlas_pointer != ""):
        # Atlas member: the bake image is shared and sized by the atlas group.
        atlas_image_name = obj.TLM_ObjectProperties.tlm_atlas_pointer + "_baked"
        res = int(scene.TLM_AtlasList[obj.TLM_ObjectProperties.tlm_atlas_pointer].tlm_atlas_lightmap_resolution) / int(scene.TLM_EngineProperties.tlm_resolution_scale) * int(supersampling_scale)
        _tlm_setup_bake_image(obj, scene, img_name, atlas_image_name, res)
    else:
        res = int(obj.TLM_ObjectProperties.tlm_mesh_lightmap_resolution) / int(scene.TLM_EngineProperties.tlm_resolution_scale) * int(supersampling_scale)
        _tlm_setup_bake_image(obj, scene, img_name, img_name, res)
def set_settings():
    """Configure Cycles render settings from the TLM quality preset.

    Sets the render engine, compute device, tile size and, unless the
    preset is "Custom", the sample count, bounce depths and caustics
    toggles.
    """
    scene = bpy.context.scene
    cycles = scene.cycles
    scene.render.engine = "CYCLES"
    engineProperties = scene.TLM_EngineProperties
    cycles.device = scene.TLM_EngineProperties.tlm_mode
    print(bpy.app.version)
    # Blender 3.0 removed render.tile_x/tile_y in favor of cycles.tile_size.
    # The original compared with == 3, which would hit the legacy branch
    # (and fail) on Blender 4.x; use >= instead.
    if bpy.app.version >= (3, 0, 0):
        scene.cycles.tile_size = 256 if cycles.device == "GPU" else 32
    else:
        tile = 256 if cycles.device == "GPU" else 32
        scene.render.tile_x = tile
        scene.render.tile_y = tile
    # Quality preset -> (samples, bounce depth, caustics enabled).
    presets = {
        "0": (32, 1, False),
        "1": (64, 2, False),
        "2": (512, 2, False),
        "3": (1024, 256, False),
        "4": (2048, 512, True),
    }
    preset = presets.get(engineProperties.tlm_quality)
    if preset is not None:
        samples, bounces, caustics = preset
        cycles.samples = samples
        cycles.max_bounces = bounces
        cycles.diffuse_bounces = bounces
        cycles.glossy_bounces = bounces
        cycles.transparent_max_bounces = bounces
        cycles.transmission_bounces = bounces
        cycles.volume_bounces = bounces
        cycles.caustics_reflective = caustics
        cycles.caustics_refractive = caustics
    else:
        # "Custom" preset: leave whatever the user configured untouched.
        pass
def store_existing(prev_container):
    """Snapshot the current Cycles/render state into prev_container["settings"].

    Records sampling and bounce settings, device, engine, the active
    object, the names of selected objects and the render resolution so
    they can be restored after baking.
    """
    scene = bpy.context.scene
    cycles = scene.cycles
    selected = [candidate.name for candidate in bpy.context.scene.objects
                if candidate.select_get()]
    snapshot = [
        cycles.samples,
        cycles.max_bounces,
        cycles.diffuse_bounces,
        cycles.glossy_bounces,
        cycles.transparent_max_bounces,
        cycles.transmission_bounces,
        cycles.volume_bounces,
        cycles.caustics_reflective,
        cycles.caustics_refractive,
        cycles.device,
        scene.render.engine,
        bpy.context.view_layer.objects.active,
        selected,
        [scene.render.resolution_x, scene.render.resolution_y],
    ]
    prev_container["settings"] = snapshot
def skipIncompatibleMaterials(material):
    """Flag *material* with TLM_ignore when its surface shader type cannot
    be lightmap-baked (emission, transparent, volume, hair, etc.)."""
    node_tree = material.node_tree
    all_nodes = material.node_tree.nodes
    #ADD OR MIX SHADER? CUSTOM/GROUP?
    #IF Principled has emissive or transparency?
    incompatible_types = {
        "EMISSION",
        "BSDF_TRANSPARENT",
        "BACKGROUND",
        "BSDF_HAIR",
        "BSDF_HAIR_PRINCIPLED",
        "HOLDOUT",
        "PRINCIPLED_VOLUME",
        "BSDF_REFRACTION",
        "EEVEE_SPECULAR",
        "BSDF_TRANSLUCENT",
        "VOLUME_ABSORPTION",
        "VOLUME_SCATTER",
    }
    # Locate the material output node; fall back to the first node when it
    # is not an output (same heuristic as the preparation pass).
    output_node = all_nodes[0]
    if output_node.type != "OUTPUT_MATERIAL":
        output_node = next(
            (candidate for candidate in node_tree.nodes
             if candidate.type == "OUTPUT_MATERIAL"),
            output_node,
        )
    # The shader feeding the output's surface socket decides compatibility.
    surface_shader = output_node.inputs[0].links[0].from_node
    if surface_shader.type in incompatible_types:
        material.TLM_ignore = True
        print("Ignored material: " + material.name)
def packUVPack():
    # Placeholder: no packing logic implemented yet (body is a bare pass).
    pass

View File

@ -0,0 +1,80 @@
import bpy, os
class TLM_Integrated_Denoise:
    """Denoise baked lightmaps with Blender's built-in compositor
    Denoise node, re-rendering each image to '<name>_denoised.<ext>'."""

    def __init__(self):
        # Instance-level state. The original used mutable class attributes,
        # which are shared (and mutated) across every instance.
        self.image_array = []
        self.image_output_destination = ""

    def load(self, images):
        """Store the list of lightmap file names to denoise."""
        self.image_array = images
        self.cull_undefined()

    def setOutputDir(self, dir):
        """Set the directory images are read from and written to."""
        self.image_output_destination = dir

    def cull_undefined(self):
        #Do a validation check before denoising
        cam = bpy.context.scene.camera
        if not cam:
            # The compositor render requires a scene camera; add one and
            # assign the first camera found.
            bpy.ops.object.camera_add()
            #Just select the first camera we find, needed for the compositor
            for obj in bpy.context.scene.objects:
                if obj.type == "CAMERA":
                    bpy.context.scene.camera = obj
                    return

    def denoise(self):
        """Run each image through an Image -> Denoise -> Composite chain
        and render the result as HDR to '<name>_denoised.<ext>'."""
        if not bpy.context.scene.use_nodes:
            bpy.context.scene.use_nodes = True
        tree = bpy.context.scene.node_tree
        for image in self.image_array:
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Image...: " + image)
            img = bpy.data.images.load(self.image_output_destination + "/" + image)
            image_node = tree.nodes.new(type='CompositorNodeImage')
            image_node.image = img
            image_node.location = 0, 0
            denoise_node = tree.nodes.new(type='CompositorNodeDenoise')
            denoise_node.location = 300, 0
            comp_node = tree.nodes.new('CompositorNodeComposite')
            comp_node.location = 600, 0
            links = tree.links
            links.new(image_node.outputs[0], denoise_node.inputs[0])
            links.new(denoise_node.outputs[0], comp_node.inputs[0])
            # set output resolution to image res
            bpy.context.scene.render.resolution_x = img.size[0]
            bpy.context.scene.render.resolution_y = img.size[1]
            bpy.context.scene.render.resolution_percentage = 100
            filename, file_extension = os.path.splitext(image)
            # Strip the trailing "_baked" suffix (6 chars); assumes input
            # names end with it — TODO confirm against the callers.
            filename = filename[:-6]
            bpy.context.scene.render.filepath = self.image_output_destination + "/" + filename + "_denoised" + file_extension
            bpy.context.scene.render.image_settings.file_format = "HDR"
            bpy.ops.render.render(write_still=True)
            #Cleanup
            for node in (image_node, denoise_node, comp_node):
                tree.nodes.remove(node)

View File

@ -0,0 +1,207 @@
import bpy, os, sys, re, platform, subprocess
import numpy as np
class TLM_OIDN_Denoise:
    """Denoise baked lightmaps with the Intel Open Image Denoise (OIDN)
    command-line binary, round-tripping pixel data through .pfm files."""

    def __init__(self, oidnProperties, img_array, dirpath):
        self.oidnProperties = oidnProperties
        self.image_array = img_array
        self.image_output_destination = dirpath
        # Instance-level, not class-level: the original's class-attribute
        # list was shared (and mutated) across all instances.
        self.denoised_array = []
        self.check_binary()

    def check_binary(self):
        """Ensure the configured OIDN path points at the executable.

        On Windows, a directory path gets 'oidnDenoise.exe' appended; an
        empty path only triggers a verbose warning.
        """
        oidnPath = self.oidnProperties.tlm_oidn_path
        if oidnPath != "":
            file = oidnPath
            filename, file_extension = os.path.splitext(file)
            if platform.system() == 'Windows':
                if(file_extension == ".exe"):
                    pass
                else:
                    self.oidnProperties.tlm_oidn_path = os.path.join(self.oidnProperties.tlm_oidn_path,"oidnDenoise.exe")
        else:
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Please provide OIDN path")

    def denoise(self):
        """Denoise every not-yet-processed image in image_array.

        For each image: write a .pfm, run the OIDN binary on it, read the
        denoised .pfm back and save it as '<name>_denoised.hdr'.
        """
        for image in self.image_array:
            if image not in self.denoised_array:
                image_path = os.path.join(self.image_output_destination, image)
                #Save to pfm
                loaded_image = bpy.data.images.load(image_path, check_existing=False)
                width = loaded_image.size[0]
                height = loaded_image.size[1]
                # Blender exposes a flat RGBA float list; reshape it and
                # drop the alpha channel for the PFM file.
                image_output_array = np.array(loaded_image.pixels)
                image_output_array = image_output_array.reshape(height, width, 4)
                image_output_array = np.float32(image_output_array[:,:,:3])
                image_output_denoise_destination = image_path[:-4] + ".pfm"
                image_output_denoise_result_destination = image_path[:-4] + "_denoised.pfm"
                with open(image_output_denoise_destination, "wb") as fileWritePFM:
                    self.save_pfm(fileWritePFM, image_output_array)
                #Denoise
                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                    print("Loaded image: " + str(loaded_image))
                verbose = self.oidnProperties.tlm_oidn_verbose
                affinity = self.oidnProperties.tlm_oidn_affinity
                if verbose:
                    print("Denoiser search: " + bpy.path.abspath(self.oidnProperties.tlm_oidn_path))
                    v = "3"
                else:
                    v = "0"
                if affinity:
                    a = "1"
                else:
                    a = "0"
                threads = str(self.oidnProperties.tlm_oidn_threads)
                maxmem = str(self.oidnProperties.tlm_oidn_maxmem)
                if platform.system() == 'Windows':
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    pipePath = [oidnPath, '-f', 'RTLightmap', '-hdr', image_output_denoise_destination, '-o', image_output_denoise_result_destination, '-verbose', v, '-threads', threads, '-affinity', a, '-maxmem', maxmem]
                elif platform.system() == 'Darwin':
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
                else:
                    # Linux: escape spaces because the command runs through a shell.
                    oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
                    oidnPath = oidnPath.replace(' ', '\\ ')
                    image_output_denoise_destination = image_output_denoise_destination.replace(' ', '\\ ')
                    image_output_denoise_result_destination = image_output_denoise_result_destination.replace(' ', '\\ ')
                    pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
                if not verbose:
                    denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)
                else:
                    denoisePipe = subprocess.Popen(pipePath, shell=True)
                denoisePipe.communicate()[0]
                if platform.system() != 'Windows':
                    image_output_denoise_result_destination = image_output_denoise_result_destination.replace('\\', '')
                with open(image_output_denoise_result_destination, "rb") as f:
                    denoise_data, scale = self.load_pfm(f)
                ndata = np.array(denoise_data)
                # Re-append an opaque alpha plane. The RGB data is
                # (height, width, 3), so the plane must be (height, width);
                # the original used (width, height), which broke on
                # non-square lightmaps.
                ndata2 = np.dstack((ndata, np.ones((height, width))))
                img_array = ndata2.ravel()
                loaded_image.pixels = img_array
                loaded_image.filepath_raw = image_output_denoise_result_destination = image_path[:-10] + "_denoised.hdr"
                loaded_image.file_format = "HDR"
                loaded_image.save()
                self.denoised_array.append(image)
                print(image_path)

    def clean(self):
        """Reset the internal image lists.

        NOTE(review): the original also iterated the *characters* of the
        output path string looking for files ending in "_baked.hdr" and
        appended to an undefined name — dead code, removed. Deleting the
        temporary .pfm / _denoised.hdr files is still a TODO.
        """
        self.denoised_array.clear()
        self.image_array.clear()

    def load_pfm(self, file, as_flat_list=False):
        """Read a PFM file object and return (data, scale).

        *data* is a (height, width, 3) or (height, width) float array
        (or a flat array when *as_flat_list* is True). Raises Exception
        on a malformed header.
        """
        header = file.readline().decode("utf-8").rstrip()
        if header == "PF":
            color = True
        elif header == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file.")
        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception("Malformed PFM header.")
        scale = float(file.readline().decode("utf-8").rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian
        data = np.fromfile(file, endian + "f")
        shape = (height, width, 3) if color else (height, width)
        if as_flat_list:
            result = data
        else:
            result = np.reshape(data, shape)
        return result, scale

    def save_pfm(self, file, image, scale=1):
        """Write a float32 (H, W, 3), (H, W, 1) or (H, W) array *image* to
        the binary file object *file* in PFM format.

        A negative scale in the header marks little-endian data. Raises
        Exception on wrong dtype or shape.
        """
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32 (got %s)" % image.dtype.name)
        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
        file.write(b"PF\n" if color else b"Pf\n")
        file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))
        endian = image.dtype.byteorder
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale
        file.write(b"%f\n" % scale)
        image.tofile(file)

View File

@ -0,0 +1,92 @@
import bpy, os, platform, subprocess
class TLM_Optix_Denoise:
    """Denoise baked lightmaps with NVIDIA's Optix command-line denoiser
    (Windows only; other platforms are skipped with a notice)."""

    def __init__(self, optixProperties, img_array, dirpath):
        self.optixProperties = optixProperties
        self.image_array = img_array
        self.image_output_destination = dirpath
        # Instance-level, not class-level: the original's class-attribute
        # list was shared (and mutated) across all instances.
        self.denoised_array = []
        self.check_binary()

    def check_binary(self):
        """Ensure the configured Optix path points at the executable
        (appends 'Denoiser.exe' to a directory path)."""
        optixPath = self.optixProperties.tlm_optix_path
        if optixPath != "":
            file = optixPath
            filename, file_extension = os.path.splitext(file)
            if(file_extension == ".exe"):
                #if file exists optixDenoise or denoise
                pass
            else:
                #if file exists optixDenoise or denoise
                self.optixProperties.tlm_optix_path = os.path.join(self.optixProperties.tlm_optix_path,"Denoiser.exe")
        else:
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("Please provide Optix path")

    def denoise(self):
        """Run the Optix binary on every not-yet-processed image and point
        the Blender image datablock at the '<name>_denoised.hdr' result."""
        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("Optix: Denoising")
        for image in self.image_array:
            if image not in self.denoised_array:
                image_path = os.path.join(self.image_output_destination, image)
                denoise_output_destination = image_path[:-10] + "_denoised.hdr"
                if platform.system() == 'Windows':
                    optixPath = bpy.path.abspath(self.optixProperties.tlm_optix_path)
                    pipePath = [optixPath, '-i', image_path, '-o', denoise_output_destination]
                else:
                    # Optix denoising is Windows-only here; skip instead of
                    # falling through to Popen with pipePath unbound (the
                    # original raised NameError on macOS/Linux).
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        if platform.system() == 'Darwin':
                            print("Mac for Optix is still unsupported")
                        else:
                            print("Linux for Optix is still unsupported")
                    continue
                if self.optixProperties.tlm_optix_verbose:
                    denoisePipe = subprocess.Popen(pipePath, shell=True)
                else:
                    denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)
                denoisePipe.communicate()[0]
                image = bpy.data.images.load(image_path, check_existing=False)
                bpy.data.images[image.name].filepath_raw = bpy.data.images[image.name].filepath_raw[:-4] + "_denoised.hdr"
                bpy.data.images[image.name].reload()

    def clean(self):
        """Reset the internal image lists.

        NOTE(review): the original also iterated the *characters* of the
        output path string looking for files ending in "_baked.hdr" and
        appended to an undefined name — dead code, removed. Deleting the
        temporary files is still a TODO.
        """
        self.denoised_array.clear()
        self.image_array.clear()

View File

@ -0,0 +1,674 @@
import bpy, math, os, gpu, bgl, importlib
import numpy as np
from . import utility
from fractions import Fraction
from gpu_extras.batch import batch_for_shader
def splitLogLuvAlphaAtlas(imageIn, outDir, quality):
    """Split a LogLuv-encoded atlas into XYZ and W images.

    Not implemented yet; kept so callers have a stable entry point.
    """
    return None
def splitLogLuvAlpha(imageIn, outDir, quality):
    # Split a 4-channel LogLuv PNG into a 3-channel "<name>_XYZ.png" and a
    # single-channel "<name>_W.png" (the alpha/exponent plane) in outDir.
    # NOTE(review): 'quality' is currently unused here — confirm intent.
    bpy.app.driver_namespace["logman"].append("Starting LogLuv split for: " + str(imageIn))
    # OpenCV is an optional dependency; bail out quietly if missing.
    cv2 = importlib.util.find_spec("cv2")
    if cv2 is None:
        print("CV2 not found - Ignoring filtering")
        return 0
    else:
        cv2 = importlib.__import__("cv2")
    print(imageIn)
    # IMREAD_UNCHANGED keeps the alpha channel intact.
    image = cv2.imread(imageIn, cv2.IMREAD_UNCHANGED)
    #cv2.imshow('image', image)
    split = cv2.split(image)
    # First three planes form the color image; the fourth is the W plane.
    merged = cv2.merge([split[0], split[1], split[2]])
    alpha = split[3]
    #b,g,r = cv2.split(image)
    #merged = cv2.merge([b, g, r])
    #alpha = cv2.merge([a,a,a])
    # Strip the 4-character extension (e.g. ".png") from the basename.
    image_name = os.path.basename(imageIn)[:-4]
    #os.path.join(outDir, image_name+"_XYZ.png")
    cv2.imwrite(os.path.join(outDir, image_name+"_XYZ.png"), merged)
    cv2.imwrite(os.path.join(outDir, image_name+"_W.png"), alpha)
def encodeLogLuvGPU(image, outDir, quality):
    # Encode an HDR lightmap into 8-bit LogLuv via a GPU offscreen render
    # pass and save the result as "<outDir>/<name>_encoded.png".
    # NOTE(review): 'quality' is written to the scene render settings, but the
    # final write uses input_image.save() — confirm the setting takes effect.
    bpy.app.driver_namespace["logman"].append("Starting LogLuv encode for: " + str(image.name))
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Offscreen framebuffer matching the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image
    vertex_shader = '''
uniform mat4 ModelViewProjectionMatrix;
in vec2 texCoord;
in vec2 pos;
out vec2 texCoord_interp;
void main()
{
    //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
    //gl_Position.z = 1.0;
    gl_Position = vec4(pos.xy, 100, 100);
    texCoord_interp = texCoord;
}
'''
    fragment_shader = '''
in vec2 texCoord_interp;
out vec4 fragColor;
uniform sampler2D image;
const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
vec4 LinearToLogLuv( in vec4 value ) {
    vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
    Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
    vec4 vResult;
    vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
    float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
    vResult.w = fract( Le );
    vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
    return vResult;
    //return vec4(Xp_Y_XYZp,1);
}
const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
vec4 LogLuvToLinear( in vec4 value ) {
    float Le = value.z * 255.0 + value.w;
    vec3 Xp_Y_XYZp;
    Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
    Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
    Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
    vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
    //return vec4( max( vRGB, 0.0 ), 1.0 );
    return vec4( max( Xp_Y_XYZp, 0.0 ), 1.0 );
}
void main()
{
    //fragColor = LinearToLogLuv(pow(texture(image, texCoord_interp), vec4(0.454)));
    fragColor = LinearToLogLuv(texture(image, texCoord_interp));
    //fragColor = LogLuvToLinear(LinearToLogLuv(texture(image, texCoord_interp)));
}
'''
    # Quad covering the offscreen target; the vertex shader passes these
    # coordinates straight through to clip space.
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    # NOTE(review): the last vertex uses off_x where its siblings use off_y —
    # possible typo; it only works because both offsets happen to be -100.
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))
    # Source must be interpreted as linear data, not color-managed.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Removing .exr or .hdr prefix
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing "<name>_encoded" datablock if one is present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )
    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )
    # gl_load uploads the image to an OpenGL texture; non-zero means failure.
    if image.gl_load():
        raise Exception()
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the rendered result back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()
    # Normalize the byte buffer back to 0..1 floats for the datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    #Save LogLuv
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
def encodeImageRGBDGPU(image, maxRange, outDir, quality):
    # Encode an HDR lightmap into RGBD (RGB scaled by a divisor stored in
    # alpha) via a GPU offscreen pass and save it as
    # "<outDir>/<name>_encoded.png".
    # NOTE(review): 'maxRange' is not forwarded to the shader, which uses a
    # fixed rgbdMaxRange of 255.0 — confirm intent.
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Offscreen framebuffer matching the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image
    vertex_shader = '''
uniform mat4 ModelViewProjectionMatrix;
in vec2 texCoord;
in vec2 pos;
out vec2 texCoord_interp;
void main()
{
    //gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
    //gl_Position.z = 1.0;
    gl_Position = vec4(pos.xy, 100, 100);
    texCoord_interp = texCoord;
}
'''
    fragment_shader = '''
in vec2 texCoord_interp;
out vec4 fragColor;
uniform sampler2D image;
//Code from here: https://github.com/BabylonJS/Babylon.js/blob/master/src/Shaders/ShadersInclude/helperFunctions.fx
const float PI = 3.1415926535897932384626433832795;
const float HALF_MIN = 5.96046448e-08; // Smallest positive half.
const float LinearEncodePowerApprox = 2.2;
const float GammaEncodePowerApprox = 1.0 / LinearEncodePowerApprox;
const vec3 LuminanceEncodeApprox = vec3(0.2126, 0.7152, 0.0722);
const float Epsilon = 0.0000001;
#define saturate(x) clamp(x, 0.0, 1.0)
float maxEps(float x) {
    return max(x, Epsilon);
}
float toLinearSpace(float color)
{
    return pow(color, LinearEncodePowerApprox);
}
vec3 toLinearSpace(vec3 color)
{
    return pow(color, vec3(LinearEncodePowerApprox));
}
vec4 toLinearSpace(vec4 color)
{
    return vec4(pow(color.rgb, vec3(LinearEncodePowerApprox)), color.a);
}
vec3 toGammaSpace(vec3 color)
{
    return pow(color, vec3(GammaEncodePowerApprox));
}
vec4 toGammaSpace(vec4 color)
{
    return vec4(pow(color.rgb, vec3(GammaEncodePowerApprox)), color.a);
}
float toGammaSpace(float color)
{
    return pow(color, GammaEncodePowerApprox);
}
float square(float value)
{
    return value * value;
}
// Check if configurable value is needed.
const float rgbdMaxRange = 255.0;
vec4 toRGBD(vec3 color) {
    float maxRGB = maxEps(max(color.r, max(color.g, color.b)));
    float D = max(rgbdMaxRange / maxRGB, 1.);
    D = clamp(floor(D) / 255.0, 0., 1.);
    vec3 rgb = color.rgb * D;
    // Helps with png quantization.
    rgb = toGammaSpace(rgb);
    return vec4(rgb, D);
}
vec3 fromRGBD(vec4 rgbd) {
    // Helps with png quantization.
    rgbd.rgb = toLinearSpace(rgbd.rgb);
    // return rgbd.rgb * ((rgbdMaxRange / 255.0) / rgbd.a);
    return rgbd.rgb / rgbd.a;
}
void main()
{
    fragColor = toRGBD(texture(image, texCoord_interp).rgb);
}
'''
    # Quad covering the offscreen target (see encodeLogLuvGPU for details).
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    # NOTE(review): the last vertex uses off_x where its siblings use off_y —
    # possible typo; it only works because both offsets happen to be -100.
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))
    # Source must be interpreted as linear data, not color-managed.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Removing .exr or .hdr prefix
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing "<name>_encoded" datablock if one is present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )
    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )
    # gl_load uploads the image to an OpenGL texture; non-zero means failure.
    if image.gl_load():
        raise Exception()
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the rendered result back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()
    # Normalize the byte buffer back to 0..1 floats for the datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    #Save LogLuv
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    #input_image.filepath_raw = outDir + "_encoded.png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    input_image.save()
    #Todo - Find a way to save
    #bpy.ops.image.save_all_modified()
def encodeImageRGBMGPU(image, maxRange, outDir, quality):
    """Encode an HDR lightmap into RGBM via a GPU offscreen pass.

    RGBM stores a brightness multiplier in the alpha channel: the RGB
    channels are divided by M = max(r, g, b) and M (quantized to 8 bits)
    is stored in alpha. Saves "<outDir>/<name>_encoded.png".

    NOTE(review): 'maxRange' is not forwarded to the shader; the encode uses
    a fixed 1/6 pre-scale like common RGBM implementations — confirm whether
    it should use maxRange as the CPU variant does.
    """
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Offscreen framebuffer matching the source image resolution.
    offscreen = gpu.types.GPUOffScreen(input_image.size[0], input_image.size[1])
    image = input_image
    vertex_shader = '''
uniform mat4 ModelViewProjectionMatrix;
in vec2 texCoord;
in vec2 pos;
out vec2 texCoord_interp;
void main()
{
    gl_Position = vec4(pos.xy, 100, 100);
    texCoord_interp = texCoord;
}
'''
    # BUGFIX: the previous toRGBM referenced an undefined variable 'D' and
    # contained unreachable statements after a bare 'return', so the shader
    # could not compile. Replaced with the standard RGBM encode; dead helper
    # functions that were only used by the unreachable code were removed.
    fragment_shader = '''
in vec2 texCoord_interp;
out vec4 fragColor;
uniform sampler2D image;
#define saturate(x) clamp(x, 0.0, 1.0)
vec4 toRGBM(vec3 color) {
    vec4 rgbm;
    color *= 1.0/6.0;
    rgbm.a = saturate( max( max( color.r, color.g ), max( color.b, 1e-6 ) ) );
    rgbm.a = ceil( rgbm.a * 255.0 ) / 255.0;
    rgbm.rgb = color / rgbm.a;
    return rgbm;
}
void main()
{
    fragColor = toRGBM(texture(image, texCoord_interp).rgb);
}
'''
    # Quad covering the offscreen target (same layout as the sibling
    # encodeLogLuvGPU / encodeImageRGBDGPU functions).
    x_screen = 0
    off_x = -100
    off_y = -100
    y_screen_flip = 0
    sx = 200
    sy = 200
    vertices = (
        (x_screen + off_x, y_screen_flip - off_y),
        (x_screen + off_x, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - sy - off_y),
        (x_screen + off_x + sx, y_screen_flip - off_x))
    # Source must be interpreted as linear data, not color-managed.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Removing .exr or .hdr prefix
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing "<name>_encoded" datablock if one is present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )
    shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
    batch = batch_for_shader(
        shader, 'TRI_FAN',
        {
            "pos": vertices,
            "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1)),
        },
    )
    # gl_load uploads the image to an OpenGL texture; non-zero means failure.
    if image.gl_load():
        raise Exception()
    with offscreen.bind():
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, image.bindcode)
        shader.bind()
        shader.uniform_int("image", 0)
        batch.draw(shader)
        # Read the rendered result back as 8-bit RGBA.
        buffer = bgl.Buffer(bgl.GL_BYTE, input_image.size[0] * input_image.size[1] * 4)
        bgl.glReadBuffer(bgl.GL_BACK)
        bgl.glReadPixels(0, 0, input_image.size[0], input_image.size[1], bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
    offscreen.free()
    # Normalize the byte buffer back to 0..1 floats for the datablock.
    target_image.pixels = [v / 255 for v in buffer]
    input_image = target_image
    #Save RGBM
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
    #Todo - Find a way to save
    #bpy.ops.image.save_all_modified()
def encodeImageRGBMCPU(image, maxRange, outDir, quality):
    # CPU fallback: encode an HDR image into RGBM (RGB divided by a
    # brightness multiplier M stored in alpha, quantized to 8 bits) and save
    # it as "<outDir>/<name>_encoded.png".
    # Depends on the module-level saturate() helper defined below.
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Source must be interpreted as linear data, not color-managed.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Removing .exr or .hdr prefix
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing "<name>_encoded" datablock if one is present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )
    # Pixels are flat RGBA floats; process 4 components per pixel.
    num_pixels = len(input_image.pixels)
    result_pixel = list(input_image.pixels)
    for i in range(0,num_pixels,4):
        # Pre-scale RGB into the representable range.
        for j in range(3):
            result_pixel[i+j] *= 1.0 / maxRange;
        # M = clamped max component, quantized up to the next 1/255 step.
        result_pixel[i+3] = saturate(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2], 1e-6))
        result_pixel[i+3] = math.ceil(result_pixel[i+3] * 255.0) / 255.0
        # Divide RGB by M so decode is rgb * a * maxRange.
        for j in range(3):
            result_pixel[i+j] /= result_pixel[i+3]
    target_image.pixels = result_pixel
    input_image = target_image
    #Save RGBM
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()
    #input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    # input_image.filepath_raw = outDir + "_encoded.png"
    # input_image.file_format = "PNG"
    # bpy.context.scene.render.image_settings.quality = quality
    # input_image.save_render(filepath = input_image.filepath_raw, scene = bpy.context.scene)
    #input_image.
    #input_image.save()
def saturate(num, floats=True):
    """Clamp *num* to [0, 1] when *floats* is True, else to [0, 255]."""
    upper = 1 if floats else 255
    if num <= 0:
        return 0
    if num > upper:
        return upper
    return num
def maxEps(x):
    """Return *x* floored at 1e-6, guarding later divisions against zero."""
    epsilon = 1e-6
    return max(x, epsilon)
def encodeImageRGBDCPU(image, maxRange, outDir, quality):
    # CPU fallback: encode an HDR image into RGBD (RGB multiplied by a
    # divisor D stored in alpha; decode is rgb / a) and save it as
    # "<outDir>/<name>_encoded.png". Mirrors the GPU toRGBD shader.
    # NOTE(review): 'maxRange' is unused — the fixed rgbdMaxRange of 255.0 is
    # used instead, matching the GPU variant; confirm intent.
    input_image = bpy.data.images[image.name]
    image_name = input_image.name
    # Source must be interpreted as linear data, not color-managed.
    if input_image.colorspace_settings.name != 'Linear':
        input_image.colorspace_settings.name = 'Linear'
    # Removing .exr or .hdr prefix
    if image_name[-4:] == '.exr' or image_name[-4:] == '.hdr':
        image_name = image_name[:-4]
    # Reuse an existing "<name>_encoded" datablock if one is present.
    target_image = bpy.data.images.get(image_name + '_encoded')
    if not target_image:
        target_image = bpy.data.images.new(
            name = image_name + '_encoded',
            width = input_image.size[0],
            height = input_image.size[1],
            alpha = True,
            float_buffer = False
            )
    # Pixels are flat RGBA floats; process 4 components per pixel.
    num_pixels = len(input_image.pixels)
    result_pixel = list(input_image.pixels)
    rgbdMaxRange = 255.0
    for i in range(0,num_pixels,4):
        # D scales the max component into range, quantized to 8 bits.
        maxRGB = maxEps(max(result_pixel[i], result_pixel[i+1], result_pixel[i+2]))
        D = max(rgbdMaxRange/maxRGB, 1.0)
        D = np.clip((math.floor(D) / 255.0), 0.0, 1.0)
        # Gamma-encode (1/2.2) to reduce PNG quantization artifacts.
        result_pixel[i] = math.pow(result_pixel[i] * D, 1/2.2)
        result_pixel[i+1] = math.pow(result_pixel[i+1] * D, 1/2.2)
        result_pixel[i+2] = math.pow(result_pixel[i+2] * D, 1/2.2)
        result_pixel[i+3] = D
    target_image.pixels = result_pixel
    input_image = target_image
    #Save RGBD
    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
        print(input_image.name)
    input_image.filepath_raw = outDir + "/" + input_image.name + ".png"
    input_image.file_format = "PNG"
    bpy.context.scene.render.image_settings.quality = quality
    input_image.save()

View File

@ -0,0 +1,49 @@
import bpy, os, importlib
from os import listdir
from os.path import isfile, join
class TLM_NP_Filtering:
    """Numpy-based lightmap post-filter (work in progress).

    Scans the lightmap directory for baked (or denoised) HDR files and is
    intended to write "<name>_filtered.hdr" outputs; the actual filter and
    write are not implemented yet.
    """

    image_output_destination = ""

    # NOTE: called as TLM_NP_Filtering.init(dir, denoise) — no 'self',
    # matching the sibling TLM_CV_Filtering / TLM_Shader_Filtering classes.
    def init(lightmap_dir, denoise):
        scene = bpy.context.scene

        print("Beginning filtering for files: ")

        # Denoised inputs carry a longer suffix than raw bakes.
        if denoise:
            file_ending = "_denoised.hdr"
        else:
            file_ending = "_baked.hdr"

        dirfiles = [f for f in listdir(lightmap_dir) if isfile(join(lightmap_dir, f))]

        for file in dirfiles:
            # file_split is the suffix length to strip when naming output.
            if denoise:
                file_ending = "_denoised.hdr"
                file_split = 13
            else:
                file_ending = "_baked.hdr"
                file_split = 10

            if file.endswith(file_ending):
                file_input = os.path.join(lightmap_dir, file)
                os.chdir(lightmap_dir)
                #opencv_process_image = cv2.imread(file_input, -1)

                print("Filtering: " + file_input)
                print(os.path.join(lightmap_dir, file))

                if scene.TLM_SceneProperties.tlm_numpy_filtering_mode == "3x3 blur":
                    pass

                # BUGFIX: filter_file_output was referenced in the print
                # below without ever being assigned (the assignment was
                # commented out), raising NameError for every matching file.
                filter_file_output = os.path.join(lightmap_dir, file[:-file_split] + "_filtered.hdr")
                #cv2.imwrite(filter_file_output, opencv_bl_result)
                print("Written to: " + filter_file_output)

View File

@ -0,0 +1,178 @@
import bpy, os, importlib
from os import listdir
from os.path import isfile, join
class TLM_CV_Filtering:
    # OpenCV-based lightmap post-filter. Scans the lightmap directory for
    # baked (or denoised) HDR files and writes "<name>_filtered.hdr" using
    # the filter configured per-object (override) or scene-wide.

    image_output_destination = ""

    # NOTE: called as TLM_CV_Filtering.init(dir, denoise) — no 'self'.
    def init(lightmap_dir, denoise):
        scene = bpy.context.scene

        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("Beginning filtering for files: ")

        # Denoised inputs carry a longer suffix than raw bakes.
        if denoise:
            file_ending = "_denoised.hdr"
        else:
            file_ending = "_baked.hdr"

        dirfiles = [f for f in listdir(lightmap_dir) if isfile(join(lightmap_dir, f))]

        # OpenCV is an optional dependency; skip filtering if missing.
        cv2 = importlib.util.find_spec("cv2")
        if cv2 is None:
            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                print("CV2 not found - Ignoring filtering")
            return 0
        else:
            cv2 = importlib.__import__("cv2")

        for file in dirfiles:
            # file_split is the suffix length to strip when naming output.
            if denoise:
                file_ending = "_denoised.hdr"
                file_split = 13
            else:
                file_ending = "_baked.hdr"
                file_split = 10

            if file.endswith(file_ending):
                file_input = os.path.join(lightmap_dir, file)
                os.chdir(lightmap_dir)
                # -1 / IMREAD_UNCHANGED keeps full HDR float data.
                opencv_process_image = cv2.imread(file_input, -1)

                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                    print("Filtering: " + os.path.basename(file_input))

                # Lightmap files are named "<object>_<...>"; recover the
                # object (or atlas group) name from the filename.
                obj_name = os.path.basename(file_input).split("_")[0]

                #SEAM TESTING# #####################
                # obj = bpy.data.objects[obj_name]
                # bpy.context.view_layer.objects.active = obj
                # bpy.ops.object.mode_set(mode='EDIT')
                # bpy.ops.uv.export_layout(filepath=os.path.join(lightmap_dir,obj_name), export_all=True, mode='PNG', opacity=0.0)
                # bpy.ops.object.mode_set(mode='OBJECT')
                # print("Exported")
                #SEAM TESTING# #####################

                # Per-object filter override only applies to real scene
                # objects; atlas groups always use the scene settings.
                if obj_name in bpy.context.scene.objects:
                    override = bpy.data.objects[obj_name].TLM_ObjectProperties.tlm_mesh_filter_override
                elif obj_name in scene.TLM_AtlasList:
                    override = False
                else:
                    override = False

                if override:
                    # --- Per-object filter settings ---
                    print(os.path.join(lightmap_dir, file))
                    objectProperties = bpy.data.objects[obj_name].TLM_ObjectProperties
                    #TODO OVERRIDE FILTERING OPTION! REWRITE
                    if objectProperties.tlm_mesh_filtering_mode == "Box":
                        # Kernel sizes must be odd; bump even values by one.
                        if objectProperties.tlm_mesh_filtering_box_strength % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_box_strength + 1, objectProperties.tlm_mesh_filtering_box_strength + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_box_strength, objectProperties.tlm_mesh_filtering_box_strength)
                        opencv_bl_result = cv2.blur(opencv_process_image, kernel_size)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.blur(opencv_bl_result, kernel_size)
                    elif objectProperties.tlm_mesh_filtering_mode == "Gaussian":
                        if objectProperties.tlm_mesh_filtering_gaussian_strength % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_gaussian_strength + 1, objectProperties.tlm_mesh_filtering_gaussian_strength + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_gaussian_strength, objectProperties.tlm_mesh_filtering_gaussian_strength)
                        # sigma 0 lets OpenCV derive it from the kernel size.
                        sigma_size = 0
                        opencv_bl_result = cv2.GaussianBlur(opencv_process_image, kernel_size, sigma_size)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.GaussianBlur(opencv_bl_result, kernel_size, sigma_size)
                    elif objectProperties.tlm_mesh_filtering_mode == "Bilateral":
                        diameter_size = objectProperties.tlm_mesh_filtering_bilateral_diameter
                        sigma_color = objectProperties.tlm_mesh_filtering_bilateral_color_deviation
                        sigma_space = objectProperties.tlm_mesh_filtering_bilateral_coordinate_deviation
                        opencv_bl_result = cv2.bilateralFilter(opencv_process_image, diameter_size, sigma_color, sigma_space)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.bilateralFilter(opencv_bl_result, diameter_size, sigma_color, sigma_space)
                    else:
                        # Fallback: median filter.
                        if objectProperties.tlm_mesh_filtering_median_kernel % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_median_kernel + 1, objectProperties.tlm_mesh_filtering_median_kernel + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_median_kernel, objectProperties.tlm_mesh_filtering_median_kernel)
                        opencv_bl_result = cv2.medianBlur(opencv_process_image, kernel_size[0])
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.medianBlur(opencv_bl_result, kernel_size[0])
                    filter_file_output = os.path.join(lightmap_dir, file[:-file_split] + "_filtered.hdr")
                    cv2.imwrite(filter_file_output, opencv_bl_result)
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Written to: " + filter_file_output)
                else:
                    # --- Scene-wide filter settings (same filter shapes) ---
                    print(os.path.join(lightmap_dir, file))
                    #TODO OVERRIDE FILTERING OPTION!
                    if scene.TLM_SceneProperties.tlm_filtering_mode == "Box":
                        # Kernel sizes must be odd; bump even values by one.
                        if scene.TLM_SceneProperties.tlm_filtering_box_strength % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_box_strength + 1,scene.TLM_SceneProperties.tlm_filtering_box_strength + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_box_strength,scene.TLM_SceneProperties.tlm_filtering_box_strength)
                        opencv_bl_result = cv2.blur(opencv_process_image, kernel_size)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.blur(opencv_bl_result, kernel_size)
                    elif scene.TLM_SceneProperties.tlm_filtering_mode == "Gaussian":
                        if scene.TLM_SceneProperties.tlm_filtering_gaussian_strength % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_gaussian_strength + 1,scene.TLM_SceneProperties.tlm_filtering_gaussian_strength + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_gaussian_strength,scene.TLM_SceneProperties.tlm_filtering_gaussian_strength)
                        # sigma 0 lets OpenCV derive it from the kernel size.
                        sigma_size = 0
                        opencv_bl_result = cv2.GaussianBlur(opencv_process_image, kernel_size, sigma_size)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.GaussianBlur(opencv_bl_result, kernel_size, sigma_size)
                    elif scene.TLM_SceneProperties.tlm_filtering_mode == "Bilateral":
                        diameter_size = scene.TLM_SceneProperties.tlm_filtering_bilateral_diameter
                        sigma_color = scene.TLM_SceneProperties.tlm_filtering_bilateral_color_deviation
                        sigma_space = scene.TLM_SceneProperties.tlm_filtering_bilateral_coordinate_deviation
                        opencv_bl_result = cv2.bilateralFilter(opencv_process_image, diameter_size, sigma_color, sigma_space)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.bilateralFilter(opencv_bl_result, diameter_size, sigma_color, sigma_space)
                    else:
                        # Fallback: median filter.
                        if scene.TLM_SceneProperties.tlm_filtering_median_kernel % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_median_kernel + 1 , scene.TLM_SceneProperties.tlm_filtering_median_kernel + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_median_kernel, scene.TLM_SceneProperties.tlm_filtering_median_kernel)
                        opencv_bl_result = cv2.medianBlur(opencv_process_image, kernel_size[0])
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.medianBlur(opencv_bl_result, kernel_size[0])
                    filter_file_output = os.path.join(lightmap_dir, file[:-file_split] + "_filtered.hdr")
                    cv2.imwrite(filter_file_output, opencv_bl_result)
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Written to: " + filter_file_output)

View File

@ -0,0 +1,160 @@
import bpy, os, importlib
from os import listdir
from os.path import isfile, join
class TLM_Shader_Filtering:
    """OpenCV-based lightmap post-filter (shader-filtering variant).

    Scans the lightmap directory for baked (or denoised) HDR files and
    writes "<name>_filtered.hdr" using the filter configured per-object
    (override) or scene-wide.
    """

    image_output_destination = ""

    # NOTE: called as TLM_Shader_Filtering.init(dir, denoise) — no 'self',
    # matching the sibling TLM_CV_Filtering class.
    def init(lightmap_dir, denoise):
        scene = bpy.context.scene

        print("Beginning filtering for files: ")

        # Denoised inputs carry a longer suffix than raw bakes.
        if denoise:
            file_ending = "_denoised.hdr"
        else:
            file_ending = "_baked.hdr"

        dirfiles = [f for f in listdir(lightmap_dir) if isfile(join(lightmap_dir, f))]

        # OpenCV is an optional dependency; skip filtering if missing.
        cv2 = importlib.util.find_spec("cv2")
        if cv2 is None:
            print("CV2 not found - Ignoring filtering")
            return 0
        else:
            cv2 = importlib.__import__("cv2")

        for file in dirfiles:
            # file_split is the suffix length to strip when naming output.
            if denoise:
                file_ending = "_denoised.hdr"
                file_split = 13
            else:
                file_ending = "_baked.hdr"
                file_split = 10

            if file.endswith(file_ending):
                file_input = os.path.join(lightmap_dir, file)
                os.chdir(lightmap_dir)
                # -1 / IMREAD_UNCHANGED keeps full HDR float data.
                opencv_process_image = cv2.imread(file_input, -1)

                print("Filtering: " + os.path.basename(file_input))

                # Lightmap files are named "<object>_<...>"; recover the
                # object (or atlas group) name from the filename.
                obj_name = os.path.basename(file_input).split("_")[0]

                # BUGFIX: guard the datablock lookup; previously
                # bpy.data.objects[obj_name] raised KeyError for lightmaps
                # belonging to atlas groups rather than objects (the sibling
                # TLM_CV_Filtering class already checks membership first).
                if obj_name in bpy.data.objects and bpy.data.objects[obj_name].TLM_ObjectProperties.tlm_mesh_filter_override:
                    # --- Per-object filter settings ---
                    print("OVERRIDE!")
                    print(os.path.join(lightmap_dir, file))
                    objectProperties = bpy.data.objects[obj_name].TLM_ObjectProperties
                    #TODO OVERRIDE FILTERING OPTION! REWRITE
                    if objectProperties.tlm_mesh_filtering_mode == "Box":
                        # Kernel sizes must be odd; bump even values by one.
                        if objectProperties.tlm_mesh_filtering_box_strength % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_box_strength + 1, objectProperties.tlm_mesh_filtering_box_strength + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_box_strength, objectProperties.tlm_mesh_filtering_box_strength)
                        opencv_bl_result = cv2.blur(opencv_process_image, kernel_size)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.blur(opencv_bl_result, kernel_size)
                    elif objectProperties.tlm_mesh_filtering_mode == "Gaussian":
                        if objectProperties.tlm_mesh_filtering_gaussian_strength % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_gaussian_strength + 1, objectProperties.tlm_mesh_filtering_gaussian_strength + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_gaussian_strength, objectProperties.tlm_mesh_filtering_gaussian_strength)
                        # sigma 0 lets OpenCV derive it from the kernel size.
                        sigma_size = 0
                        opencv_bl_result = cv2.GaussianBlur(opencv_process_image, kernel_size, sigma_size)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.GaussianBlur(opencv_bl_result, kernel_size, sigma_size)
                    elif objectProperties.tlm_mesh_filtering_mode == "Bilateral":
                        diameter_size = objectProperties.tlm_mesh_filtering_bilateral_diameter
                        sigma_color = objectProperties.tlm_mesh_filtering_bilateral_color_deviation
                        sigma_space = objectProperties.tlm_mesh_filtering_bilateral_coordinate_deviation
                        opencv_bl_result = cv2.bilateralFilter(opencv_process_image, diameter_size, sigma_color, sigma_space)
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.bilateralFilter(opencv_bl_result, diameter_size, sigma_color, sigma_space)
                    else:
                        # Fallback: median filter.
                        if objectProperties.tlm_mesh_filtering_median_kernel % 2 == 0:
                            kernel_size = (objectProperties.tlm_mesh_filtering_median_kernel + 1, objectProperties.tlm_mesh_filtering_median_kernel + 1)
                        else:
                            kernel_size = (objectProperties.tlm_mesh_filtering_median_kernel, objectProperties.tlm_mesh_filtering_median_kernel)
                        opencv_bl_result = cv2.medianBlur(opencv_process_image, kernel_size[0])
                        if objectProperties.tlm_mesh_filtering_iterations > 1:
                            for x in range(objectProperties.tlm_mesh_filtering_iterations):
                                opencv_bl_result = cv2.medianBlur(opencv_bl_result, kernel_size[0])
                    filter_file_output = os.path.join(lightmap_dir, file[:-file_split] + "_filtered.hdr")
                    cv2.imwrite(filter_file_output, opencv_bl_result)
                    print("Written to: " + filter_file_output)
                else:
                    # --- Scene-wide filter settings (same filter shapes) ---
                    print(os.path.join(lightmap_dir, file))
                    #TODO OVERRIDE FILTERING OPTION!
                    if scene.TLM_SceneProperties.tlm_filtering_mode == "Box":
                        # Kernel sizes must be odd; bump even values by one.
                        if scene.TLM_SceneProperties.tlm_filtering_box_strength % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_box_strength + 1,scene.TLM_SceneProperties.tlm_filtering_box_strength + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_box_strength,scene.TLM_SceneProperties.tlm_filtering_box_strength)
                        opencv_bl_result = cv2.blur(opencv_process_image, kernel_size)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.blur(opencv_bl_result, kernel_size)
                    elif scene.TLM_SceneProperties.tlm_filtering_mode == "Gaussian":
                        if scene.TLM_SceneProperties.tlm_filtering_gaussian_strength % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_gaussian_strength + 1,scene.TLM_SceneProperties.tlm_filtering_gaussian_strength + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_gaussian_strength,scene.TLM_SceneProperties.tlm_filtering_gaussian_strength)
                        # sigma 0 lets OpenCV derive it from the kernel size.
                        sigma_size = 0
                        opencv_bl_result = cv2.GaussianBlur(opencv_process_image, kernel_size, sigma_size)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.GaussianBlur(opencv_bl_result, kernel_size, sigma_size)
                    elif scene.TLM_SceneProperties.tlm_filtering_mode == "Bilateral":
                        diameter_size = scene.TLM_SceneProperties.tlm_filtering_bilateral_diameter
                        sigma_color = scene.TLM_SceneProperties.tlm_filtering_bilateral_color_deviation
                        sigma_space = scene.TLM_SceneProperties.tlm_filtering_bilateral_coordinate_deviation
                        opencv_bl_result = cv2.bilateralFilter(opencv_process_image, diameter_size, sigma_color, sigma_space)
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.bilateralFilter(opencv_bl_result, diameter_size, sigma_color, sigma_space)
                    else:
                        # Fallback: median filter.
                        if scene.TLM_SceneProperties.tlm_filtering_median_kernel % 2 == 0:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_median_kernel + 1 , scene.TLM_SceneProperties.tlm_filtering_median_kernel + 1)
                        else:
                            kernel_size = (scene.TLM_SceneProperties.tlm_filtering_median_kernel, scene.TLM_SceneProperties.tlm_filtering_median_kernel)
                        opencv_bl_result = cv2.medianBlur(opencv_process_image, kernel_size[0])
                        if scene.TLM_SceneProperties.tlm_filtering_iterations > 1:
                            for x in range(scene.TLM_SceneProperties.tlm_filtering_iterations):
                                opencv_bl_result = cv2.medianBlur(opencv_bl_result, kernel_size[0])
                    filter_file_output = os.path.join(lightmap_dir, file[:-file_split] + "_filtered.hdr")
                    cv2.imwrite(filter_file_output, opencv_bl_result)
                    print("Written to: " + filter_file_output)

        # if file.endswith(file_ending):
        #     print()
        #     baked_image_array.append(file)

View File

@ -0,0 +1,77 @@
import bpy, blf, bgl, os, gpu
from gpu_extras.batch import batch_for_shader
class ViewportDraw:
    """Draws the TLM overlay image (and optionally a text line) in the 3D viewport.

    Loads ``assets/TLM_Overlay.png`` relative to this script, uploads it to
    an OpenGL texture and registers a ``POST_PIXEL`` draw handler on
    ``SpaceView3D``. Call :meth:`remove_handle` to detach the overlay.
    """

    def __init__(self, context, text):

        bakefile = "TLM_Overlay.png"
        scriptDir = os.path.dirname(os.path.realpath(__file__))
        bakefile_path = os.path.abspath(os.path.join(scriptDir, '..', '..', 'assets', bakefile))

        image_name = "TLM_Overlay.png"

        bpy.ops.image.open(filepath=bakefile_path)

        print("Self path: " + bakefile_path)

        # BUGFIX: 'image' was previously unbound (NameError) when no
        # datablock's filepath matched; initialize it before the search.
        image = None
        for img in bpy.data.images:
            if img.filepath.endswith(image_name):
                image = img
                break

        # Fall back to a lookup by datablock name.
        if not image:
            image = bpy.data.images[image_name]

        # Overlay quad position/size in pixels (bottom-left anchored).
        x = 15
        y = 15
        w = 400
        h = 200

        self.shader = gpu.shader.from_builtin('2D_IMAGE')
        self.batch = batch_for_shader(
            self.shader, 'TRI_FAN',
            {
                "pos": ((x, y), (x+w, y), (x+w, y+h), (x, y+h)),
                "texCoord": ((0, 0), (1, 0), (1, 1), (0, 1)),
            },
        )

        # gl_load returns a nonzero error code on failure.
        if image.gl_load():
            raise Exception()

        self.text = text
        self.image = image
        #self.handle = bpy.types.SpaceView3D.draw_handler_add(self.draw_text_callback, (context,), 'WINDOW', 'POST_PIXEL')
        self.handle2 = bpy.types.SpaceView3D.draw_handler_add(self.draw_image_callback, (context,), 'WINDOW', 'POST_PIXEL')

    def draw_text_callback(self, context):
        """Draw the overlay text at the bottom-left of the viewport."""
        font_id = 0
        blf.position(font_id, 15, 15, 0)
        blf.size(font_id, 20, 72)
        blf.draw(font_id, "%s" % (self.text))

    def draw_image_callback(self, context):
        """Draw the overlay image; detach the handler if binding fails."""
        if self.image:
            bgl.glEnable(bgl.GL_BLEND)
            bgl.glActiveTexture(bgl.GL_TEXTURE0)
            try:
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.image.bindcode)
            except:
                # Texture is gone (e.g. image datablock removed) - stop drawing.
                bpy.types.SpaceView3D.draw_handler_remove(self.handle2, 'WINDOW')
            self.shader.bind()
            self.shader.uniform_int("image", 0)
            self.batch.draw(self.shader)
            bgl.glDisable(bgl.GL_BLEND)

    def update_text(self, text):
        """Replace the overlay text shown by draw_text_callback."""
        self.text = text

    def remove_handle(self):
        """Detach the viewport draw handler."""
        #bpy.types.SpaceView3D.draw_handler_remove(self.handle, 'WINDOW')
        bpy.types.SpaceView3D.draw_handler_remove(self.handle2, 'WINDOW')

View File

@ -0,0 +1,31 @@
import os
import bpy
from bpy.utils import previews
# Global preview collection; lazily created by create() / image().
icons = None
# Absolute path to the add-on's bundled icon directory.
directory = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'icons'))
def id(identifier):
    """Return Blender's integer icon_id for *identifier* (for UI layouts)."""
    return image(identifier).icon_id
def image(identifier):
    """Return the preview image for *identifier*, loading it on first use.

    Looks *identifier* up in the global preview collection; if it is not
    cached yet, loads ``<directory>/<identifier>.png``.
    """
    def icon(identifier):
        if identifier in icons:
            return icons[identifier]
        return icons.load(identifier, os.path.join(directory, identifier + '.png'), 'IMAGE')
    # BUGFIX: the original tested the collection's truthiness, so an
    # existing-but-empty collection was silently replaced by create(),
    # leaking the previous preview collection. Only create when missing.
    if icons is None:
        create()
    return icon(identifier)
def create():
    """Create the global preview collection used for icon lookups."""
    global icons
    icons = previews.new()
def remove():
    """Release the global preview collection; safe to call repeatedly.

    The original crashed (passed None / a stale collection to
    previews.remove) when called before create() or called twice.
    """
    global icons
    if icons is not None:
        previews.remove(icons)
        icons = None

View File

@ -0,0 +1,21 @@
import bpy
import datetime
class TLM_Logman:
    """Simple in-memory logger that timestamps every appended line."""

    def __init__(self):
        # BUGFIX: the log was a class-level mutable list shared by every
        # instance; make it per-instance so loggers are independent.
        self._log = []
        print("Logger started Init")
        self.append("Logger started.")

    def append(self, appended):
        """Store *appended* (stringified) prefixed with the current time."""
        self._log.append(str(datetime.datetime.now()) + ": " + str(appended))

    #TODO!
    def stats(self):
        """Placeholder for log statistics (not implemented yet)."""
        # BUGFIX: originally defined without 'self', so instance calls
        # raised TypeError.
        pass

    def dumpLog(self):
        """Print every stored log line in insertion order."""
        for line in self._log:
            print(line)

View File

@ -0,0 +1,259 @@
import bpy
from .. utility import *
def init(self, prev_container):
    """Populate the LuxCore export templates on the shared ``export`` object.

    Assigns hard-coded LuxCore scene (``.scn``) and render-config (``.cfg``)
    text templates to ``export.scene`` and ``export.config`` (``export``
    presumably comes from the star-import of ``.. utility`` - confirm).
    NOTE(review): the templates embed example camera/light/material/object
    ids and look like placeholders until real serialization exists.
    """
    #TODO - JSON classes

    # LuxCore scene description (.scn) template.
    export.scene = """scene.camera.cliphither = 0.1
scene.camera.clipyon = 100
scene.camera.shutteropen = 0
scene.camera.shutterclose = 1
scene.camera.autovolume.enable = 1
scene.camera.lookat.orig = 7.358891 -6.925791 4.958309
scene.camera.lookat.target = 6.707333 -6.31162 4.513038
scene.camera.up = -0.3240135 0.3054208 0.8953956
scene.camera.screenwindow = -1 1 -0.5625 0.5625
scene.camera.lensradius = 0
scene.camera.focaldistance = 10
scene.camera.autofocus.enable = 0
scene.camera.type = "perspective"
scene.camera.oculusrift.barrelpostpro.enable = 0
scene.camera.fieldofview = 39.59776
scene.camera.bokeh.blades = 0
scene.camera.bokeh.power = 3
scene.camera.bokeh.distribution.type = "NONE"
scene.camera.bokeh.scale.x = 0.7071068
scene.camera.bokeh.scale.y = 0.7071068
scene.lights.__WORLD_BACKGROUND_LIGHT__.gain = 2e-05 2e-05 2e-05
scene.lights.__WORLD_BACKGROUND_LIGHT__.transformation = 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.id = 0
scene.lights.__WORLD_BACKGROUND_LIGHT__.temperature = -1
scene.lights.__WORLD_BACKGROUND_LIGHT__.temperature.normalize = 0
scene.lights.__WORLD_BACKGROUND_LIGHT__.visibility.indirect.diffuse.enable = 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.visibility.indirect.glossy.enable = 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.visibility.indirect.specular.enable = 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.type = "sky2"
scene.lights.__WORLD_BACKGROUND_LIGHT__.dir = 0 0 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.turbidity = 2.2
scene.lights.__WORLD_BACKGROUND_LIGHT__.groundalbedo = 0.5 0.5 0.5
scene.lights.__WORLD_BACKGROUND_LIGHT__.ground.enable = 0
scene.lights.__WORLD_BACKGROUND_LIGHT__.ground.color = 0.5 0.5 0.5
scene.lights.__WORLD_BACKGROUND_LIGHT__.ground.autoscale = 1
scene.lights.__WORLD_BACKGROUND_LIGHT__.distribution.width = 512
scene.lights.__WORLD_BACKGROUND_LIGHT__.distribution.height = 256
scene.lights.__WORLD_BACKGROUND_LIGHT__.visibilitymapcache.enable = 0
scene.lights.2382361116072.gain = 1 1 1
scene.lights.2382361116072.transformation = -0.2908646 0.9551712 -0.05518906 0 -0.7711008 -0.1998834 0.6045247 0 0.5663932 0.2183912 0.7946723 0 4.076245 1.005454 5.903862 1
scene.lights.2382361116072.id = 0
scene.lights.2382361116072.temperature = -1
scene.lights.2382361116072.temperature.normalize = 0
scene.lights.2382361116072.type = "sphere"
scene.lights.2382361116072.color = 1 1 1
scene.lights.2382361116072.power = 0
scene.lights.2382361116072.normalizebycolor = 0
scene.lights.2382361116072.efficency = 0
scene.lights.2382361116072.position = 0 0 0
scene.lights.2382361116072.radius = 0.1
scene.materials.Material2382357175256.type = "disney"
scene.materials.Material2382357175256.basecolor = "0.7 0.7 0.7"
scene.materials.Material2382357175256.subsurface = "0"
scene.materials.Material2382357175256.roughness = "0.2"
scene.materials.Material2382357175256.metallic = "0"
scene.materials.Material2382357175256.specular = "0.5"
scene.materials.Material2382357175256.speculartint = "0"
scene.materials.Material2382357175256.clearcoat = "0"
scene.materials.Material2382357175256.clearcoatgloss = "1"
scene.materials.Material2382357175256.anisotropic = "0"
scene.materials.Material2382357175256.sheen = "0"
scene.materials.Material2382357175256.sheentint = "0"
scene.materials.Material2382357175256.transparency.shadow = 0 0 0
scene.materials.Material2382357175256.id = 3364224
scene.materials.Material2382357175256.emission.gain = 1 1 1
scene.materials.Material2382357175256.emission.power = 0
scene.materials.Material2382357175256.emission.normalizebycolor = 1
scene.materials.Material2382357175256.emission.efficency = 0
scene.materials.Material2382357175256.emission.theta = 90
scene.materials.Material2382357175256.emission.id = 0
scene.materials.Material2382357175256.emission.importance = 1
scene.materials.Material2382357175256.emission.temperature = -1
scene.materials.Material2382357175256.emission.temperature.normalize = 0
scene.materials.Material2382357175256.emission.directlightsampling.type = "AUTO"
scene.materials.Material2382357175256.visibility.indirect.diffuse.enable = 1
scene.materials.Material2382357175256.visibility.indirect.glossy.enable = 1
scene.materials.Material2382357175256.visibility.indirect.specular.enable = 1
scene.materials.Material2382357175256.shadowcatcher.enable = 0
scene.materials.Material2382357175256.shadowcatcher.onlyinfinitelights = 0
scene.materials.Material2382357175256.photongi.enable = 1
scene.materials.Material2382357175256.holdout.enable = 0
scene.materials.Material__0012382357172440.type = "disney"
scene.materials.Material__0012382357172440.basecolor = "0.7 0.7 0.7"
scene.materials.Material__0012382357172440.subsurface = "0"
scene.materials.Material__0012382357172440.roughness = "0.2"
scene.materials.Material__0012382357172440.metallic = "0"
scene.materials.Material__0012382357172440.specular = "0.5"
scene.materials.Material__0012382357172440.speculartint = "0"
scene.materials.Material__0012382357172440.clearcoat = "0"
scene.materials.Material__0012382357172440.clearcoatgloss = "1"
scene.materials.Material__0012382357172440.anisotropic = "0"
scene.materials.Material__0012382357172440.sheen = "0"
scene.materials.Material__0012382357172440.sheentint = "0"
scene.materials.Material__0012382357172440.transparency.shadow = 0 0 0
scene.materials.Material__0012382357172440.id = 6728256
scene.materials.Material__0012382357172440.emission.gain = 1 1 1
scene.materials.Material__0012382357172440.emission.power = 0
scene.materials.Material__0012382357172440.emission.normalizebycolor = 1
scene.materials.Material__0012382357172440.emission.efficency = 0
scene.materials.Material__0012382357172440.emission.theta = 90
scene.materials.Material__0012382357172440.emission.id = 0
scene.materials.Material__0012382357172440.emission.importance = 1
scene.materials.Material__0012382357172440.emission.temperature = -1
scene.materials.Material__0012382357172440.emission.temperature.normalize = 0
scene.materials.Material__0012382357172440.emission.directlightsampling.type = "AUTO"
scene.materials.Material__0012382357172440.visibility.indirect.diffuse.enable = 1
scene.materials.Material__0012382357172440.visibility.indirect.glossy.enable = 1
scene.materials.Material__0012382357172440.visibility.indirect.specular.enable = 1
scene.materials.Material__0012382357172440.shadowcatcher.enable = 0
scene.materials.Material__0012382357172440.shadowcatcher.onlyinfinitelights = 0
scene.materials.Material__0012382357172440.photongi.enable = 1
scene.materials.Material__0012382357172440.holdout.enable = 0
scene.objects.23823611086320.material = "Material2382357175256"
scene.objects.23823611086320.ply = "mesh-00000.ply"
scene.objects.23823611086320.camerainvisible = 0
scene.objects.23823611086320.id = 1326487202
scene.objects.23823611086320.appliedtransformation = 1 0 0 0 0 1 0 0 0 0 1 0 0 0 1 1
scene.objects.23823611279760.material = "Material__0012382357172440"
scene.objects.23823611279760.ply = "mesh-00001.ply"
scene.objects.23823611279760.camerainvisible = 0
scene.objects.23823611279760.id = 3772660237
scene.objects.23823611279760.appliedtransformation = 5 0 0 0 0 5 0 0 0 0 5 0 0 0 0 1
"""

    # LuxCore render configuration (.cfg) template; renderengine BAKECPU
    # with two COMBINED bake maps targeting the two objects above.
    export.config = """context.verbose = 1
accelerator.type = "AUTO"
accelerator.instances.enable = 1
accelerator.motionblur.enable = 1
accelerator.bvh.builder.type = "EMBREE_BINNED_SAH"
accelerator.bvh.treetype = 4
accelerator.bvh.costsamples = 0
accelerator.bvh.isectcost = 80
accelerator.bvh.travcost = 10
accelerator.bvh.emptybonus = 0.5
scene.epsilon.min = "1e-05"
scene.epsilon.max = "0.1"
scene.file = "scene.scn"
images.scale = 1
lightstrategy.type = "LOG_POWER"
native.threads.count = 8
renderengine.type = "BAKECPU"
path.pathdepth.total = "7"
path.pathdepth.diffuse = "5"
path.pathdepth.glossy = "5"
path.pathdepth.specular = "6"
path.hybridbackforward.enable = "0"
path.hybridbackforward.partition = "0.8"
path.hybridbackforward.glossinessthreshold = "0.049"
path.russianroulette.depth = 3
path.russianroulette.cap = 0.5
path.clamping.variance.maxvalue = 0
path.forceblackbackground.enable = "0"
sampler.type = "SOBOL"
sampler.imagesamples.enable = 1
sampler.sobol.adaptive.strength = "0.9"
sampler.sobol.adaptive.userimportanceweight = 0.75
sampler.sobol.bucketsize = "16"
sampler.sobol.tilesize = "16"
sampler.sobol.supersampling = "1"
sampler.sobol.overlapping = "1"
path.photongi.sampler.type = "METROPOLIS"
path.photongi.photon.maxcount = 100000000
path.photongi.photon.maxdepth = 4
path.photongi.photon.time.start = 0
path.photongi.photon.time.end = -1
path.photongi.visibility.lookup.radius = 0
path.photongi.visibility.lookup.normalangle = 10
path.photongi.visibility.targethitrate = 0.99
path.photongi.visibility.maxsamplecount = 1048576
path.photongi.glossinessusagethreshold = 0.05
path.photongi.indirect.enabled = 0
path.photongi.indirect.maxsize = 0
path.photongi.indirect.haltthreshold = 0.05
path.photongi.indirect.lookup.radius = 0
path.photongi.indirect.lookup.normalangle = 10
path.photongi.indirect.usagethresholdscale = 8
path.photongi.indirect.filter.radiusscale = 3
path.photongi.caustic.enabled = 0
path.photongi.caustic.maxsize = 100000
path.photongi.caustic.updatespp = 8
path.photongi.caustic.updatespp.radiusreduction = 0.96
path.photongi.caustic.updatespp.minradius = 0.003
path.photongi.caustic.lookup.radius = 0.15
path.photongi.caustic.lookup.normalangle = 10
path.photongi.debug.type = "none"
path.photongi.persistent.file = ""
path.photongi.persistent.safesave = 1
film.filter.type = "BLACKMANHARRIS"
film.filter.width = 2
opencl.platform.index = -1
film.width = 960
film.height = 600
film.safesave = 1
film.noiseestimation.step = "32"
film.noiseestimation.warmup = "8"
film.noiseestimation.filter.scale = 4
batch.haltnoisethreshold = 0.01
batch.haltnoisethreshold.step = 64
batch.haltnoisethreshold.warmup = 64
batch.haltnoisethreshold.filter.enable = 1
batch.haltnoisethreshold.stoprendering.enable = 1
batch.halttime = "0"
batch.haltspp = 32
film.outputs.safesave = 1
film.outputs.0.type = "RGB_IMAGEPIPELINE"
film.outputs.0.filename = "RGB_IMAGEPIPELINE_0.png"
film.outputs.0.index = "0"
film.imagepipelines.000.0.type = "NOP"
film.imagepipelines.000.1.type = "TONEMAP_LINEAR"
film.imagepipelines.000.1.scale = "1"
film.imagepipelines.000.2.type = "GAMMA_CORRECTION"
film.imagepipelines.000.2.value = "2.2"
film.imagepipelines.000.radiancescales.0.enabled = "1"
film.imagepipelines.000.radiancescales.0.globalscale = "1"
film.imagepipelines.000.radiancescales.0.rgbscale = "1" "1" "1"
periodicsave.film.outputs.period = 0
periodicsave.film.period = 0
periodicsave.film.filename = "film.flm"
periodicsave.resumerendering.period = 0
periodicsave.resumerendering.filename = "rendering.rsm"
resumerendering.filesafe = 1
debug.renderconfig.parse.print = 0
debug.scene.parse.print = 0
screen.refresh.interval = 100
screen.tool.type = "CAMERA_EDIT"
screen.tiles.pending.show = 1
screen.tiles.converged.show = 0
screen.tiles.notconverged.show = 0
screen.tiles.passcount.show = 0
screen.tiles.error.show = 0
bake.minmapautosize = 64
bake.maxmapautosize = 1024
bake.powerof2autosize.enable = 1
bake.skipexistingmapfiles = 1
film.imagepipelines.1.0.type = "NOP"
bake.maps.0.type = "COMBINED"
bake.maps.0.filename = "23823611086320.exr"
bake.maps.0.imagepipelineindex = 1
bake.maps.0.width = 512
bake.maps.0.height = 512
bake.maps.0.autosize.enabled = 1
bake.maps.0.uvindex = 0
bake.maps.0.objectnames = "23823611086320"
bake.maps.1.type = "COMBINED"
bake.maps.1.filename = "23823611279760.exr"
bake.maps.1.imagepipelineindex = 1
bake.maps.1.width = 512
bake.maps.1.height = 512
bake.maps.1.autosize.enabled = 1
bake.maps.1.uvindex = 0
bake.maps.1.objectnames = "23823611279760"
"""

View File

@ -0,0 +1,243 @@
import bpy, math
#from . import cache
from .. utility import *
def init(self, prev_container):
    """Entry point for Octane bake preparation.

    Runs the world/light/mesh configuration steps in order. The
    store_existing/set_settings calls are currently disabled.
    """
    #store_existing(prev_container)
    #set_settings()
    configure_world()
    configure_lights()
    configure_meshes(self)
def configure_world():
    """Placeholder: world/environment setup for Octane baking (not implemented)."""
    pass
def configure_lights():
    """Placeholder: light setup for Octane baking (not implemented)."""
    pass
def configure_meshes(self):
    """Prepare every lightmap-enabled mesh for Octane baking.

    Purges unused/backup materials and stale baked images, assigns each
    lightmap-enabled mesh an Octane baking group id, restores backed-up
    original materials, and ensures a lightmap UV channel exists
    (creating and unwrapping one if needed). Finally sets up the bake
    camera via set_camera().
    """
    # Purge materials with no users.
    for mat in bpy.data.materials:
        if mat.users < 1:
            bpy.data.materials.remove(mat)

    # Purge leftover hidden "._Original" backup materials from earlier runs.
    for mat in bpy.data.materials:
        if mat.name.startswith("."):
            if "_Original" in mat.name:
                bpy.data.materials.remove(mat)

    # Purge stale baked images.
    for image in bpy.data.images:
        if image.name.endswith("_baked"):
            bpy.data.images.remove(image, do_unlink=True)

    iterNum = 1
    currentIterNum = 0

    scene = bpy.context.scene

    for obj in scene.objects:
        if obj.type == "MESH":
            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:

                obj.hide_select = False #Remember to toggle this back
                currentIterNum = currentIterNum + 1
                # 0 doesn't exist, 1 is neutral and 2 is the first baked object.
                obj.octane.baking_group_id = 1 + currentIterNum
                print("Obj: " + obj.name + " set to baking group: " + str(obj.octane.baking_group_id))

                # Swap back the backed-up original materials if present.
                for slot in obj.material_slots:
                    if "." + slot.name + '_Original' in bpy.data.materials:
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print("The material: " + slot.name + " shifted to " + "." + slot.name + '_Original')
                        slot.material = bpy.data.materials["." + slot.name + '_Original']

                objWasHidden = False

                #For some reason, a Blender bug might prevent invisible objects from being smart projected
                #We will turn the object temporarily visible
                obj.hide_viewport = False
                obj.hide_set(False)

                #Configure selection
                bpy.ops.object.select_all(action='DESELECT')
                bpy.context.view_layer.objects.active = obj
                obj.select_set(True)

                obs = bpy.context.view_layer.objects
                active = obs.active

                uv_layers = obj.data.uv_layers

                if not obj.TLM_ObjectProperties.tlm_use_default_channel:
                    uv_channel = obj.TLM_ObjectProperties.tlm_uv_channel
                else:
                    uv_channel = "UVMap_Lightmap"

                if not uv_channel in uv_layers:
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("UV map created for obj: " + obj.name)
                    uvmap = uv_layers.new(name=uv_channel)
                    uv_layers.active_index = len(uv_layers) - 1
                    # BUGFIX: active_index is an int; concatenating it to a
                    # str raised TypeError. Wrap it in str().
                    print("Setting active UV to: " + str(uv_layers.active_index))

                    #If lightmap
                    if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "Lightmap":
                        bpy.ops.uv.lightmap_pack('EXEC_SCREEN', PREF_CONTEXT='ALL_FACES', PREF_MARGIN_DIV=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin)

                    #If smart project
                    elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "SmartProject":
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print("Smart Project B")
                        bpy.ops.object.select_all(action='DESELECT')
                        obj.select_set(True)
                        bpy.ops.object.mode_set(mode='EDIT')
                        bpy.ops.mesh.select_all(action='SELECT')
                        #API changes in 2.91 causes errors:
                        if (2, 91, 0) > bpy.app.version:
                            bpy.ops.uv.smart_project(angle_limit=45.0, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, user_area_weight=1.0, use_aspect=True, stretch_to_bounds=False)
                        else:
                            angle = math.radians(45.0)
                            bpy.ops.uv.smart_project(angle_limit=angle, island_margin=obj.TLM_ObjectProperties.tlm_mesh_unwrap_margin, area_weight=1.0, correct_aspect=True, scale_to_bounds=False)
                        bpy.ops.mesh.select_all(action='DESELECT')
                        bpy.ops.object.mode_set(mode='OBJECT')

                    elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "Xatlas":
                        Unwrap_Lightmap_Group_Xatlas_2_headless_call(obj)

                    elif obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print("ATLAS GROUP: " + obj.TLM_ObjectProperties.tlm_atlas_pointer)

                    else: #if copy existing
                        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                            print("Copied Existing UV Map for object: " + obj.name)

                else:
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Existing UV map found for obj: " + obj.name)
                    # Activate the existing lightmap UV channel.
                    for i in range(0, len(uv_layers)):
                        if uv_layers[i].name == uv_channel:
                            uv_layers.active_index = i
                            break

    set_camera()
def set_camera():
    """Create (once) and activate the Octane bake camera."""
    cam_name = "TLM-BakeCam"
    camobj_name = "TLM-BakeCam-obj"
    # BUGFIX: the original tested `cam_name in bpy.context.scene`, which
    # checks the scene's custom properties rather than its objects, so an
    # existing bake camera was never detected and a duplicate was created
    # on every run. The sibling bake() looks the camera up by object name
    # in scene.objects, so test the same collection here.
    if camobj_name not in bpy.context.scene.objects:
        camera = bpy.data.cameras.new(cam_name)
        cam_obj = bpy.data.objects.new(camobj_name, camera)
        bpy.context.collection.objects.link(cam_obj)
        cam_obj.location = ((0, 0, 0))
        bpy.context.scene.camera = cam_obj
def set_settings():
    """Apply Cycles render/bake settings for the selected TLM quality preset."""
    scene = bpy.context.scene
    cycles = scene.cycles
    scene.render.engine = "CYCLES"
    sceneProperties = scene.TLM_SceneProperties
    engineProperties = scene.TLM_EngineProperties
    cycles.device = scene.TLM_EngineProperties.tlm_mode

    # Larger tiles for GPU rendering, small tiles for CPU.
    tile_size = 256 if cycles.device == "GPU" else 32
    scene.render.tile_x = tile_size
    scene.render.tile_y = tile_size

    # Quality preset -> (samples, bounce depth, caustics enabled).
    quality_presets = {
        "0": (32, 1, False),
        "1": (64, 2, False),
        "2": (512, 2, False),
        "3": (1024, 256, False),
        "4": (2048, 512, True),
    }
    preset = quality_presets.get(engineProperties.tlm_quality)
    if preset is None:
        # "Custom" quality: leave the user's Cycles settings untouched.
        return

    samples, bounces, caustics = preset
    cycles.samples = samples
    cycles.max_bounces = bounces
    cycles.diffuse_bounces = bounces
    cycles.glossy_bounces = bounces
    cycles.transparent_max_bounces = bounces
    cycles.transmission_bounces = bounces
    cycles.volume_bounces = bounces
    cycles.caustics_reflective = caustics
    cycles.caustics_refractive = caustics
def store_existing(prev_container):
    """Snapshot render settings and selection state into *prev_container*.

    The stored list is positional and is expected to be restored elsewhere
    in the same order.
    """
    scene = bpy.context.scene
    cycles = scene.cycles

    # Names of all currently selected objects.
    selected = [obj.name for obj in bpy.context.scene.objects if obj.select_get()]

    prev_container["settings"] = [
        cycles.samples,
        cycles.max_bounces,
        cycles.diffuse_bounces,
        cycles.glossy_bounces,
        cycles.transparent_max_bounces,
        cycles.transmission_bounces,
        cycles.volume_bounces,
        cycles.caustics_reflective,
        cycles.caustics_refractive,
        cycles.device,
        scene.render.engine,
        bpy.context.view_layer.objects.active,
        selected,
        [scene.render.resolution_x, scene.render.resolution_y]
    ]

View File

@ -0,0 +1,71 @@
import bpy, os
def bake():
    """Render a lightmap for every lightmap-enabled mesh using Octane.

    Requires the "TLM-BakeCam-obj" camera created by set_camera(); each
    object's baking group id is selected on the camera and a full render
    is written as an .hdr file to the configured save directory.
    """
    cam_name = "TLM-BakeCam-obj"

    if cam_name in bpy.context.scene.objects:
        print("Camera found...")
        camera = bpy.context.scene.objects[cam_name]
        camera.data.octane.baking_camera = True

        # Clear any existing selection.
        for obj in bpy.context.scene.objects:
            bpy.ops.object.select_all(action='DESELECT')
            obj.select_set(False)

        # Count bakeable objects; counters start offset by the two
        # reserved Octane baking group ids (see configure_meshes).
        iterNum = 2
        currentIterNum = 1

        for obj in bpy.context.scene.objects:
            if obj.type == "MESH":
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    iterNum = iterNum + 1

        if iterNum > 1:
            iterNum = iterNum - 1

        for obj in bpy.context.scene.objects:
            if obj.type == 'MESH' and obj.name in bpy.context.view_layer.objects:
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    currentIterNum = currentIterNum + 1
                    scene = bpy.context.scene

                    print("Baking obj: " + obj.name)
                    print("Baking ID: " + str(currentIterNum) + " out of " + str(iterNum))

                    # Select this object's baking group on the bake camera.
                    bpy.ops.object.select_all(action='DESELECT')
                    camera.data.octane.baking_group_id = currentIterNum

                    savedir = os.path.dirname(bpy.data.filepath)
                    user_dir = scene.TLM_Engine3Properties.tlm_lightmap_savedir
                    directory = os.path.join(savedir, user_dir)

                    # Bakes are written as 32-bit HDR images.
                    image_settings = bpy.context.scene.render.image_settings
                    image_settings.file_format = "HDR"
                    image_settings.color_depth = '32'

                    filename = os.path.join(directory, "LM") + "_" + obj.name + ".hdr"
                    bpy.context.scene.render.filepath = filename

                    # Square render at the object's lightmap resolution.
                    resolution = int(obj.TLM_ObjectProperties.tlm_mesh_lightmap_resolution)
                    bpy.context.scene.render.resolution_x = resolution
                    bpy.context.scene.render.resolution_y = resolution

                    bpy.ops.render.render(write_still=True)
    else:
        print("No baking camera found")

    print("Baking in Octane!")

View File

@ -0,0 +1,354 @@
import bpy, os, sys, math, mathutils, importlib
import numpy as np
from . rectpack import newPacker, PackingMode, PackingBin
def postpack():
    """Pack per-object lightmaps into the configured post-bake atlases.

    For every atlas in TLM_PostAtlasList: bin-pack the lightmaps of all
    objects targeting it (rectpack), copy each source lightmap into the
    atlas image with OpenCV, remap each object's lightmap UVs into its
    atlas rectangle, write the atlas to disk and repoint the objects'
    TLM_Lightmap material nodes at it. Requires the optional cv2 module;
    returns 0 (skipping everything) when cv2 is not installed.
    """
    cv_installed = False

    cv2 = importlib.util.find_spec("cv2")

    if cv2 is None:
        print("CV2 not found - Ignoring postpacking")
        return 0
    else:
        cv2 = importlib.__import__("cv2")
        cv_installed = True

    if cv_installed:

        lightmap_directory = os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_EngineProperties.tlm_lightmap_savedir)

        packedAtlas = {}

        #TODO - TEST WITH ONLY 1 ATLAS AT FIRST (1 Atlas for each, but only 1 bin (no overflow))
        #PackedAtlas = Packer
        #Each atlas has bins
        #Each bins has rects
        #Each rect corresponds to a pack_object

        scene = bpy.context.scene

        sceneProperties = scene.TLM_SceneProperties

        # Filename suffix of the most-processed lightmap variant available.
        end = "_baked"

        if sceneProperties.tlm_denoise_use:
            end = "_denoised"

        if sceneProperties.tlm_filtering_use:
            end = "_filtered"

        # Default to HDR input; 8-bit encoded variants override these below.
        formatEnc = ".hdr"
        image_channel_depth = cv2.IMREAD_ANYDEPTH
        linear_straight = False

        if sceneProperties.tlm_encoding_use and scene.TLM_EngineProperties.tlm_bake_mode != "Background":
            if sceneProperties.tlm_encoding_device == "CPU":
                if sceneProperties.tlm_encoding_mode_a == "HDR":
                    if sceneProperties.tlm_format == "EXR":
                        formatEnc = ".exr"
                if sceneProperties.tlm_encoding_mode_a == "RGBM":
                    formatEnc = "_encoded.png"
                    image_channel_depth = cv2.IMREAD_UNCHANGED
            else:
                if sceneProperties.tlm_encoding_mode_b == "HDR":
                    if sceneProperties.tlm_format == "EXR":
                        formatEnc = ".exr"
                if sceneProperties.tlm_encoding_mode_b == "LogLuv":
                    formatEnc = "_encoded.png"
                    image_channel_depth = cv2.IMREAD_UNCHANGED
                    linear_straight = True
                if sceneProperties.tlm_encoding_mode_b == "RGBM":
                    formatEnc = "_encoded.png"
                    image_channel_depth = cv2.IMREAD_UNCHANGED
                if sceneProperties.tlm_encoding_mode_b == "RGBD":
                    formatEnc = "_encoded.png"
                    image_channel_depth = cv2.IMREAD_UNCHANGED

        packer = {}

        for atlas in bpy.context.scene.TLM_PostAtlasList: #For each atlas

            packer[atlas.name] = newPacker(PackingMode.Offline, PackingBin.BFF, rotation=False)

            bpy.app.driver_namespace["logman"].append("Postpacking: " + str(atlas.name))

            if scene.TLM_EngineProperties.tlm_setting_supersample == "2x":
                supersampling_scale = 2
            elif scene.TLM_EngineProperties.tlm_setting_supersample == "4x":
                supersampling_scale = 4
            else:
                supersampling_scale = 1

            atlas_resolution = int(int(atlas.tlm_atlas_lightmap_resolution) / int(scene.TLM_EngineProperties.tlm_resolution_scale) * int(supersampling_scale))

            # Single bin per atlas: everything must fit in one image.
            packer[atlas.name].add_bin(atlas_resolution, atlas_resolution, 1)

            #AtlasList same name prevention?
            rect = []

            #For each object that targets the atlas
            for obj in bpy.context.scene.objects:
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    if obj.TLM_ObjectProperties.tlm_postpack_object:
                        if obj.TLM_ObjectProperties.tlm_postatlas_pointer == atlas.name:
                            res = int(int(obj.TLM_ObjectProperties.tlm_mesh_lightmap_resolution) / int(scene.TLM_EngineProperties.tlm_resolution_scale) * int(supersampling_scale))
                            rect.append((res, res, obj.name))

            #Add rect to bin
            for r in rect:
                packer[atlas.name].add_rect(*r)

            print("Rects: " + str(rect))
            print("Bins:" + str(packer[atlas.name]))

            # Destination atlas buffer; float32 RGB for HDR output.
            packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 3), dtype="float32")

            #Continue here...overwrite value if using 8-bit encoding
            if sceneProperties.tlm_encoding_use:
                if sceneProperties.tlm_encoding_device == "CPU":
                    if sceneProperties.tlm_encoding_mode_a == "RGBM":
                        packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 4), dtype=np.uint8)
                    if sceneProperties.tlm_encoding_mode_a == "RGBD":
                        packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 4), dtype=np.uint8)
                if sceneProperties.tlm_encoding_device == "GPU":
                    if sceneProperties.tlm_encoding_mode_b == "RGBM":
                        packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 4), dtype=np.uint8)
                    if sceneProperties.tlm_encoding_mode_b == "RGBD":
                        packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 4), dtype=np.uint8)
                    if sceneProperties.tlm_encoding_mode_b == "LogLuv":
                        packedAtlas[atlas.name] = np.zeros((atlas_resolution,atlas_resolution, 4), dtype=np.uint8)

            packer[atlas.name].pack()

            # rect_list() entries: (bin, x, y, w, h, rect_id=object name).
            for idy, rect in enumerate(packer[atlas.name].rect_list()):

                print("Packing atlas at: " + str(rect))

                aob = rect[5]

                src = cv2.imread(os.path.join(lightmap_directory, aob + end + formatEnc), image_channel_depth) #"_baked.hdr"

                print("Obj name is: " + aob)

                x,y,w,h = rect[1],rect[2],rect[3],rect[4]

                print("Obj Shape: " + str(src.shape))
                print("Atlas shape: " + str(packedAtlas[atlas.name].shape))
                print("Bin Pos: ",x,y,w,h)

                # Blit the object's lightmap into its atlas rectangle.
                packedAtlas[atlas.name][y:h+y, x:w+x] = src

                obj = bpy.data.objects[aob]

                # Activate the object's lightmap UV channel.
                for idx, layer in enumerate(obj.data.uv_layers):
                    if not obj.TLM_ObjectProperties.tlm_use_default_channel:
                        uv_channel = obj.TLM_ObjectProperties.tlm_uv_channel
                    else:
                        uv_channel = "UVMap_Lightmap"

                    if layer.name == uv_channel:
                        obj.data.uv_layers.active_index = idx
                        print("UVLayer set to: " + str(obj.data.uv_layers.active_index))

                atlasRes = atlas_resolution
                texRes = rect[3] #Any dimension w/h (square)
                ratio = texRes/atlasRes

                # Shrink the object's UVs to its rect size, pivoting at (0,1)
                # (top-left in UV space).
                scaleUV(obj.data.uv_layers.active, (ratio, ratio), (0,1))

                print(rect)

                #Postpack error here...
                # Translate the scaled UVs to the rect's position in the atlas.
                for uv_verts in obj.data.uv_layers.active.data:
                    #For each vert

                    #NOTES! => X FUNKER
                    #TODO => Y

                    #[0] = bin index
                    #[1] = x
                    #[2] = y (? + 1)
                    #[3] = w
                    #[4] = h

                    vertex_x = uv_verts.uv[0] + (rect[1]/atlasRes) #WORKING!
                    vertex_y = uv_verts.uv[1] - (rect[2]/atlasRes) # + ((rect[2]-rect[4])/atlasRes) # # + (1-((rect[1]-rect[4])/atlasRes))

                    #tr = "X: {0} + ({1}/{2})".format(uv_verts.uv[0],rect[1],atlasRes)
                    #print(tr)

                    #vertex_y = 1 - (uv_verts.uv[1]) uv_verts.uv[1] + (rect[1]/atlasRes)

                    #SET UV LAYER TO
                    # atlasRes = atlas_resolution
                    # texRes = rect[3] #Any dimension w/h (square)
                    # print(texRes)
                    # #texRes = 0.0,0.0
                    # #x,y,w,z = x,y,texRes,texRes
                    # x,y,w,z = x,y,0,0
                    # ratio = atlasRes/texRes
                    # if x == 0:
                    #     x_offset = 0
                    # else:
                    #     x_offset = 1/(atlasRes/x)
                    # if y == 0:
                    #     y_offset = 0
                    # else:
                    #     y_offset = 1/(atlasRes/y)
                    # vertex_x = (uv_verts.uv[0] * 1/(ratio)) + x_offset
                    # vertex_y = (1 - ((uv_verts.uv[1] * 1/(ratio)) + y_offset))

                    #TO FIX:
                    #SELECT ALL
                    #Scale Y => -1

                    uv_verts.uv[0] = vertex_x
                    uv_verts.uv[1] = vertex_y

                #scaleUV(obj.data.uv_layers.active, (1, -1), getBoundsCenter(obj.data.uv_layers.active))
                #print(getCenter(obj.data.uv_layers.active))

            cv2.imwrite(os.path.join(lightmap_directory, atlas.name + end + formatEnc), packedAtlas[atlas.name])
            print("Written: " + str(os.path.join(lightmap_directory, atlas.name + end + formatEnc)))

            #Change the material for each material, slot
            # Repoint every targeting object's TLM_Lightmap node at the
            # atlas image and delete the now-redundant per-object lightmap.
            for obj in bpy.context.scene.objects:
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    if obj.TLM_ObjectProperties.tlm_postpack_object:
                        if obj.TLM_ObjectProperties.tlm_postatlas_pointer == atlas.name:
                            for slot in obj.material_slots:
                                nodetree = slot.material.node_tree

                                for node in nodetree.nodes:
                                    if node.name == "TLM_Lightmap":
                                        existing_image = node.image
                                        atlasImage = bpy.data.images.load(os.path.join(lightmap_directory, atlas.name + end + formatEnc), check_existing=True)
                                        if linear_straight:
                                            if atlasImage.colorspace_settings.name != 'Linear':
                                                atlasImage.colorspace_settings.name = 'Linear'
                                        node.image = atlasImage

                                        #print("Seeking for: " + atlasImage.filepath_raw)
                                        #print(x)

                                        if(os.path.exists(os.path.join(lightmap_directory, obj.name + end + formatEnc))):
                                            os.remove(os.path.join(lightmap_directory, obj.name + end + formatEnc))

                                        existing_image.user_clear()

            #Add dilation map here...
            for obj in bpy.context.scene.objects:
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    if obj.TLM_ObjectProperties.tlm_postpack_object:
                        if obj.TLM_ObjectProperties.tlm_postatlas_pointer == atlas.name:
                            if atlas.tlm_atlas_dilation:
                                for slot in obj.material_slots:
                                    nodetree = slot.material.node_tree

                                    for node in nodetree.nodes:
                                        if node.name == "TLM_Lightmap":
                                            existing_image = node.image
                                            atlasImage = bpy.data.images.load(os.path.join(lightmap_directory, atlas.name + end + formatEnc), check_existing=True)

                                            img = cv2.imread(atlasImage.filepath_raw, image_channel_depth)
                                            kernel = np.ones((5,5), dtype="float32")
                                            img_dilation = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
                                            # NOTE(review): imshow/waitKey(0) opens a debug
                                            # window and blocks until a key is pressed.
                                            cv2.imshow('Dilation', img_dilation)
                                            cv2.waitKey(0)

                                            print("TODO: Adding dilation for: " + obj.name)

        #TODO MASKING OPTION!

    else:
        # NOTE(review): unreachable - the cv2-missing case returns above,
        # so cv_installed is always True here.
        print("OpenCV not installed. Skipping postpacking process.")
def getCenter(uv_layer):
    """Return the centroid (mean u, mean v) of all UV verts in *uv_layer*.

    Raises ZeroDivisionError for an empty layer (unchanged behaviour).
    """
    # 'count' replaces a local that was named 'len', shadowing the builtin.
    total_x, total_y = 0, 0
    count = 0
    for uv_verts in uv_layer.data:
        total_x += uv_verts.uv[0]
        total_y += uv_verts.uv[1]
        count += 1
    center_x = total_x / count
    center_y = total_y / count
    return (center_x, center_y)
def getBoundsCenter(uv_layer):
    """Return the center of the axis-aligned bounding box of the UVs.

    The original seeded min/max from four redundant O(n) getCenter()
    passes; since the centroid always lies inside the bounds, a direct
    single scan of the verts yields the identical result. Raises
    ValueError for an empty layer (originally ZeroDivisionError).
    """
    us = [uv_verts.uv[0] for uv_verts in uv_layer.data]
    vs = [uv_verts.uv[1] for uv_verts in uv_layer.data]
    min_x, max_x = min(us), max(us)
    min_y, max_y = min(vs), max(vs)
    center_x = (max_x - min_x) / 2 + min_x
    center_y = (max_y - min_y) / 2 + min_y
    return (center_x, center_y)
def scale2D(v, s, p):
    """Scale the 2D point *v* componentwise by *s* about the pivot *p*."""
    scaled_x = p[0] + s[0] * (v[0] - p[0])
    scaled_y = p[1] + s[1] * (v[1] - p[1])
    return (scaled_x, scaled_y)
def scaleUV(uvMap, scale, pivot):
    """Scale every UV coordinate in *uvMap* around *pivot*, in place.

    Arguments:
        uvMap: a bpy UV layer; each ``uvMap.data[i].uv`` is rewritten.
        scale: (sx, sy) scale factors.
        pivot: (px, py) pivot point.
    """
    # Iterate the collection directly instead of the original
    # `for i in range(len(...))` index loop.
    for loop in uvMap.data:
        loop.uv = scale2D(loop.uv, scale, pivot)

View File

@ -0,0 +1,5 @@
import bpy, os, re, sys
def prepare(obj):
    """Stub: announce that *obj* is being prepared (no other work yet)."""
    print("Preparing: " + obj.name)

View File

@ -0,0 +1,23 @@
from .guillotine import GuillotineBssfSas, GuillotineBssfLas, \
GuillotineBssfSlas, GuillotineBssfLlas, GuillotineBssfMaxas, \
GuillotineBssfMinas, GuillotineBlsfSas, GuillotineBlsfLas, \
GuillotineBlsfSlas, GuillotineBlsfLlas, GuillotineBlsfMaxas, \
GuillotineBlsfMinas, GuillotineBafSas, GuillotineBafLas, \
GuillotineBafSlas, GuillotineBafLlas, GuillotineBafMaxas, \
GuillotineBafMinas
from .maxrects import MaxRectsBl, MaxRectsBssf, MaxRectsBaf, MaxRectsBlsf
from .skyline import SkylineMwf, SkylineMwfl, SkylineBl, \
SkylineBlWm, SkylineMwfWm, SkylineMwflWm
from .packer import SORT_AREA, SORT_PERI, SORT_DIFF, SORT_SSIDE, \
SORT_LSIDE, SORT_RATIO, SORT_NONE
from .packer import PackerBNF, PackerBFF, PackerBBF, PackerOnlineBNF, \
PackerOnlineBFF, PackerOnlineBBF, PackerGlobal, newPacker, \
PackingMode, PackingBin, float2dec

View File

@ -0,0 +1,148 @@
import heapq # heapq.heappush, heapq.heappop
from .packer import newPacker, PackingMode, PackingBin, SORT_LSIDE
from .skyline import SkylineBlWm
class Enclose(object):
    """Compute a minimal enclosing container for a set of rectangles.

    Candidate container widths are generated from the rectangles' sides,
    each candidate is packed with a bottom-left skyline packer, and the
    smallest-area successful container wins.
    """

    def __init__(self, rectangles=(), max_width=None, max_height=None, rotation=True):
        """
        Arguments:
            rectangles (iterable): Rectangles to be enveloped
                [(width1, height1), (width2, height2), ...]
            max_width (number|None): Enveloping rectangle max allowed width.
            max_height (number|None): Enveloping rectangle max allowed height.
            rotation (boolean): Enable/Disable rectangle rotation.
        """
        # Enclosing rectangle max width
        self._max_width = max_width
        # Enclosing rectangle max height
        self._max_height = max_height
        # Enable or disable rectangle rotation
        self._rotation = rotation
        # Default packing algorithm
        self._pack_algo = SkylineBlWm
        # Rectangles to enclose [(width, height), ...]
        # FIX: the default argument was a mutable list ([]); a tuple avoids
        # the shared-mutable-default pitfall while staying iterable.
        self._rectangles = []
        for r in rectangles:
            self.add_rect(*r)

    def _container_candidates(self):
        """Generate the container candidate list.

        Returns:
            tuple list: [(width1, height1), (width2, height2), ...]
        """
        if not self._rectangles:
            return []

        if self._rotation:
            sides = sorted(side for rect in self._rectangles for side in rect)
            max_height = sum(max(r[0], r[1]) for r in self._rectangles)
            min_width = max(min(r[0], r[1]) for r in self._rectangles)
            max_width = max_height
        else:
            sides = sorted(r[0] for r in self._rectangles)
            max_height = sum(r[1] for r in self._rectangles)
            min_width = max(r[0] for r in self._rectangles)
            max_width = sum(sides)

        if self._max_width and self._max_width < max_width:
            max_width = self._max_width
        if self._max_height and self._max_height < max_height:
            max_height = self._max_height

        # FIX: the original strict assert (max_width > min_width) crashed for
        # a single rectangle, where both bounds legitimately coincide.
        assert(max_width >= min_width)

        # Generate initial container widths
        candidates = [max_width, min_width]
        width = 0
        for s in reversed(sides):
            width += s
            candidates.append(width)
        width = 0
        for s in sides:
            width += s
            candidates.append(width)
        candidates.append(max_width)
        candidates.append(min_width)

        # Remove duplicates (order-preserving) and widths out of range
        seen = set()
        seen_add = seen.add
        candidates = [x for x in candidates if not(x in seen or seen_add(x))]
        candidates = [x for x in candidates if not(x > max_width or x < min_width)]

        # Remove candidates too small to fit all the rectangles
        min_area = sum(r[0] * r[1] for r in self._rectangles)
        return [(c, max_height) for c in candidates if c * max_height >= min_area]

    def _refine_candidate(self, width, height):
        """
        Use bottom-left packing algorithm to find a lower height for the
        container.

        Arguments:
            width (number): candidate container width
            height (number): candidate container height

        Returns:
            tuple (width, height, PackingAlgorithm) or None when not all
            rectangles fit.
        """
        packer = newPacker(PackingMode.Offline, PackingBin.BFF,
            pack_algo=self._pack_algo, sort_algo=SORT_LSIDE,
            rotation=self._rotation)
        packer.add_bin(width, height)

        for r in self._rectangles:
            packer.add_rect(*r)

        packer.pack()

        # Check all rectangles were packed
        if len(packer[0]) != len(self._rectangles):
            return None

        # Shrink height down to the highest placed rectangle
        new_height = max(packer[0], key=lambda x: x.top).top
        return (width, new_height, packer)

    def generate(self):
        """Pack into every candidate container and return the smallest-area
        packer (annotated with .width/.height), or None if nothing fits."""
        candidates = self._container_candidates()
        if not candidates:
            return None

        containers = [self._refine_candidate(*c) for c in candidates]
        containers = [c for c in containers if c]
        if not containers:
            return None

        width, height, packer = min(containers, key=lambda x: x[0] * x[1])
        packer.width = width
        packer.height = height
        return packer

    def add_rect(self, width, height):
        """
        Add another rectangle to be enclosed.

        Arguments:
            width (number): Rectangle width
            height (number): Rectangle height
        """
        self._rectangles.append((width, height))

View File

@ -0,0 +1,344 @@
from math import sqrt
class Point(object):
    """A simple 2D point with Euclidean distance helpers."""

    __slots__ = ('x', 'y')

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        # FIX: the original accessed other.x unconditionally, so comparing
        # against a non-Point raised AttributeError instead of returning a
        # sensible result.
        if not isinstance(other, Point):
            return NotImplemented
        return self.x == other.x and self.y == other.y

    def __hash__(self):
        # FIX: defining __eq__ alone made Point unhashable (Python sets
        # __hash__ to None); Rectangle in this module is hashable, so keep
        # Point consistent with it.
        return hash((self.x, self.y))

    def __repr__(self):
        return "P({}, {})".format(self.x, self.y)

    def distance(self, point):
        """
        Calculate distance to another point
        """
        return sqrt((self.x-point.x)**2+(self.y-point.y)**2)

    def distance_squared(self, point):
        """Squared distance to *point* (cheaper, monotone with distance)."""
        return (self.x-point.x)**2+(self.y-point.y)**2
class Segment(object):
    """A straight 2D segment between two Points."""

    __slots__ = ('start', 'end')

    def __init__(self, start, end):
        """
        Arguments:
            start (Point): Segment start point
            end (Point): Segment end point
        """
        assert(isinstance(start, Point) and isinstance(end, Point))
        self.start = start
        self.end = end

    def __eq__(self, other):
        # FIX: the original had a bare `None` expression here (a no-op)
        # instead of a return, so comparing against a non-Segment fell
        # through and crashed with AttributeError.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.start == other.start and self.end == other.end

    def __repr__(self):
        return "S({}, {})".format(self.start, self.end)

    @property
    def length_squared(self):
        """Faster than length and useful for some comparisons"""
        return self.start.distance_squared(self.end)

    @property
    def length(self):
        return self.start.distance(self.end)

    @property
    def top(self):
        # Largest y coordinate of the two end points.
        return max(self.start.y, self.end.y)

    @property
    def bottom(self):
        # Smallest y coordinate of the two end points.
        return min(self.start.y, self.end.y)

    @property
    def right(self):
        # Largest x coordinate of the two end points.
        return max(self.start.x, self.end.x)

    @property
    def left(self):
        # Smallest x coordinate of the two end points.
        return min(self.start.x, self.end.x)
class HSegment(Segment):
    """Horizontal segment, built from its left-most end point and a length."""

    def __init__(self, start, length):
        """
        Create a horizontal segment given its left-most end point and its
        length.

        Arguments:
            - start (Point): Starting Point
            - length (number): segment length (extends in +x)
        """
        assert(isinstance(start, Point) and not isinstance(length, Point))
        end = Point(start.x + length, start.y)
        super(HSegment, self).__init__(start, end)

    @property
    def length(self):
        """Horizontal extent, end.x - start.x."""
        return self.end.x - self.start.x
class VSegment(Segment):
    """Vertical segment, built from its bottom-most end point and a length."""

    def __init__(self, start, length):
        """
        Create a vertical segment given its bottom-most end point and its
        length.

        Arguments:
            - start (Point): Starting Point
            - length (number): segment length (extends in +y)
        """
        assert(isinstance(start, Point) and not isinstance(length, Point))
        end = Point(start.x, start.y + length)
        super(VSegment, self).__init__(start, end)

    @property
    def length(self):
        """Vertical extent, end.y - start.y."""
        return self.end.y - self.start.y
class Rectangle(object):
    """Basic rectangle primitive class.

    x, y -> lower-LEFT corner coordinates (the original docstring said
    "lower right", but `bottom` returns y and `left` returns x).
    width  - horizontal extent (>= 0)
    height - vertical extent (>= 0)
    """
    __slots__ = ('width', 'height', 'x', 'y', 'rid')

    def __init__(self, x, y, width, height, rid = None):
        """
        Args:
            x (int, float): lower-left corner x coordinate
            y (int, float): lower-left corner y coordinate
            width (int, float): must be >= 0
            height (int, float): must be >= 0
            rid (int): optional user-supplied rectangle id
        """
        assert(height >=0 and width >=0)
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        self.rid = rid

    @property
    def bottom(self):
        """
        Rectangle bottom edge y coordinate
        """
        return self.y

    @property
    def top(self):
        """
        Rectangle top edge y coordinate
        """
        return self.y+self.height

    @property
    def left(self):
        """
        Rectangle left edge x coordinate
        """
        return self.x

    @property
    def right(self):
        """
        Rectangle right edge x coordinate
        """
        return self.x+self.width

    @property
    def corner_top_l(self):
        return Point(self.left, self.top)

    @property
    def corner_top_r(self):
        return Point(self.right, self.top)

    @property
    def corner_bot_r(self):
        return Point(self.right, self.bottom)

    @property
    def corner_bot_l(self):
        return Point(self.left, self.bottom)

    def __lt__(self, other):
        """
        Compare rectangles by area (used for sorting).

        NOTE: ordering is by area while __eq__ compares position AND size,
        so __lt__/__eq__ are deliberately not a consistent total order.
        """
        return self.area() < other.area()

    def __eq__(self, other):
        """
        Rectangles are equal when position and size match exactly.
        (The original docstring claimed "same area", which the code below
        does not implement.)
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.width == other.width and \
            self.height == other.height and \
            self.x == other.x and \
            self.y == other.y)

    def __hash__(self):
        # Consistent with __eq__: hashes position and size.
        return hash((self.x, self.y, self.width, self.height))

    def __iter__(self):
        """
        Iterate through rectangle corners
        """
        yield self.corner_top_l
        yield self.corner_top_r
        yield self.corner_bot_r
        yield self.corner_bot_l

    def __repr__(self):
        return "R({}, {}, {}, {})".format(self.x, self.y, self.width, self.height)

    def area(self):
        """
        Rectangle area
        """
        return self.width * self.height

    def move(self, x, y):
        """
        Move Rectangle to x,y coordinates

        Arguments:
            x (int, float): X coordinate
            y (int, float): Y coordinate
        """
        self.x = x
        self.y = y

    def contains(self, rect):
        """
        Tests if another rectangle is contained by this one

        Arguments:
            rect (Rectangle): The other rectangle

        Returns:
            bool: True if it is contained, False otherwise
        """
        return (rect.y >= self.y and \
            rect.x >= self.x and \
            rect.y+rect.height <= self.y+self.height and \
            rect.x+rect.width <= self.x+self.width)

    def intersects(self, rect, edges=False):
        """
        Detect intersections between this and another Rectangle.

        Parameters:
            rect (Rectangle): The other rectangle.
            edges (bool): True to consider rectangles touching by their
                edges or corners to be intersecting.
                (Should have been named include_touching)

        Returns:
            bool: True if the rectangles intersect, False otherwise
        """
        # Both branches test the separating-axis conditions; the strict
        # comparisons in the edges=True branch let shared edges count as
        # intersection.
        if edges:
            if (self.bottom > rect.top or self.top < rect.bottom or\
                self.left > rect.right or self.right < rect.left):
                return False
        else:
            if (self.bottom >= rect.top or self.top <= rect.bottom or
                self.left >= rect.right or self.right <= rect.left):
                return False
        return True

    def intersection(self, rect, edges=False):
        """
        Returns the rectangle resulting of the intersection between this and another
        rectangle. If the rectangles are only touching by their edges, and the
        argument 'edges' is True the rectangle returned will have an area of 0.
        Returns None if there is no intersection.

        Arguments:
            rect (Rectangle): The other rectangle.
            edges (bool): If True, rectangles touching by their edges are
                considered to be intersecting. In this case a rectangle of
                0 height and/or width will be returned.

        Returns:
            Rectangle: Intersection.
            None: There was no intersection.
        """
        if not self.intersects(rect, edges=edges):
            return None
        bottom = max(self.bottom, rect.bottom)
        left = max(self.left, rect.left)
        top = min(self.top, rect.top)
        right = min(self.right, rect.right)
        return Rectangle(left, bottom, right-left, top-bottom)

    def join(self, other):
        """
        Try to join a rectangle to this one; if the union is also a rectangle
        the operation succeeds and this rectangle is modified in place.

        Arguments:
            other (Rectangle): Rectangle to join

        Returns:
            bool: True when successfully joined, False otherwise
        """
        if self.contains(other):
            return True

        if other.contains(self):
            self.x = other.x
            self.y = other.y
            self.width = other.width
            self.height = other.height
            return True

        if not self.intersects(other, edges=True):
            return False

        # Other rectangle is Up/Down from this (same x-extent)
        if self.left == other.left and self.width == other.width:
            y_min = min(self.bottom, other.bottom)
            y_max = max(self.top, other.top)
            self.y = y_min
            self.height = y_max-y_min
            return True

        # Other rectangle is Right/Left from this (same y-extent)
        if self.bottom == other.bottom and self.height == other.height:
            x_min = min(self.left, other.left)
            x_max = max(self.right, other.right)
            self.x = x_min
            self.width = x_max-x_min
            return True

        return False

View File

@ -0,0 +1,368 @@
from .pack_algo import PackingAlgorithm
from .geometry import Rectangle
import itertools
import operator
class Guillotine(PackingAlgorithm):
    """Implementation of several variants of Guillotine packing algorithm

    For a more detailed explanation of the algorithm used, see:
    Jukka Jylanki - A Thousand Ways to Pack the Bin (February 27, 2010)
    """

    def __init__(self, width, height, rot=True, merge=True, *args, **kwargs):
        """
        Arguments:
            width (int, float): surface width
            height (int, float): surface height
            rot (bool): allow 90-degree rectangle rotation
            merge (bool): when True, freed sections are merged with existing
                free sections to form larger ones
        """
        self._merge = merge
        super(Guillotine, self).__init__(width, height, rot, *args, **kwargs)

    def _add_section(self, section):
        """Adds a new section to the free section list, but before that and if
        section merge is enabled, tries to join the rectangle with all existing
        sections, if successful the resulting section is again merged with the
        remaining sections until the operation fails. The result is then
        appended to the list.

        Arguments:
            section (Rectangle): New free section.
        """
        section.rid = 0
        plen = 0
        # Repeat until a full pass joins nothing (free-list length stops
        # changing); each pass removes every section that was absorbed.
        while self._merge and self._sections and plen != len(self._sections):
            plen = len(self._sections)
            self._sections = [s for s in self._sections if not section.join(s)]
        self._sections.append(section)

    def _split_horizontal(self, section, width, height):
        """For an horizontal split the rectangle is placed in the lower
        left corner of the section (section's xy coordinates), the top
        most side of the rectangle and its horizontal continuation,
        marks the line of division for the split.
        +-----------------+
        |                 |
        |                 |
        |                 |
        |                 |
        +-------+---------+
        |#######|         |
        |#######|         |
        |#######|         |
        +-------+---------+

        If the rectangle width is equal to the section width, only one
        section is created over the rectangle. If the rectangle height is
        equal to the section height, only one section to the right of the
        rectangle is created. If both width and height are equal, no sections
        are created.
        """
        # Creates up to two new empty sections.
        if height < section.height:
            self._add_section(Rectangle(section.x, section.y+height,
                section.width, section.height-height))

        if width < section.width:
            self._add_section(Rectangle(section.x+width, section.y,
                section.width-width, height))

    def _split_vertical(self, section, width, height):
        """For a vertical split the rectangle is placed in the lower
        left corner of the section (section's xy coordinates), the
        right most side of the rectangle and its vertical continuation,
        marks the line of division for the split.
        +-------+---------+
        |       |         |
        |       |         |
        |       |         |
        |       |         |
        +-------+         |
        |#######|         |
        |#######|         |
        |#######|         |
        +-------+---------+

        If the rectangle width is equal to the section width, only one
        section is created over the rectangle. If the rectangle height is
        equal to the section height, only one section to the right of the
        rectangle is created. If both width and height are equal, no sections
        are created.
        """
        # When a section is split, depending on the rectangle size
        # two, one, or no new sections will be created.
        if height < section.height:
            self._add_section(Rectangle(section.x, section.y+height,
                width, section.height-height))

        if width < section.width:
            self._add_section(Rectangle(section.x+width, section.y,
                section.width-width, section.height))

    def _split(self, section, width, height):
        """
        Selects the best split for a section, given a rectangle of dimensions
        width and height, then calls _split_vertical or _split_horizontal
        to do the dirty work.

        Arguments:
            section (Rectangle): Section to split
            width (int, float): Rectangle width
            height (int, float): Rectangle height
        """
        raise NotImplementedError

    def _section_fitness(self, section, width, height):
        """The subclass for each one of the Guillotine selection methods,
        BAF, BLSF.... will override this method; this is here only
        to ensure a valid value is returned if the worst happens.
        """
        raise NotImplementedError

    def _select_fittest_section(self, w, h):
        """Calls _section_fitness for each of the sections in the free section
        list. Returns the section with the minimal fitness value; all the rest
        is boilerplate to make the fitness comparison, to rotate the rectangles,
        and to take into account when _section_fitness returns None because
        the rectangle couldn't be placed.

        Arguments:
            w (int, float): Rectangle width
            h (int, float): Rectangle height

        Returns:
            (section, was_rotated): Returns the tuple
                section (Rectangle): Section with best fitness
                was_rotated (bool): The rectangle was rotated
        """
        fitn = ((self._section_fitness(s, w, h), s, False) for s in self._sections
            if self._section_fitness(s, w, h) is not None)
        fitr = ((self._section_fitness(s, h, w), s, True) for s in self._sections
            if self._section_fitness(s, h, w) is not None)

        if not self.rot:
            fitr = []

        fit = itertools.chain(fitn, fitr)

        try:
            # min raises ValueError when both generators are empty
            # (the rectangle fits no section in either orientation).
            _, sec, rot = min(fit, key=operator.itemgetter(0))
        except ValueError:
            return None, None

        return sec, rot

    def add_rect(self, width, height, rid=None):
        """
        Add rectangle of width x height dimensions.

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height
            rid: Optional rectangle user id

        Returns:
            Rectangle: Rectangle with placement coordinates
            None: If the rectangle couldn't be placed.
        """
        assert(width > 0 and height >0)

        # Obtain the best section to place the rectangle.
        section, rotated = self._select_fittest_section(width, height)
        if not section:
            return None

        if rotated:
            width, height = height, width

        # Remove section, split and store results
        self._sections.remove(section)
        self._split(section, width, height)

        # Store rectangle in the selected position
        rect = Rectangle(section.x, section.y, width, height, rid)
        self.rectangles.append(rect)
        return rect

    def fitness(self, width, height):
        """
        In the guillotine algorithm's case, returns the min of the fitness of all
        free sections for the given dimensions, both normal and rotated
        (if rotation is enabled).
        """
        assert(width > 0 and height > 0)

        # Get best fitness section.
        section, rotated = self._select_fittest_section(width, height)
        if not section:
            return None

        # Return fitness of returned section, with correct dimensions if
        # the rectangle was rotated.
        if rotated:
            return self._section_fitness(section, height, width)
        else:
            return self._section_fitness(section, width, height)

    def reset(self):
        # Restart with a single free section covering the whole surface.
        super(Guillotine, self).reset()
        self._sections = []
        self._add_section(Rectangle(0, 0, self.width, self.height))
class GuillotineBaf(Guillotine):
    """Best Area Fit (BAF) section selection for the Guillotine algorithm:
    fitness is the leftover area after placement (smaller is better)."""

    def _section_fitness(self, section, width, height):
        fits = width <= section.width and height <= section.height
        return section.area() - width * height if fits else None
class GuillotineBlsf(Guillotine):
    """Best Long Side Fit (BLSF) section selection for the Guillotine
    algorithm: fitness is the longer leftover side (smaller is better)."""

    def _section_fitness(self, section, width, height):
        if width <= section.width and height <= section.height:
            return max(section.width - width, section.height - height)
        return None
class GuillotineBssf(Guillotine):
    """Best Short Side Fit (BSSF) section selection for the Guillotine
    algorithm: fitness is the shorter leftover side (smaller is better)."""

    def _section_fitness(self, section, width, height):
        if width <= section.width and height <= section.height:
            return min(section.width - width, section.height - height)
        return None
class GuillotineSas(Guillotine):
    """Short Axis Split (SAS) rule: split along the section's shorter axis."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if section.width < section.height
                    else self._split_vertical)
        return splitter(section, width, height)
class GuillotineLas(Guillotine):
    """Long Axis Split (LAS) rule: split along the section's longer axis."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if section.width >= section.height
                    else self._split_vertical)
        return splitter(section, width, height)
class GuillotineSlas(Guillotine):
    """Short Leftover Axis Split (SLAS) rule: split along the axis with the
    smaller leftover after placement."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if section.width - width < section.height - height
                    else self._split_vertical)
        return splitter(section, width, height)
class GuillotineLlas(Guillotine):
    """Long Leftover Axis Split (LLAS) rule: split along the axis with the
    larger leftover after placement."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if section.width - width >= section.height - height
                    else self._split_vertical)
        return splitter(section, width, height)
class GuillotineMaxas(Guillotine):
    """Max Area Axis Split (MAXAS) rule: maximize the larger resulting free
    area (equivalently minimize the smaller one), which tends to keep the
    leftover sections more even-sized."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if width * (section.height - height) <= height * (section.width - width)
                    else self._split_vertical)
        return splitter(section, width, height)
class GuillotineMinas(Guillotine):
    """Min Area Axis Split (MINAS) rule: the mirror image of MAXAS."""

    def _split(self, section, width, height):
        splitter = (self._split_horizontal
                    if width * (section.height - height) >= height * (section.width - width)
                    else self._split_vertical)
        return splitter(section, width, height)
# Concrete GUILLOTINE-RECT-SPLIT algorithms: each class combines one
# section-selection criterion (BSSF / BLSF / BAF) with one axis-split rule
# (SAS / LAS / SLAS / LLAS / MAXAS / MINAS) via multiple inheritance.
# The bodies are intentionally empty; all behavior comes from the mixins.
class GuillotineBssfSas(GuillotineBssf, GuillotineSas):
    pass

class GuillotineBssfLas(GuillotineBssf, GuillotineLas):
    pass

class GuillotineBssfSlas(GuillotineBssf, GuillotineSlas):
    pass

class GuillotineBssfLlas(GuillotineBssf, GuillotineLlas):
    pass

class GuillotineBssfMaxas(GuillotineBssf, GuillotineMaxas):
    pass

class GuillotineBssfMinas(GuillotineBssf, GuillotineMinas):
    pass

class GuillotineBlsfSas(GuillotineBlsf, GuillotineSas):
    pass

class GuillotineBlsfLas(GuillotineBlsf, GuillotineLas):
    pass

class GuillotineBlsfSlas(GuillotineBlsf, GuillotineSlas):
    pass

class GuillotineBlsfLlas(GuillotineBlsf, GuillotineLlas):
    pass

class GuillotineBlsfMaxas(GuillotineBlsf, GuillotineMaxas):
    pass

class GuillotineBlsfMinas(GuillotineBlsf, GuillotineMinas):
    pass

class GuillotineBafSas(GuillotineBaf, GuillotineSas):
    pass

class GuillotineBafLas(GuillotineBaf, GuillotineLas):
    pass

class GuillotineBafSlas(GuillotineBaf, GuillotineSlas):
    pass

class GuillotineBafLlas(GuillotineBaf, GuillotineLlas):
    pass

class GuillotineBafMaxas(GuillotineBaf, GuillotineMaxas):
    pass

class GuillotineBafMinas(GuillotineBaf, GuillotineMinas):
    pass

View File

@ -0,0 +1,244 @@
from .pack_algo import PackingAlgorithm
from .geometry import Rectangle
import itertools
import collections
import operator
first_item = operator.itemgetter(0)
class MaxRects(PackingAlgorithm):
    """Maximal-rectangles packing: the free space is tracked as a set of
    (possibly overlapping) maximal free rectangles."""

    def __init__(self, width, height, rot=True, *args, **kwargs):
        super(MaxRects, self).__init__(width, height, rot, *args, **kwargs)

    def _rect_fitness(self, max_rect, width, height):
        """
        Arguments:
            max_rect (Rectangle): Destination max_rect
            width (int, float): Rectangle width
            height (int, float): Rectangle height

        Returns:
            None: Rectangle couldn't be placed into max_rect
            integer, float: fitness value
        """
        # Base class: any max_rect that fits is equally good (fitness 0);
        # subclasses refine this.
        if width <= max_rect.width and height <= max_rect.height:
            return 0
        else:
            return None

    def _select_position(self, w, h):
        """
        Find the max_rect with the best fitness for placing a rectangle
        of dimensions w*h.

        Arguments:
            w (int, float): Rectangle width
            h (int, float): Rectangle height

        Returns:
            (rect, max_rect)
                rect (Rectangle): Placed rectangle or None if it was unable.
                max_rect (Rectangle): Maximal rectangle where rect was placed
        """
        if not self._max_rects:
            return None, None

        # Normal rectangle
        fitn = ((self._rect_fitness(m, w, h), w, h, m) for m in self._max_rects
            if self._rect_fitness(m, w, h) is not None)

        # Rotated rectangle
        fitr = ((self._rect_fitness(m, h, w), h, w, m) for m in self._max_rects
            if self._rect_fitness(m, h, w) is not None)

        if not self.rot:
            fitr = []

        fit = itertools.chain(fitn, fitr)

        try:
            # ValueError when no max_rect fits in either orientation.
            _, w, h, m = min(fit, key=first_item)
        except ValueError:
            return None, None

        return Rectangle(m.x, m.y, w, h), m

    def _generate_splits(self, m, r):
        """
        When a rectangle is placed inside a maximal rectangle, it stops being one
        and up to 4 new maximal rectangles may appear depending on the placement.
        _generate_splits calculates them.

        Arguments:
            m (Rectangle): max_rect rectangle
            r (Rectangle): rectangle placed

        Returns:
            list : list containing new maximal rectangles or an empty list
        """
        new_rects = []

        if r.left > m.left:
            new_rects.append(Rectangle(m.left, m.bottom, r.left-m.left, m.height))
        if r.right < m.right:
            new_rects.append(Rectangle(r.right, m.bottom, m.right-r.right, m.height))
        if r.top < m.top:
            new_rects.append(Rectangle(m.left, r.top, m.width, m.top-r.top))
        if r.bottom > m.bottom:
            new_rects.append(Rectangle(m.left, m.bottom, m.width, r.bottom-m.bottom))

        return new_rects

    def _split(self, rect):
        """
        Split all max_rects intersecting the rectangle rect into up to
        4 new max_rects each, in place (self._max_rects is replaced).

        NOTE(review): the docstring originally promised a returned list,
        but the method returns None — the result lives in self._max_rects.

        Arguments:
            rect (Rectangle): Rectangle
        """
        max_rects = collections.deque()

        for r in self._max_rects:
            if r.intersects(rect):
                max_rects.extend(self._generate_splits(r, rect))
            else:
                max_rects.append(r)

        # Add newly generated max_rects
        self._max_rects = list(max_rects)

    def _remove_duplicates(self):
        """
        Remove every maximal rectangle contained by another one.
        """
        contained = set()
        for m1, m2 in itertools.combinations(self._max_rects, 2):
            if m1.contains(m2):
                contained.add(m2)
            elif m2.contains(m1):
                contained.add(m1)

        # Remove from max_rects
        self._max_rects = [m for m in self._max_rects if m not in contained]

    def fitness(self, width, height):
        """
        Metric used to rate how much space is wasted if a rectangle is placed.
        Returns a value greater or equal to zero, the smaller the value the more
        'fit' is the rectangle. If the rectangle can't be placed, returns None.

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height

        Returns:
            int, float: Rectangle fitness
            None: Rectangle can't be placed
        """
        assert(width > 0 and height > 0)

        rect, max_rect = self._select_position(width, height)
        if rect is None:
            return None

        # Return fitness; rect carries the possibly-rotated dimensions.
        return self._rect_fitness(max_rect, rect.width, rect.height)

    def add_rect(self, width, height, rid=None):
        """
        Add rectangle of width x height dimensions.

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height
            rid: Optional rectangle user id

        Returns:
            Rectangle: Rectangle with placement coordinates
            None: If the rectangle couldn't be placed.
        """
        assert(width > 0 and height >0)

        # Search best position and orientation
        rect, _ = self._select_position(width, height)
        if not rect:
            return None

        # Subdivide all the max rectangles intersecting with the selected
        # rectangle.
        self._split(rect)

        # Remove any max_rect contained by another
        self._remove_duplicates()

        # Store and return rectangle position.
        rect.rid = rid
        self.rectangles.append(rect)
        return rect

    def reset(self):
        # Start over with a single maximal free rectangle: the whole surface.
        super(MaxRects, self).reset()
        self._max_rects = [Rectangle(0, 0, self.width, self.height)]
class MaxRectsBl(MaxRects):
    """Bottom-Left variant: place where the rectangle's top edge ends up
    lowest."""

    def _select_position(self, w, h):
        """
        Select the position where the y coordinate of the top of the rectangle
        is lowest.

        NOTE(review): the original comment claimed ties are broken by the
        smallest x coordinate, but min() with key=first_item compares only
        the first tuple element (top y) — ties are actually resolved by
        iteration order over self._max_rects. Confirm intended behavior
        before relying on the x tie-break.
        """
        fitn = ((m.y+h, m.x, w, h, m) for m in self._max_rects
            if self._rect_fitness(m, w, h) is not None)
        fitr = ((m.y+w, m.x, h, w, m) for m in self._max_rects
            if self._rect_fitness(m, h, w) is not None)

        if not self.rot:
            fitr = []

        fit = itertools.chain(fitn, fitr)

        try:
            _, _, w, h, m = min(fit, key=first_item)
        except ValueError:
            return None, None

        return Rectangle(m.x, m.y, w, h), m
class MaxRectsBssf(MaxRects):
    """Best Short Side Fit: minimize the shorter leftover side."""

    def _rect_fitness(self, max_rect, width, height):
        leftover_w = max_rect.width - width
        leftover_h = max_rect.height - height
        if leftover_w < 0 or leftover_h < 0:
            return None
        return min(leftover_w, leftover_h)
class MaxRectsBaf(MaxRects):
    """Best Area Fit: pick the maximal rectangle with the smallest leftover
    area after placement."""

    def _rect_fitness(self, max_rect, width, height):
        if width <= max_rect.width and height <= max_rect.height:
            return (max_rect.width * max_rect.height) - (width * height)
        return None
class MaxRectsBlsf(MaxRects):
    """Best Long Side Fit: minimize the longer leftover side."""

    def _rect_fitness(self, max_rect, width, height):
        leftover_w = max_rect.width - width
        leftover_h = max_rect.height - height
        if leftover_w < 0 or leftover_h < 0:
            return None
        return max(leftover_w, leftover_h)

View File

@ -0,0 +1,140 @@
from .geometry import Rectangle
class PackingAlgorithm(object):
    """Base class for all packing algorithms operating on a single surface."""

    def __init__(self, width, height, rot=True, bid=None, *args, **kwargs):
        """
        Initialize packing algorithm

        Arguments:
            width (int, float): Packing surface width
            height (int, float): Packing surface height
            rot (bool): Rectangle rotation enabled or disabled
            bid (string|int|...): Packing surface identification
        """
        self.width = width
        self.height = height
        self.rot = rot
        self.rectangles = []
        self.bid = bid
        self._surface = Rectangle(0, 0, width, height)
        self.reset()

    def __len__(self):
        return len(self.rectangles)

    def __iter__(self):
        return iter(self.rectangles)

    def _fits_surface(self, width, height):
        """
        Test surface is big enough to place a rectangle

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height

        Returns:
            boolean: True if it could be placed, False otherwise
        """
        assert(width > 0 and height > 0)
        # Try the rotated orientation when rotation is allowed and the
        # rectangle doesn't fit as given.
        if self.rot and (width > self.width or height > self.height):
            width, height = height, width

        if width > self.width or height > self.height:
            return False
        else:
            return True

    def __getitem__(self, key):
        """
        Return rectangle in selected position.
        """
        return self.rectangles[key]

    def used_area(self):
        """
        Total area of rectangles placed

        Returns:
            int, float: Area
        """
        return sum(r.area() for r in self)

    def fitness(self, width, height, rot = False):
        """
        Metric used to rate how much space is wasted if a rectangle is placed.
        Returns a value greater or equal to zero, the smaller the value the more
        'fit' is the rectangle. If the rectangle can't be placed, returns None.

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height
            rot (bool): Enable rectangle rotation

        Returns:
            int, float: Rectangle fitness
            None: Rectangle can't be placed
        """
        raise NotImplementedError

    def add_rect(self, width, height, rid=None):
        """
        Add rectangle of width x height dimensions.

        Arguments:
            width (int, float): Rectangle width
            height (int, float): Rectangle height
            rid: Optional rectangle user id

        Returns:
            Rectangle: Rectangle with placement coordinates
            None: If the rectangle couldn't be placed.
        """
        raise NotImplementedError

    def rect_list(self):
        """
        Returns a list with all rectangles placed into the surface.

        Returns:
            List: Format [(x, y, width, height, rid), ...]
        """
        return [(r.x, r.y, r.width, r.height, r.rid) for r in self]

    def validate_packing(self):
        """
        Check for collisions between rectangles, and check that all are placed
        inside the surface.

        Raises:
            Exception: when a rectangle lies outside the surface or two
                placed rectangles overlap.
        """
        surface = Rectangle(0, 0, self.width, self.height)
        for r in self:
            if not surface.contains(r):
                raise Exception("Rectangle placed outside surface")

        rectangles = [r for r in self]
        if len(rectangles) <= 1:
            return

        # BUG FIX: the original loops ran over range(len-2) x range(r1+1, len-1)
        # and therefore never compared any pair involving the LAST rectangle;
        # with exactly two rectangles no pair was checked at all.
        for i in range(len(rectangles) - 1):
            for j in range(i + 1, len(rectangles)):
                if rectangles[i].intersects(rectangles[j]):
                    raise Exception("Rectangle collision detected")

    def is_empty(self):
        # Returns true if there is no rectangle placed.
        return not bool(len(self))

    def reset(self):
        self.rectangles = []  # List of placed Rectangles.

View File

@ -0,0 +1,580 @@
from .maxrects import MaxRectsBssf
import operator
import itertools
import collections
import decimal
# Float to Decimal helper
def float2dec(ft, decimal_digits):
    """
    Convert a float (or int) to Decimal, rounding up, keeping the requested
    number of digits after the decimal point.

    Arguments:
        ft (float, int): Number to convert
        decimal_digits (int): Number of digits after decimal point

    Return:
        Decimal: Number converted to decimal
    """
    quantum = decimal.Decimal(10) ** (-decimal_digits)
    with decimal.localcontext() as ctx:
        ctx.rounding = decimal.ROUND_UP
        return decimal.Decimal.from_float(float(ft)).quantize(quantum)
# Sorting algos for rectangle lists.  Each takes a list of
# (width, height, ...) tuples and returns a NEW ordered list.
def SORT_AREA(rectlist):
    """Sort by area, biggest first."""
    return sorted(rectlist, reverse=True, key=lambda r: r[0] * r[1])

def SORT_PERI(rectlist):
    """Sort by perimeter, biggest first."""
    return sorted(rectlist, reverse=True, key=lambda r: r[0] + r[1])

def SORT_DIFF(rectlist):
    """Sort by difference between the sides, biggest first."""
    return sorted(rectlist, reverse=True, key=lambda r: abs(r[0] - r[1]))

def SORT_SSIDE(rectlist):
    """Sort by short side (long side breaks ties), biggest first."""
    return sorted(rectlist, reverse=True,
                  key=lambda r: (min(r[0], r[1]), max(r[0], r[1])))

def SORT_LSIDE(rectlist):
    """Sort by long side (short side breaks ties), biggest first."""
    return sorted(rectlist, reverse=True,
                  key=lambda r: (max(r[0], r[1]), min(r[0], r[1])))

def SORT_RATIO(rectlist):
    """Sort by width/height ratio, biggest first."""
    return sorted(rectlist, reverse=True, key=lambda r: r[0] / r[1])

def SORT_NONE(rectlist):
    """Keep the given order; return a shallow copy."""
    return list(rectlist)
class BinFactory(object):
    """Lazily creates up to ``count`` packing bins of one fixed size.

    A single reference bin (built on demand) answers fitness/fit queries
    without consuming the bin budget.
    """

    def __init__(self, width, height, count, pack_algo, *args, **kwargs):
        self._width = width
        self._height = height
        self._count = count

        self._pack_algo = pack_algo
        self._algo_kwargs = kwargs
        self._algo_args = args
        self._ref_bin = None # Reference bin used to calculate fitness

        self._bid = kwargs.get("bid", None)

    def _create_bin(self):
        # Instantiate a fresh packing surface with the stored arguments.
        return self._pack_algo(self._width, self._height,
                               *self._algo_args, **self._algo_kwargs)

    def is_empty(self):
        """True when no more bins can be produced."""
        return self._count < 1

    def fitness(self, width, height):
        """Fitness a widthxheight rectangle would get in an empty bin."""
        if not self._ref_bin:
            self._ref_bin = self._create_bin()
        return self._ref_bin.fitness(width, height)

    def fits_inside(self, width, height):
        """True when a widthxheight rectangle fits into an empty bin."""
        if not self._ref_bin:
            self._ref_bin = self._create_bin()
        return self._ref_bin._fits_surface(width, height)

    def new_bin(self):
        """Produce one bin, or None when the factory is exhausted."""
        if self._count <= 0:
            return None
        self._count -= 1
        return self._create_bin()

    def __eq__(self, other):
        # Factories compare by bin area.
        return self._width * self._height == other._width * other._height

    def __lt__(self, other):
        return self._width * self._height < other._width * other._height

    def __str__(self):
        return "Bin: {} {} {}".format(self._width, self._height, self._count)
class PackerBNFMixin(object):
    """
    BNF (Bin Next Fit): keep a single open bin at a time; when the
    rectangle does not fit, close the current bin and move to the next.
    """

    def add_rect(self, width, height, rid=None):
        """Place the rectangle, closing bins until one accepts it.

        Returns the placed rectangle, or None when no remaining bin can
        ever hold it.
        """
        while True:
            if not self._open_bins:
                # No open bin: try to open a fresh one that can hold the
                # rect (the factory appends it to _open_bins).
                if self._new_open_bin(width, height, rid=rid) is None:
                    return None

            placed = self._open_bins[0].add_rect(width, height, rid=rid)
            if placed is not None:
                return placed

            # Rect didn't fit: retire the current bin and retry.
            self._closed_bins.append(self._open_bins.popleft())
class PackerBFFMixin(object):
    """
    BFF (Bin First Fit): place the rectangle into the first bin that
    accepts it, opening new bins as needed.
    """

    def add_rect(self, width, height, rid=None):
        """Return the placed rectangle, or None when no bin can hold it."""
        # First-fit scan over the already-open bins.
        for open_bin in self._open_bins:
            placed = open_bin.add_rect(width, height, rid=rid)
            if placed is not None:
                return placed

        # Keep opening bins until one accepts the rect or we run out.
        while True:
            fresh = self._new_open_bin(width, height, rid=rid)
            if fresh is None:
                return None

            # _new_open_bin may hand back a bin that is still too small,
            # so the placement has to be re-checked.
            placed = fresh.add_rect(width, height, rid=rid)
            if placed is not None:
                return placed
class PackerBBFMixin(object):
    """
    BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness

    NOTE(review): unlike the BNF/BFF mixins, add_rect here returns
    True/False instead of the placed rectangle — confirm callers do not
    rely on getting the rectangle back.
    """

    # only create this getter once
    first_item = operator.itemgetter(0)

    def add_rect(self, width, height, rid=None):
        """Pack into the open bin with the smallest (best) fitness value,
        opening new bins when none of the open ones fits."""
        # Try packing into open bins
        fit = ((b.fitness(width, height), b) for b in self._open_bins)
        fit = (b for b in fit if b[0] is not None)
        try:
            # min() raises ValueError when no open bin accepted the rect.
            _, best_bin = min(fit, key=self.first_item)
            best_bin.add_rect(width, height, rid)
            return True
        except ValueError:
            pass

        # Try packing into one of the empty bins
        while True:
            # can we find an unopened bin that will hold this rect?
            new_bin = self._new_open_bin(width, height, rid=rid)
            if new_bin is None:
                return False

            # _new_open_bin may return a bin that's too small,
            # so we have to double-check
            if new_bin.add_rect(width, height, rid):
                return True
class PackerOnline(object):
    """
    Rectangles are packed as soon are they are added.

    Bin bookkeeping: ``_empty_bins`` holds BinFactory objects not yet in
    use, ``_open_bins`` bins currently accepting rectangles, and
    ``_closed_bins`` bins retired by the packing mixin.
    """
    def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
        """
        Arguments:
            pack_algo (PackingAlgorithm): What packing algo to use
            rotation (bool): Enable/Disable rectangle rotation
        """
        self._rotation = rotation
        self._pack_algo = pack_algo
        self.reset()

    def __iter__(self):
        # Iterate bins in use: closed first, then the open ones.
        return itertools.chain(self._closed_bins, self._open_bins)

    def __len__(self):
        # Number of bins in use; never-opened (empty) bins don't count.
        return len(self._closed_bins)+len(self._open_bins)

    def __getitem__(self, key):
        """
        Return bin in selected position. (excluding empty bins)

        Supports negative indices; raises TypeError for non-int keys and
        IndexError when out of range.
        """
        if not isinstance(key, int):
            raise TypeError("Indices must be integers")

        size = len(self) # avoid recalulations

        if key < 0:
            key += size

        if not 0 <= key < size:
            raise IndexError("Index out of range")

        # Closed bins come first in iteration order, then open bins.
        if key < len(self._closed_bins):
            return self._closed_bins[key]
        else:
            return self._open_bins[key-len(self._closed_bins)]

    def _new_open_bin(self, width=None, height=None, rid=None):
        """
        Extract the next empty bin and append it to open bins

        Returns:
            PackingAlgorithm: Initialized empty packing bin.
            None: No bin big enough for the rectangle was found
        """
        factories_to_delete = set() #
        new_bin = None

        for key, binfac in self._empty_bins.items():

            # Only return the new bin if the rect fits.
            # (If width or height is None, caller doesn't know the size.)
            if not binfac.fits_inside(width, height):
                continue

            # Create bin and add to open_bins
            new_bin = binfac.new_bin()
            if new_bin is None:
                continue
            self._open_bins.append(new_bin)

            # If the factory was depleted mark for deletion
            if binfac.is_empty():
                factories_to_delete.add(key)

            break

        # Delete marked factories (outside the loop: can't mutate the
        # OrderedDict while iterating it)
        for f in factories_to_delete:
            del self._empty_bins[f]

        return new_bin

    def add_bin(self, width, height, count=1, **kwargs):
        # accept the same parameters as PackingAlgorithm objects
        kwargs['rot'] = self._rotation
        bin_factory = BinFactory(width, height, count, self._pack_algo, **kwargs)
        self._empty_bins[next(self._bin_count)] = bin_factory

    def rect_list(self):
        """Flatten all bins into (bin_index, x, y, width, height, rid)."""
        rectangles = []
        bin_count = 0

        for abin in self:
            for rect in abin:
                rectangles.append((bin_count, rect.x, rect.y, rect.width, rect.height, rect.rid))
            bin_count += 1

        return rectangles

    def bin_list(self):
        """
        Return a list of the dimmensions of the bins in use, that is closed
        or open containing at least one rectangle
        """
        return [(b.width, b.height) for b in self]

    def validate_packing(self):
        # Delegate validation to every bin in use.
        for b in self:
            b.validate_packing()

    def reset(self):
        # Bins fully packed and closed.
        self._closed_bins = collections.deque()

        # Bins ready to pack rectangles
        self._open_bins = collections.deque()

        # User provided bins not in current use
        self._empty_bins = collections.OrderedDict() # O(1) deletion of arbitrary elem
        self._bin_count = itertools.count()
class Packer(PackerOnline):
    """
    Rectangles aren't packed untils pack() is called.

    Offline variant: bins and rectangles are only queued by add_bin /
    add_rect and packed all at once by pack().
    """
    def __init__(self, pack_algo=MaxRectsBssf, sort_algo=SORT_NONE,
            rotation=True):
        """
        Arguments:
            pack_algo (PackingAlgorithm): Algorithm used for each bin.
            sort_algo (callable): Rectangle pre-sort strategy (SORT_*).
            rotation (bool): Enable/Disable rectangle rotation.
        """
        super(Packer, self).__init__(pack_algo=pack_algo, rotation=rotation)

        self._sort_algo = sort_algo

        # User provided bins and Rectangles
        self._avail_bins = collections.deque()
        self._avail_rect = collections.deque()

        # Aux vars used during packing
        self._sorted_rect = []

    def add_bin(self, width, height, count=1, **kwargs):
        # Just queue the bin; it is handed to the online packer in pack().
        self._avail_bins.append((width, height, count, kwargs))

    def add_rect(self, width, height, rid=None):
        # Just queue the rectangle; packed later by pack().
        self._avail_rect.append((width, height, rid))

    def _is_everything_ready(self):
        # Packing needs at least one rectangle and one bin (truthy deques).
        return self._avail_rect and self._avail_bins

    def pack(self):
        """Pack all queued rectangles into the queued bins."""
        self.reset()

        if not self._is_everything_ready():
            # maybe we should throw an error here?
            return

        # Add available bins to packer
        for b in self._avail_bins:
            width, height, count, extra_kwargs = b
            super(Packer, self).add_bin(width, height, count, **extra_kwargs)

        # If enabled sort rectangles
        self._sorted_rect = self._sort_algo(self._avail_rect)

        # Start packing (mixin add_rect via the online base class)
        for r in self._sorted_rect:
            super(Packer, self).add_rect(*r)
class PackerBNF(Packer, PackerBNFMixin):
    """
    BNF (Bin Next Fit): Only one open bin, if rectangle doesn't fit
    go to next bin and close current one.

    Offline variant: rectangles are queued and packed on pack().
    """
    pass
class PackerBFF(Packer, PackerBFFMixin):
    """
    BFF (Bin First Fit): Pack rectangle in first bin it fits.

    Offline variant: rectangles are queued and packed on pack().
    """
    pass
class PackerBBF(Packer, PackerBBFMixin):
    """
    BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness.

    Offline variant: rectangles are queued and packed on pack().
    """
    pass
class PackerOnlineBNF(PackerOnline, PackerBNFMixin):
    """
    BNF Bin Next Fit Online variant: rectangles are packed immediately
    on add_rect().
    """
    pass
class PackerOnlineBFF(PackerOnline, PackerBFFMixin):
    """
    BFF Bin First Fit Online variant: rectangles are packed immediately
    on add_rect().
    """
    pass
class PackerOnlineBBF(PackerOnline, PackerBBFMixin):
    """
    BBF Bin Best Fit Online variant: rectangles are packed immediately
    on add_rect().
    """
    pass
class PackerGlobal(Packer, PackerBNFMixin):
    """
    GLOBAL: For each bin pack the rectangle with the best fitness.
    """
    first_item = operator.itemgetter(0)

    def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
        """
        Rectangles are selected per-bin by best fitness, so no
        user-facing sort strategy is accepted (forced to SORT_NONE).
        """
        super(PackerGlobal, self).__init__(pack_algo=pack_algo,
            sort_algo=SORT_NONE, rotation=rotation)

    def _find_best_fit(self, pbin):
        """
        Return best fitness rectangle from rectangles packing _sorted_rect list

        Arguments:
            pbin (PackingAlgorithm): Packing bin

        Returns:
            key of the rectangle with best fitness
        """
        fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())
        fit = (f for f in fit if f[0] is not None)
        try:
            # min() raises ValueError when no rectangle fits this bin.
            _, rect = min(fit, key=self.first_item)
            return rect
        except ValueError:
            return None

    def _new_open_bin(self, remaining_rect):
        """
        Extract the next bin where at least one of the rectangles in
        remaining_rect fits.

        Arguments:
            remaining_rect (dict): rectangles not placed yet

        Returns:
            PackingAlgorithm: Initialized empty packing bin.
            None: No bin big enough for the rectangle was found
        """
        factories_to_delete = set() #
        new_bin = None

        for key, binfac in self._empty_bins.items():

            # Only return the new bin if at least one of the remaining
            # rectangles fit inside.
            a_rectangle_fits = False
            for _, rect in remaining_rect.items():
                if binfac.fits_inside(rect[0], rect[1]):
                    a_rectangle_fits = True
                    break

            if not a_rectangle_fits:
                # No remaining rect will ever fit bins of this size:
                # drop the factory entirely.
                factories_to_delete.add(key)
                continue

            # Create bin and add to open_bins
            new_bin = binfac.new_bin()
            if new_bin is None:
                continue
            self._open_bins.append(new_bin)

            # If the factory was depleted mark for deletion
            if binfac.is_empty():
                factories_to_delete.add(key)

            break

        # Delete marked factories
        for f in factories_to_delete:
            del self._empty_bins[f]

        return new_bin

    def pack(self):
        """Pack all queued rectangles, choosing per bin the rectangle
        with the best fitness until nothing more fits."""
        self.reset()

        if not self._is_everything_ready():
            return

        # Add available bins to packer.  Note: super(Packer, self)
        # deliberately skips Packer.add_bin and calls PackerOnline's.
        for b in self._avail_bins:
            width, height, count, extra_kwargs = b
            super(Packer, self).add_bin(width, height, count, **extra_kwargs)

        # Store rectangles into dict for fast deletion
        self._sorted_rect = collections.OrderedDict(
            enumerate(self._sort_algo(self._avail_rect)))

        # For each bin pack the rectangles with lowest fitness until it is filled or
        # the rectangles exhausted, then open the next bin where at least one rectangle
        # will fit and repeat the process until there aren't more rectangles or bins
        # available.
        while len(self._sorted_rect) > 0:

            # Find one bin where at least one of the remaining rectangles fit
            pbin = self._new_open_bin(self._sorted_rect)
            if pbin is None:
                break

            # Pack as many rectangles as possible into the open bin
            while True:

                # Find 'fittest' rectangle
                best_rect_key = self._find_best_fit(pbin)
                if best_rect_key is None:
                    closed_bin = self._open_bins.popleft()
                    self._closed_bins.append(closed_bin)
                    break # None of the remaining rectangles can be packed in this bin

                best_rect = self._sorted_rect[best_rect_key]
                del self._sorted_rect[best_rect_key]

                PackerBNFMixin.add_rect(self, *best_rect)
# Packer factory
class Enum(tuple):
    """Tiny enum: a tuple whose members resolve to their own index,
    e.g. Enum(["A", "B"]).B == 1.  Attribute access delegates to
    tuple.index, so a missing name raises ValueError."""
    __getattr__ = tuple.index

# Available packing modes and bin-selection heuristics (see newPacker).
PackingMode = Enum(["Online", "Offline"])
PackingBin = Enum(["BNF", "BFF", "BBF", "Global"])
def newPacker(mode=PackingMode.Offline,
        bin_algo=PackingBin.BBF,
        pack_algo=MaxRectsBssf,
        sort_algo=SORT_AREA,
        rotation=True):
    """
    Packer factory helper function

    Arguments:
        mode (PackingMode): Packing mode
            Online: Rectangles are packed as soon are they are added
            Offline: Rectangles aren't packed untils pack() is called
        bin_algo (PackingBin): Bin selection heuristic
        pack_algo (PackingAlgorithm): Algorithm used
        sort_algo: Rectangle pre-sort strategy (Offline modes only)
        rotation (boolean): Enable or disable rectangle rotation.

    Returns:
        Packer: Initialized packer instance.

    Raises:
        AttributeError: Unknown mode, or bin heuristic unsupported for
            the selected mode.
    """
    online_classes = {
        PackingBin.BNF: PackerOnlineBNF,
        PackingBin.BFF: PackerOnlineBFF,
        PackingBin.BBF: PackerOnlineBBF,
    }
    offline_classes = {
        PackingBin.BNF: PackerBNF,
        PackingBin.BFF: PackerBFF,
        PackingBin.BBF: PackerBBF,
        PackingBin.Global: PackerGlobal,
    }

    if mode == PackingMode.Online:
        # Online packers place rectangles immediately; sorting is moot.
        sort_algo = None
        packer_class = online_classes.get(bin_algo)
        if packer_class is None:
            raise AttributeError("Unsupported bin selection heuristic")
    elif mode == PackingMode.Offline:
        packer_class = offline_classes.get(bin_algo)
        if packer_class is None:
            raise AttributeError("Unsupported bin selection heuristic")
        if bin_algo == PackingBin.Global:
            # PackerGlobal orders rectangles internally by fitness.
            sort_algo = None
    else:
        raise AttributeError("Unknown packing mode.")

    if sort_algo:
        return packer_class(pack_algo=pack_algo, sort_algo=sort_algo,
            rotation=rotation)
    else:
        return packer_class(pack_algo=pack_algo, rotation=rotation)

View File

@ -0,0 +1,303 @@
import collections
import itertools
import operator
import heapq
import copy
from .pack_algo import PackingAlgorithm
from .geometry import Point as P
from .geometry import HSegment, Rectangle
from .waste import WasteManager
class Skyline(PackingAlgorithm):
    """ Class implementing Skyline algorithm as described by
    Jukka Jylanki - A Thousand Ways to Pack the Bin (February 27, 2010)

    _skyline: stores all the segments at the top of the skyline.
    _waste: Handles all wasted sections.
    """

    def __init__(self, width, height, rot=True, *args, **kwargs):
        """
        _skyline is the list used to store all the skyline segments, each
        one is a list with the format [x, y, width] where x is the x
        coordinate of the left most point of the segment, y the y coordinate
        of the segment, and width the length of the segment. The initial
        segment is allways [0, 0, surface_width]

        Arguments:
            width (int, float):
            height (int, float):
            rot (bool): Enable or disable rectangle rotation
        """
        # Waste management is off by default; SkylineWMixin enables it.
        self._waste_management = False
        self._waste = WasteManager(rot=rot)
        super(Skyline, self).__init__(width, height, rot, merge=False, *args, **kwargs)

    def _placement_points_generator(self, skyline, width):
        """Returns a generator for the x coordinates of all the placement
        points on the skyline for a given rectangle.

        WARNING: In some cases could be duplicated points, but it is faster
        to compute them twice than to remove them.

        Arguments:
            skyline (list): Skyline HSegment list
            width (int, float): Rectangle width

        Returns:
            generator
        """
        skyline_r = skyline[-1].right
        skyline_l = skyline[0].left

        # Placements using skyline segment left point
        ppointsl = (s.left for s in skyline if s.left+width <= skyline_r)

        # Placements using skyline segment right point
        ppointsr = (s.right-width for s in skyline if s.right-width >= skyline_l)

        # Merge positions (both generators are already left-to-right sorted)
        return heapq.merge(ppointsl, ppointsr)

    def _generate_placements(self, width, height):
        """
        Generate all feasible placements for a widthxheight rectangle on
        the current skyline.

        Arguments:
            width (number):
            height (number):

        Returns:
            deque of tuples (Rectangle, left_skyline, right_skyline):
                Rectangle: Rectangle in valid position
                left_skyline: Index for the skyline under the rectangle left edge.
                right_skyline: Index for the skyline under the rectangle right edge.
        """
        skyline = self._skyline

        points = collections.deque()

        left_index = right_index = 0 # Left and right side skyline index
        # "Support" is the tallest segment under the rectangle; the rect
        # rests on top of it.
        support_height = skyline[0].top
        support_index = 0

        placements = self._placement_points_generator(skyline, width)
        for p in placements:

            # If Rectangle's right side changed segment, find new support
            if p+width > skyline[right_index].right:
                for right_index in range(right_index+1, len(skyline)):
                    if skyline[right_index].top >= support_height:
                        support_index = right_index
                        support_height = skyline[right_index].top
                    if p+width <= skyline[right_index].right:
                        break

            # If left side changed segment.
            if p >= skyline[left_index].right:
                left_index +=1

                # Find new support if the previous one was shifted out.
                if support_index < left_index:
                    support_index = left_index
                    support_height = skyline[left_index].top
                    for i in range(left_index, right_index+1):
                        if skyline[i].top >= support_height:
                            support_index = i
                            support_height = skyline[i].top

            # Add point if there is enought room at the top
            if support_height+height <= self.height:
                points.append((Rectangle(p, support_height, width, height),\
                    left_index, right_index))

        return points

    def _merge_skyline(self, skylineq, segment):
        """
        Append *segment* to the new skyline, fusing it with the previous
        segment when both share the same top height.

        Arguments:
            skylineq (collections.deque):
            segment (HSegment):
        """
        if len(skylineq) == 0:
            skylineq.append(segment)
            return

        if skylineq[-1].top == segment.top:
            s = skylineq[-1]
            skylineq[-1] = HSegment(s.start, s.length+segment.length)
        else:
            skylineq.append(segment)

    def _add_skyline(self, rect):
        """
        Rebuild the skyline after *rect* is placed, splitting segments at
        the rectangle edges and recording waste under it when enabled.

        Arguments:
            rect (Rectangle): newly placed rectangle
        """
        skylineq = collections.deque([]) # Skyline after adding new one

        for sky in self._skyline:
            if sky.right <= rect.left or sky.left >= rect.right:
                # Segment entirely outside the rectangle's span.
                self._merge_skyline(skylineq, sky)
                continue

            if sky.left < rect.left and sky.right > rect.left:
                # Skyline section partially under segment left
                self._merge_skyline(skylineq,
                    HSegment(sky.start, rect.left-sky.left))
                sky = HSegment(P(rect.left, sky.top), sky.right-rect.left)

            if sky.left < rect.right:
                if sky.left == rect.left:
                    # The rectangle's own top becomes a skyline segment.
                    self._merge_skyline(skylineq,
                        HSegment(P(rect.left, rect.top), rect.width))
                # Skyline section partially under segment right
                if sky.right > rect.right:
                    self._merge_skyline(skylineq,
                        HSegment(P(rect.right, sky.top), sky.right-rect.right))
                    sky = HSegment(sky.start, rect.right-sky.left)

            if sky.left >= rect.left and sky.right <= rect.right:
                # Skyline section fully under segment, account for wasted space
                if self._waste_management and sky.top < rect.bottom:
                    self._waste.add_waste(sky.left, sky.top,
                        sky.length, rect.bottom - sky.top)
            else:
                # Segment
                self._merge_skyline(skylineq, sky)

        # Aaaaand ..... Done
        self._skyline = list(skylineq)

    def _rect_fitness(self, rect, left_index, right_index):
        # Default heuristic: lower placements are better (subclasses
        # override this).
        return rect.top

    def _select_position(self, width, height):
        """
        Search for the placement with the bes fitness for the rectangle.

        Returns:
            tuple (Rectangle, fitness) - Rectangle placed in the fittest position
            None - Rectangle couldn't be placed
        """
        positions = self._generate_placements(width, height)
        if self.rot and width != height:
            # Also consider the rotated rectangle (deque += extends).
            positions += self._generate_placements(height, width)
        if not positions:
            return None, None
        return min(((p[0], self._rect_fitness(*p))for p in positions),
                key=operator.itemgetter(1))

    def fitness(self, width, height):
        """Search for the best fitness
        """
        assert(width > 0 and height >0)
        if width > max(self.width, self.height) or\
            height > max(self.height, self.width):
            return None

        # If there is room in wasted space, FREE PACKING!!
        if self._waste_management:
            if self._waste.fitness(width, height) is not None:
                return 0

        # Get best fitness segment, for normal rectangle, and for
        # rotated rectangle if rotation is enabled.
        # (local name shadows the method on purpose-of-brevity)
        rect, fitness = self._select_position(width, height)
        return fitness

    def add_rect(self, width, height, rid=None):
        """
        Add new rectangle

        Returns the placed Rectangle, or None when it doesn't fit.
        """
        assert(width > 0 and height > 0)
        if width > max(self.width, self.height) or\
            height > max(self.height, self.width):
            return None

        rect = None
        # If Waste managment is enabled, first try to place the rectangle there
        if self._waste_management:
            rect = self._waste.add_rect(width, height, rid)

        # Get best possible rectangle position
        if not rect:
            rect, _ = self._select_position(width, height)
            if rect:
                # Only placements on the skyline change the skyline.
                self._add_skyline(rect)

        if rect is None:
            return None

        # Store rectangle, and recalculate skyline
        rect.rid = rid
        self.rectangles.append(rect)
        return rect

    def reset(self):
        super(Skyline, self).reset()
        # Fresh skyline: one flat segment spanning the whole surface.
        self._skyline = [HSegment(P(0, 0), self.width)]
        self._waste.reset()
class SkylineWMixin(Skyline):
    """Waste managment mixin: enables reuse of the space wasted under
    placed rectangles (see Skyline._add_skyline)."""
    def __init__(self, width, height, *args, **kwargs):
        super(SkylineWMixin, self).__init__(width, height, *args, **kwargs)
        self._waste_management = True
class SkylineMwf(Skyline):
    """Implements Min Waste fit heuristic, minimizing the area wasted under the
    rectangle.
    """
    def _rect_fitness(self, rect, left_index, right_index):
        # Sum the area trapped between the rectangle's bottom edge and
        # every skyline segment it covers; less waste == better fit.
        waste = 0
        for seg in self._skyline[left_index:right_index+1]:
            waste +=\
                (min(rect.right, seg.right)-max(rect.left, seg.left)) *\
                (rect.bottom-seg.top)

        return waste

    def _rect_fitnes2s(self, rect, left_index, right_index):
        # NOTE(review): looks like an abandoned experiment — misspelled
        # name, no height term — and unused as far as this file shows.
        waste = ((min(rect.right, seg.right)-max(rect.left, seg.left)) for seg in self._skyline[left_index:right_index+1])
        return sum(waste)
class SkylineMwfl(Skyline):
    """Implements Min Waste fit with low profile heuritic, minimizing the area
    wasted below the rectangle, at the same time it tries to keep the height
    minimal.
    """
    def _rect_fitness(self, rect, left_index, right_index):
        waste = 0
        for seg in self._skyline[left_index:right_index+1]:
            waste +=\
                (min(rect.right, seg.right)-max(rect.left, seg.left)) *\
                (rect.bottom-seg.top)

        # Scale waste by the full surface area so it dominates, then use
        # the placement height (rect.top) as the tie-breaker.
        return waste*self.width*self.height+rect.top
class SkylineBl(Skyline):
    """Implements Bottom Left heuristic, the best fit option is that which
    results in which the top side of the rectangle lies at the bottom-most
    position.
    """
    def _rect_fitness(self, rect, left_index, right_index):
        # Lower resulting top edge == better placement.
        return rect.top
# Concrete heuristic + waste-management combinations.
class SkylineBlWm(SkylineBl, SkylineWMixin):
    """Bottom Left with waste management."""
    pass

class SkylineMwfWm(SkylineMwf, SkylineWMixin):
    """Min Waste fit with waste management."""
    pass

class SkylineMwflWm(SkylineMwfl, SkylineWMixin):
    """Min Waste fit, low profile, with waste management."""
    pass

View File

@ -0,0 +1,23 @@
from .guillotine import GuillotineBafMinas
from .geometry import Rectangle
class WasteManager(GuillotineBafMinas):
    """Tracks wasted (otherwise unusable) regions so later rectangles can
    be packed into them.

    Implemented as a guillotine packer whose free sections are the waste
    regions; sections are added explicitly via add_waste() (presumably
    the 1x1 surface passed to the parent is a placeholder — verify
    against GuillotineBafMinas).
    """
    def __init__(self, rot=True, merge=True, *args, **kwargs):
        super(WasteManager, self).__init__(1, 1, rot=rot, merge=merge, *args, **kwargs)

    def add_waste(self, x, y, width, height):
        """Add new waste section"""
        self._add_section(Rectangle(x, y, width, height))

    def _fits_surface(self, width, height):
        # Not meaningful here: there is no fixed surface to fit into.
        raise NotImplementedError

    def validate_packing(self):
        # Waste sections need not lie inside the placeholder surface.
        raise NotImplementedError

    def reset(self):
        super(WasteManager, self).reset()
        self._sections = [] # Start without any waste section

View File

@ -0,0 +1,677 @@
import bpy.ops as O
import bpy, os, re, sys, importlib, struct, platform, subprocess, threading, string, bmesh, shutil, glob, uuid
from io import StringIO
from threading import Thread
from queue import Queue, Empty
from dataclasses import dataclass
from dataclasses import field
from typing import List
###########################################################
###########################################################
# This set of utility functions are courtesy of LorenzWieseke
#
# Modified by Naxela
#
# https://github.com/Naxela/The_Lightmapper/tree/Lightmap-to-GLB
###########################################################
class Node_Types:
    """Blender shader node ``type`` identifiers used throughout this file."""
    output_node = 'OUTPUT_MATERIAL'
    ao_node = 'AMBIENT_OCCLUSION'
    image_texture = 'TEX_IMAGE'
    pbr_node = 'BSDF_PRINCIPLED'
    diffuse = 'BSDF_DIFFUSE'
    mapping = 'MAPPING'
    normal_map = 'NORMAL_MAP'
    bump_map = 'BUMP'
    attr_node = 'ATTRIBUTE'
class Shader_Node_Types:
    """``bl_idname`` strings used with ``nodes.new()`` to create nodes."""
    emission = "ShaderNodeEmission"
    image_texture = "ShaderNodeTexImage"
    mapping = "ShaderNodeMapping"
    normal = "ShaderNodeNormalMap"
    ao = "ShaderNodeAmbientOcclusion"
    uv = "ShaderNodeUVMap"
    mix = "ShaderNodeMixRGB"
def select_object(self,obj):
    """Deselect everything, then make *obj* the active, selected object.

    Reports (instead of raising) when selection fails, e.g. the object
    is not in the current view layer.
    """
    C = bpy.context
    try:
        O.object.select_all(action='DESELECT')
        C.view_layer.objects.active = obj
        obj.select_set(True)
    except Exception:
        # Was a bare `except:`; keep the best-effort behavior but stop
        # swallowing KeyboardInterrupt/SystemExit.
        self.report({'INFO'},"Object not in View Layer")
def select_obj_by_mat(self,mat):
    """Select (via select_object) every mesh object that uses *mat*."""
    for candidate in bpy.data.objects:
        if candidate.type != "MESH":
            continue
        used_materials = [slot.material for slot in candidate.material_slots]
        if mat in used_materials:
            select_object(self, candidate)
def save_image(image):
    """Save *image* beneath the .blend file as tex/<width>/<name><ext>.

    Target directories are created on demand.  JPEG and PNG keep their
    native extension; any other format falls back to ".png" (the previous
    code hit a NameError for unrecognized formats).
    """
    filePath = bpy.data.filepath
    path = os.path.dirname(filePath)

    # Create tex/<size>/ in one call; fine if it already exists.
    os.makedirs(os.path.join(path, "tex", str(image.size[0])), exist_ok=True)

    extensions = {"JPEG": ".jpg", "PNG": ".png"}
    file_ending = extensions.get(image.file_format, ".png")

    savepath = path + "/tex/" + \
        str(image.size[0]) + "/" + image.name + file_ending

    image.filepath_raw = savepath
    image.save()
def get_file_size(filepath):
    """Return the size of *filepath* in KiB, or the string "Unpack Files"
    when the size cannot be determined (e.g. packed or missing image).

    NOTE: callers appear to rely on the string sentinel, so the mixed
    return type is kept.
    """
    size = "Unpack Files"
    try:
        path = bpy.path.abspath(filepath)
        size = os.path.getsize(path)
        size /= 1024
    except Exception:
        # Narrowed from a bare `except:`; still best-effort.
        if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
            print("error getting file path for " + filepath)
    return (size)
def scale_image(image, newSize):
    """Scale *image* in place to newSize (w, h) and save it via save_image.

    ``org_filepath`` is a custom attribute remembering the original file
    path so repeated scaling restarts from the source file.
    """
    # If the image was scaled before, point back at the original first so
    # we do not rescale an already-scaled copy.
    if (image.org_filepath != ''):
        image.filepath = image.org_filepath

    # NOTE(review): records the (possibly just restored) filepath as the
    # new original — confirm this ordering is intentional.
    image.org_filepath = image.filepath
    image.scale(newSize[0], newSize[1])
    save_image(image)
def check_only_one_pbr(self,material):
    """Return True when *material* contains exactly one Principled BSDF
    node; otherwise report the problem and return False."""
    pbr_nodes = find_node_by_type(material.node_tree.nodes,
                                  Node_Types.pbr_node)
    pbr_count = len(pbr_nodes)

    if pbr_count == 0:
        self.report({'INFO'}, 'No PBR Shader Found')
    elif pbr_count > 1:
        self.report({'INFO'}, 'More than one PBR Node found ! Clean before Baking.')

    return pbr_count == 1
# is material already the baked one
def check_is_org_material(self,material):
    """Return False (and report) when *material* is a "_Bake" duplicate,
    True when it is an original material."""
    if "_Bake" not in material.name:
        return True
    self.report({'INFO'}, 'Change back to org. Material')
    return False
def clean_empty_materials(self):
    """Remove material slots whose material is None from scene objects.

    NOTE(review): bpy.ops.object.material_slot_remove() acts on the
    object's *active* slot — confirm the empty slot is active when an
    object has several slots.
    """
    for obj in bpy.context.scene.objects:
        for slot in obj.material_slots:
            mat = slot.material
            if mat is None:
                if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                    print("Removed Empty Materials from " + obj.name)
                bpy.ops.object.select_all(action='DESELECT')
                obj.select_set(True)
                bpy.ops.object.material_slot_remove()
def get_pbr_inputs(pbr_node):
    """Collect the commonly-used input sockets of a Principled BSDF node.

    Returns:
        dict: keys "base_color_input", "metallic_input",
        "specular_input", "roughness_input", "normal_input" mapped to the
        corresponding input sockets.
    """
    socket_names = ["Base Color", "Metallic", "Specular", "Roughness", "Normal"]
    return {
        name.lower().replace(" ", "_") + "_input": pbr_node.inputs[name]
        for name in socket_names
    }
def find_node_by_type(nodes, node_type):
    """Return a list of all nodes whose ``type`` equals *node_type*."""
    return [node for node in nodes if node.type == node_type]
def find_node_by_type_recusivly(material, note_to_start, node_type, del_nodes_inbetween=False):
    """Walk node links upstream from *note_to_start* looking for a node of
    *node_type*, optionally deleting the nodes traversed along the way.

    NOTE(review): the immediate return inside the loops means only the
    first link of the first connected input is followed — confirm
    multi-input nodes are handled as intended.  Returns None implicitly
    when the chain ends without a match.
    """
    nodes = material.node_tree.nodes
    if note_to_start.type == node_type:
        return note_to_start
    for input in note_to_start.inputs:
        for link in input.links:
            current_node = link.from_node
            # Normal/bump map nodes are preserved even when deleting.
            if (del_nodes_inbetween and note_to_start.type != Node_Types.normal_map and note_to_start.type != Node_Types.bump_map):
                nodes.remove(note_to_start)
            return find_node_by_type_recusivly(material, current_node, node_type, del_nodes_inbetween)
def find_node_by_name_recusivly(node, idname):
    """Follow input links upstream from *node* until a node with the given
    ``bl_idname`` is found.

    NOTE(review): returns after the first link of the first connected
    input — parallel branches are not searched.  Returns None implicitly
    when the chain ends without a match.
    """
    if node.bl_idname == idname:
        return node
    for input in node.inputs:
        for link in input.links:
            current_node = link.from_node
            return find_node_by_name_recusivly(current_node, idname)
def make_link(material, socket1, socket2):
    """Create a node-tree link from *socket1* to *socket2* in *material*."""
    material.node_tree.links.new(socket1, socket2)
def add_gamma_node(material, pbrInput):
    """Insert a 2.2-gamma node named "Gamma Bake" in front of *pbrInput*.

    Assumes *pbrInput* already has an incoming link (links[0]).
    """
    nodeToPrincipledOutput = pbrInput.links[0].from_socket

    gammaNode = material.node_tree.nodes.new("ShaderNodeGamma")
    gammaNode.inputs[1].default_value = 2.2
    gammaNode.name = "Gamma Bake"

    # link in gamma
    make_link(material, nodeToPrincipledOutput, gammaNode.inputs["Color"])
    make_link(material, gammaNode.outputs["Color"], pbrInput)
def remove_gamma_node(material, pbrInput):
    """Undo add_gamma_node: relink the original upstream socket directly
    to *pbrInput* and delete the "Gamma Bake" node.

    Assumes the "Gamma Bake" node exists and is linked.
    """
    nodes = material.node_tree.nodes
    gammaNode = nodes.get("Gamma Bake")
    nodeToPrincipledOutput = gammaNode.inputs[0].links[0].from_socket

    make_link(material, nodeToPrincipledOutput, pbrInput)
    material.node_tree.nodes.remove(gammaNode)
def apply_ao_toggle(self,context):
    """Switch every material between AO preview (the "AO Bake" node driving
    an emission shader) and its normal PBR output, based on
    context.scene.toggle_ao.
    """
    all_materials = bpy.data.materials
    ao_toggle = context.scene.toggle_ao
    for mat in all_materials:
        nodes = mat.node_tree.nodes
        ao_node = nodes.get("AO Bake")
        if ao_node is not None:
            if ao_toggle:
                emission_setup(mat,ao_node.outputs["Color"])
            else:
                # NOTE(review): [0] assumes every such material has a
                # Principled BSDF — IndexError otherwise.
                pbr_node = find_node_by_type(nodes,Node_Types.pbr_node)[0]
                remove_node(mat,"Emission Bake")
                reconnect_PBR(mat, pbr_node)
def emission_setup(material, node_output):
    """Route *node_output* through an "Emission Bake" node straight into
    the Material Output, bypassing the regular shader."""
    nodes = material.node_tree.nodes
    emission_node = add_node(material,Shader_Node_Types.emission,"Emission Bake")

    # link emission to whatever goes into current pbrInput
    emission_input = emission_node.inputs[0]
    make_link(material, node_output, emission_input)

    # link emission to materialOutput
    surface_input = nodes.get("Material Output").inputs[0]
    emission_output = emission_node.outputs[0]
    make_link(material, emission_output, surface_input)
def link_pbr_to_output(material,pbr_node):
    """Wire the PBR node's first output into the Material Output surface."""
    surface_socket = material.node_tree.nodes.get("Material Output").inputs[0]
    make_link(material, pbr_node.outputs[0], surface_socket)
def reconnect_PBR(material, pbrNode):
    """Plug *pbrNode* back into the Material Output surface socket
    (undoes emission_setup)."""
    surface_socket = material.node_tree.nodes.get("Material Output").inputs[0]
    make_link(material, pbrNode.outputs[0], surface_socket)
def mute_all_texture_mappings(material, do_mute):
    """Set the mute flag on every Mapping node in *material*."""
    mapping_nodes = (n for n in material.node_tree.nodes
                     if n.bl_idname == "ShaderNodeMapping")
    for mapping_node in mapping_nodes:
        mapping_node.mute = do_mute
def add_node(material,shader_node_type,node_name):
    """Return the node called *node_name*, creating it (with matching
    label) from *shader_node_type* when it does not exist yet."""
    nodes = material.node_tree.nodes
    node = nodes.get(node_name)
    if node is None:
        node = nodes.new(shader_node_type)
        node.name = node_name
        node.label = node_name
    return node
def remove_node(material,node_name):
    """Delete the node called *node_name* from *material*, if present."""
    nodes = material.node_tree.nodes
    target = nodes.get(node_name)
    if target is not None:
        nodes.remove(target)
def lightmap_to_ao(material,lightmap_node):
    """Feed *lightmap_node* into a glTF "Occlusion" group input (through a
    multiply node for strength control) so the lightmap exports as AO.

    NOTE(review): node_group.inputs.new() is the pre-Blender-4.0 group
    socket API — confirm against the targeted Blender version.
    """
    nodes = material.node_tree.nodes

    # -----------------------AO SETUP--------------------#
    # create group data
    gltf_settings = bpy.data.node_groups.get('glTF Settings')
    if gltf_settings is None:
        bpy.data.node_groups.new('glTF Settings', 'ShaderNodeTree')

    # add group to node tree
    ao_group = nodes.get('glTF Settings')
    if ao_group is None:
        ao_group = nodes.new('ShaderNodeGroup')
        ao_group.name = 'glTF Settings'
        ao_group.node_tree = bpy.data.node_groups['glTF Settings']

    # create group inputs
    if ao_group.inputs.get('Occlusion') is None:
        ao_group.inputs.new('NodeSocketFloat','Occlusion')

    # mulitply to control strength
    mix_node = add_node(material,Shader_Node_Types.mix,"Adjust Lightmap")
    mix_node.blend_type = "MULTIPLY"
    mix_node.inputs["Fac"].default_value = 1
    mix_node.inputs["Color2"].default_value = [3,3,3,1]

    # position node
    ao_group.location = (lightmap_node.location[0]+600,lightmap_node.location[1])
    mix_node.location = (lightmap_node.location[0]+300,lightmap_node.location[1])

    make_link(material,lightmap_node.outputs['Color'],mix_node.inputs['Color1'])
    make_link(material,mix_node.outputs['Color'],ao_group.inputs['Occlusion'])
###########################################################
###########################################################
# This utility function is modified from blender_xatlas
# and calls the object without any explicit object context
# thus allowing blender_xatlas to pack from background.
###########################################################
# Code is courtesy of mattedicksoncom
# Modified by Naxela
#
# https://github.com/mattedicksoncom/blender-xatlas/
###########################################################
def gen_safe_name():
    """Return a unique, Blender-safe identifier: "u_" + 32 hex chars."""
    return "u_" + uuid.uuid4().hex
def Unwrap_Lightmap_Group_Xatlas_2_headless_call(obj):
    """Unwrap/pack lightmap UVs for the currently selected mesh objects by
    piping a fake OBJ export through the external xatlas executable bundled
    with the blender_xatlas add-on. Works without an explicit operator
    context, so it can run from background/headless Blender.

    Modified from blender_xatlas by mattedicksoncom (see banner above).

    Returns 0 if the blender_xatlas add-on is not installed, otherwise
    {'FINISHED'}.
    """
    # The heavy lifting is delegated to the blender_xatlas add-on; bail out
    # quietly if it is not installed.
    if importlib.util.find_spec("blender_xatlas") is not None:
        import blender_xatlas
    else:
        return 0

    packOptions = bpy.context.scene.pack_tool
    chartOptions = bpy.context.scene.chart_tool
    sharedProperties = bpy.context.scene.shared_properties
    #sharedProperties.unwrapSelection

    context = bpy.context

    # Save whatever mode the user was in so it can be restored at the end.
    startingMode = bpy.context.object.mode
    selected_objects = bpy.context.selected_objects

    # Check something is actually selected; the external caller is expected
    # to have selected the objects to process.
    if len(selected_objects) == 0:
        # NOTE: this is a plain function, not an Operator method — the
        # original `self.report(...)` call here raised a NameError.
        print("Nothing Selected, please select Something")
        return {'FINISHED'}

    # Bookkeeping for the name-mangling round-trip through the OBJ pipe:
    rename_dict = dict()  # original name -> (original name, safe name)
    safe_dict = dict()    # safe name -> original name

    # Make sure all the mesh objects have lightmap UVs.
    for obj in selected_objects:
        if obj.type == 'MESH':
            safe_name = gen_safe_name()
            rename_dict[obj.name] = (obj.name, safe_name)
            safe_dict[safe_name] = obj.name
            context.view_layer.objects.active = obj
            if obj.data.users > 1:
                obj.data = obj.data.copy()  # make single-user copy
            uv_layers = obj.data.uv_layers

            # Resolve which UV layer name to use for the lightmap.
            uvName = "UVMap_Lightmap"
            if sharedProperties.lightmapUVChoiceType == "NAME":
                uvName = sharedProperties.lightmapUVName
            elif sharedProperties.lightmapUVChoiceType == "INDEX":
                if sharedProperties.lightmapUVIndex < len(uv_layers):
                    uvName = uv_layers[sharedProperties.lightmapUVIndex].name

            # Create the layer if missing, otherwise make it active.
            if uvName not in uv_layers:
                uv_layers.new(name=uvName)
                uv_layers.active_index = len(uv_layers) - 1
            else:
                for i in range(0, len(uv_layers)):
                    if uv_layers[i].name == uvName:
                        uv_layers.active_index = i
            obj.select_set(True)

    # In pack-only mode remember each mesh's original edge count so the
    # triangulation added below can be dissolved again afterwards.
    if sharedProperties.packOnly:
        edgeDict = dict()
        for obj in selected_objects:
            if obj.type == 'MESH':
                tempEdgeDict = dict()
                tempEdgeDict['object'] = obj.name
                # Indices of all pre-triangulation edges.
                tempEdgeDict['edges'] = list(range(len(obj.data.edges)))
                print(len(obj.data.edges))
                edgeDict[obj.name] = tempEdgeDict

    # xatlas consumes triangles only, so triangulate everything up front
    # (identical in both modes; pack-only restores the quads at the end).
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.quads_convert_to_tris(quad_method='FIXED', ngon_method='BEAUTY')
    bpy.ops.object.mode_set(mode='OBJECT')

    # Create a fake OBJ export into a string buffer instead of a file.
    fakeFile = StringIO()
    blender_xatlas.export_obj_simple.save(
        rename_dict=rename_dict,
        context=bpy.context,
        filepath=fakeFile,
        mainUVChoiceType=sharedProperties.mainUVChoiceType,
        uvIndex=sharedProperties.mainUVIndex,
        uvName=sharedProperties.mainUVName,
        use_selection=True,
        use_animation=False,
        use_mesh_modifiers=True,
        use_edges=True,
        use_smooth_groups=False,
        use_smooth_groups_bitflags=False,
        use_normals=True,
        use_uvs=True,
        use_materials=False,
        use_triangles=False,
        use_nurbs=False,
        use_vertex_groups=False,
        use_blen_objects=True,
        group_by_object=False,
        group_by_material=False,
        keep_vertex_order=False,
    )

    # Locate the xatlas executable bundled with the blender_xatlas add-on.
    scriptsDir = os.path.join(bpy.utils.user_resource('SCRIPTS'), "addons")
    file_path = os.path.join(scriptsDir, "blender_xatlas")
    if platform.system() == "Windows":
        xatlas_path = os.path.join(file_path, "xatlas", "xatlas-blender.exe")
    elif platform.system() == "Linux":
        xatlas_path = os.path.join(file_path, "xatlas", "xatlas-blender")
        # Need to set execute permission for the process on Linux.
        subprocess.Popen(
            'chmod u+x "' + xatlas_path + '"',
            shell=True
        )

    def _collect_args(props):
        """Build " -key[ value]" CLI arguments from a PropertyGroup.

        Boolean True becomes a bare flag; every non-boolean value is passed
        as "-key value"; boolean False is omitted entirely.
        """
        parts = ""
        for argumentKey in props.__annotations__.keys():
            attrib = getattr(props, str(argumentKey))
            print(attrib)
            if type(attrib) == bool:
                if attrib == True:
                    parts = parts + " -" + str(argumentKey)
            else:
                parts = parts + " -" + str(argumentKey) + " " + str(attrib)
        return parts

    # Setup the arguments to be passed to xatlas.
    arguments_string = _collect_args(packOptions) + _collect_args(chartOptions)

    # Add pack-only option and the atlas layout.
    if sharedProperties.packOnly:
        arguments_string = arguments_string + " -packOnly"
    arguments_string = arguments_string + " -atlasLayout" + " " + sharedProperties.atlasLayout
    print(arguments_string)

    # Run the xatlas process.
    xatlas_process = subprocess.Popen(
        r'"{}"'.format(xatlas_path) + ' ' + arguments_string,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell=True
    )
    print(xatlas_path)

    # Shove the fake OBJ file into stdin.
    stdin = xatlas_process.stdin
    value = bytes(fakeFile.getvalue() + "\n", 'UTF-8')  # \n terminates the input
    stdin.write(value)
    stdin.flush()

    # Read the processed OBJ back from xatlas' stdout, line by line.
    outObj = ""
    while True:
        output = xatlas_process.stdout.readline()
        if not output:
            break
        outObj = outObj + (output.decode().strip() + "\n")

    # Per-object container for the UVs parsed from xatlas' output.
    @dataclass
    class uvObject:
        obName: str = ""  # was annotated with the `string` module; `str` intended
        uvArray: List[float] = field(default_factory=list)
        faceArray: List[int] = field(default_factory=list)

    convertedObjects = []
    uvArrayComplete = []

    # Search the output for STARTOBJ, then start reading the OBJ records.
    obTest = None
    startRead = False
    for line in outObj.splitlines():
        line_split = line.split()
        if not line_split:
            continue
        line_start = line_split[0]  # we compare with this a _lot_
        if line_start == "STARTOBJ":
            print("Start reading the objects----------------------------------------")
            startRead = True
        if startRead:
            # A new object record begins.
            if line_start == 'o':
                # If there is already an object in flight, append it first.
                if obTest is not None:
                    convertedObjects.append(obTest)
                obTest = uvObject()
                obTest.obName = line_split[1]
            if obTest is not None:
                # The uv coordinates.
                if line_start == 'vt':
                    newUv = [float(line_split[1]), float(line_split[2])]
                    obTest.uvArray.append(newUv)
                    uvArrayComplete.append(newUv)
                # Face records are "vert/uv/normal", 1-indexed; only the uv
                # index is needed here.
                if line_start == 'f':
                    newFace = [
                        int(line_split[1].split("/")[1]),
                        int(line_split[2].split("/")[1]),
                        int(line_split[3].split("/")[1])
                    ]
                    obTest.faceArray.append(newFace)
    # Append the final object — but only if anything was actually parsed
    # (unconditionally appending None crashed the apply loop below when
    # xatlas produced no output).
    if obTest is not None:
        convertedObjects.append(obTest)
    print(convertedObjects)

    # Apply the packed UVs back onto the original objects.
    print("Applying the UVs----------------------------------------")
    for importObject in convertedObjects:
        bpy.ops.object.select_all(action='DESELECT')
        obTest = importObject
        obTest.obName = safe_dict[obTest.obName]  # map safe name back to the real name
        bpy.context.scene.objects[obTest.obName].select_set(True)
        context.view_layer.objects.active = bpy.context.scene.objects[obTest.obName]
        bpy.ops.object.mode_set(mode='OBJECT')
        obj = bpy.context.active_object
        me = obj.data

        # Convert to bmesh to write the new uvs.
        bm = bmesh.new()
        bm.from_mesh(me)
        uv_layer = bm.loops.layers.uv.verify()
        nFaces = len(bm.faces)
        if hasattr(bm.faces, "ensure_lookup_table"):
            bm.faces.ensure_lookup_table()

        # OBJ uv indices are 1-based into the combined uv list.
        for faceIndex in range(nFaces):
            faceGroup = obTest.faceArray[faceIndex]
            bm.faces[faceIndex].loops[0][uv_layer].uv = (
                uvArrayComplete[faceGroup[0] - 1][0],
                uvArrayComplete[faceGroup[0] - 1][1])
            bm.faces[faceIndex].loops[1][uv_layer].uv = (
                uvArrayComplete[faceGroup[1] - 1][0],
                uvArrayComplete[faceGroup[1] - 1][1])
            bm.faces[faceIndex].loops[2][uv_layer].uv = (
                uvArrayComplete[faceGroup[2] - 1][0],
                uvArrayComplete[faceGroup[2] - 1][1])

        # Assign the mesh back to the original mesh.
        bm.to_mesh(me)

    # Pack-only: dissolve the edges added by triangulation to restore quads.
    if sharedProperties.packOnly:
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.mode_set(mode='OBJECT')
        for edges in edgeDict:
            edgeList = edgeDict[edges]
            currentObject = bpy.context.scene.objects[edgeList['object']]
            bm = bmesh.new()
            bm.from_mesh(currentObject.data)
            if hasattr(bm.edges, "ensure_lookup_table"):
                bm.edges.ensure_lookup_table()
            # Assume that all the triangulated edges come after the original edges.
            newEdges = []
            for edge in range(len(edgeList['edges']), len(bm.edges)):
                newEdge = bm.edges[edge]
                newEdge.select = True
                newEdges.append(newEdge)
            bmesh.ops.dissolve_edges(bm, edges=newEdges, use_verts=False, use_face_split=False)
            bpy.ops.object.mode_set(mode='OBJECT')
            bm.to_mesh(currentObject.data)
            bm.free()
        bpy.ops.object.mode_set(mode='EDIT')

    # Re-select the objects that were originally selected. rename_dict maps
    # original name -> (original name, safe name), so iterate the values —
    # iterating the dict itself yielded key strings, making names[0] just
    # the first *character* of the name (restore never matched).
    for names in rename_dict.values():
        if names[0] in bpy.context.scene.objects:
            current_object = bpy.context.scene.objects[names[0]]
            current_object.select_set(True)
            context.view_layer.objects.active = current_object

    bpy.ops.object.mode_set(mode=startingMode)
    print("Finished Xatlas----------------------------------------")
    return {'FINISHED'}
def transfer_assets(copy, source, destination):
    """Copy every dotted file (pattern ``*.*``) from *source* into *destination*.

    Files that already are the same file at the destination are skipped
    silently. The *copy* flag is accepted for call compatibility but is
    not consulted.
    """
    pattern = os.path.join(source, '*.*')
    for asset_path in glob.glob(pattern):
        try:
            shutil.copy(asset_path, destination)
        except shutil.SameFileError:
            # Source and destination resolve to the same file — nothing to do.
            pass
def transfer_load():
    """Copy externally baked lightmaps from the configured TLM load folder
    into the scene's lightmap save directory (both relative to the .blend)."""
    scene = bpy.context.scene
    base_dir = os.path.dirname(bpy.data.filepath)
    load_folder = bpy.path.abspath(
        os.path.join(base_dir, scene.TLM_SceneProperties.tlm_load_folder))
    lightmap_folder = os.path.join(
        base_dir, scene.TLM_EngineProperties.tlm_lightmap_savedir)
    print(load_folder)
    print(lightmap_folder)
    transfer_assets(True, load_folder, lightmap_folder)