forked from LeenkxTeam/LNXSDK
Update Files
This commit is contained in:
0
leenkx/blender/lnx/lib/__init__.py
Normal file
0
leenkx/blender/lnx/lib/__init__.py
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/__init__.cpython-311.pyc
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
BIN
leenkx/blender/lnx/lib/__pycache__/lnxpack.cpython-311.pyc
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/lnxpack.cpython-311.pyc
Normal file
Binary file not shown.
BIN
leenkx/blender/lnx/lib/__pycache__/lz4.cpython-311.pyc
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/lz4.cpython-311.pyc
Normal file
Binary file not shown.
BIN
leenkx/blender/lnx/lib/__pycache__/make_datas.cpython-311.pyc
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/make_datas.cpython-311.pyc
Normal file
Binary file not shown.
BIN
leenkx/blender/lnx/lib/__pycache__/server.cpython-311.pyc
Normal file
BIN
leenkx/blender/lnx/lib/__pycache__/server.cpython-311.pyc
Normal file
Binary file not shown.
175
leenkx/blender/lnx/lib/lnxpack.py
Normal file
175
leenkx/blender/lnx/lib/lnxpack.py
Normal file
@ -0,0 +1,175 @@
|
||||
"""Msgpack parser with typed arrays"""
|
||||
|
||||
# Based on u-msgpack-python v2.4.1 - v at sergeev.io
|
||||
# https://github.com/vsergeev/u-msgpack-python
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
#
|
||||
import io
|
||||
import struct
|
||||
import numpy as np
|
||||
|
||||
|
||||
def _pack_integer(obj, fp):
|
||||
if obj < 0:
|
||||
if obj >= -32:
|
||||
fp.write(struct.pack("b", obj))
|
||||
elif obj >= -(2 ** (8 - 1)):
|
||||
fp.write(b"\xd0" + struct.pack("b", obj))
|
||||
elif obj >= -(2 ** (16 - 1)):
|
||||
fp.write(b"\xd1" + struct.pack("<h", obj))
|
||||
elif obj >= -(2 ** (32 - 1)):
|
||||
fp.write(b"\xd2" + struct.pack("<i", obj))
|
||||
elif obj >= -(2 ** (64 - 1)):
|
||||
fp.write(b"\xd3" + struct.pack("<q", obj))
|
||||
else:
|
||||
raise Exception("huge signed int")
|
||||
else:
|
||||
if obj <= 127:
|
||||
fp.write(struct.pack("B", obj))
|
||||
elif obj <= 2**8 - 1:
|
||||
fp.write(b"\xcc" + struct.pack("B", obj))
|
||||
elif obj <= 2**16 - 1:
|
||||
fp.write(b"\xcd" + struct.pack("<H", obj))
|
||||
elif obj <= 2**32 - 1:
|
||||
fp.write(b"\xce" + struct.pack("<I", obj))
|
||||
elif obj <= 2**64 - 1:
|
||||
fp.write(b"\xcf" + struct.pack("<Q", obj))
|
||||
else:
|
||||
raise Exception("huge unsigned int")
|
||||
|
||||
|
||||
def _pack_nil(obj, fp):
|
||||
fp.write(b"\xc0")
|
||||
|
||||
|
||||
def _pack_boolean(obj, fp):
|
||||
fp.write(b"\xc3" if obj else b"\xc2")
|
||||
|
||||
|
||||
def _pack_float(obj, fp):
|
||||
# NOTE: forced 32-bit floats for Leenkx
|
||||
# fp.write(b"\xcb" + struct.pack("<d", obj)) # Double
|
||||
fp.write(b"\xca" + struct.pack("<f", obj))
|
||||
|
||||
|
||||
def _pack_string(obj, fp):
|
||||
obj = obj.encode("utf-8")
|
||||
if len(obj) <= 31:
|
||||
fp.write(struct.pack("B", 0xA0 | len(obj)) + obj)
|
||||
elif len(obj) <= 2**8 - 1:
|
||||
fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj)
|
||||
elif len(obj) <= 2**16 - 1:
|
||||
fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj)
|
||||
elif len(obj) <= 2**32 - 1:
|
||||
fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj)
|
||||
else:
|
||||
raise Exception("huge string")
|
||||
|
||||
|
||||
def _pack_binary(obj, fp):
|
||||
if len(obj) <= 2**8 - 1:
|
||||
fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj)
|
||||
elif len(obj) <= 2**16 - 1:
|
||||
fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj)
|
||||
elif len(obj) <= 2**32 - 1:
|
||||
fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj)
|
||||
else:
|
||||
raise Exception("huge binary string")
|
||||
|
||||
|
||||
def _pack_array(obj, fp):
|
||||
if len(obj) <= 15:
|
||||
fp.write(struct.pack("B", 0x90 | len(obj)))
|
||||
elif len(obj) <= 2**16 - 1:
|
||||
fp.write(b"\xdc" + struct.pack("<H", len(obj)))
|
||||
elif len(obj) <= 2**32 - 1:
|
||||
fp.write(b"\xdd" + struct.pack("<I", len(obj)))
|
||||
else:
|
||||
raise Exception("huge array")
|
||||
|
||||
if len(obj) > 0 and isinstance(obj[0], float):
|
||||
fp.write(b"\xca")
|
||||
for e in obj:
|
||||
fp.write(struct.pack("<f", e))
|
||||
elif len(obj) > 0 and isinstance(obj[0], bool):
|
||||
for e in obj:
|
||||
pack(e, fp)
|
||||
elif len(obj) > 0 and isinstance(obj[0], int):
|
||||
fp.write(b"\xd2")
|
||||
for e in obj:
|
||||
fp.write(struct.pack("<i", e))
|
||||
# Float32
|
||||
elif len(obj) > 0 and isinstance(obj[0], np.float32):
|
||||
fp.write(b"\xca")
|
||||
fp.write(obj.tobytes())
|
||||
# Int32
|
||||
elif len(obj) > 0 and isinstance(obj[0], np.int32):
|
||||
fp.write(b"\xd2")
|
||||
fp.write(obj.tobytes())
|
||||
# Int16
|
||||
elif len(obj) > 0 and isinstance(obj[0], np.int16):
|
||||
fp.write(b"\xd1")
|
||||
fp.write(obj.tobytes())
|
||||
# Regular
|
||||
else:
|
||||
for e in obj:
|
||||
pack(e, fp)
|
||||
|
||||
|
||||
def _pack_map(obj, fp):
|
||||
if len(obj) <= 15:
|
||||
fp.write(struct.pack("B", 0x80 | len(obj)))
|
||||
elif len(obj) <= 2**16 - 1:
|
||||
fp.write(b"\xde" + struct.pack("<H", len(obj)))
|
||||
elif len(obj) <= 2**32 - 1:
|
||||
fp.write(b"\xdf" + struct.pack("<I", len(obj)))
|
||||
else:
|
||||
raise Exception("huge array")
|
||||
|
||||
for k, v in obj.items():
|
||||
pack(k, fp)
|
||||
pack(v, fp)
|
||||
|
||||
|
||||
def pack(obj, fp):
    """Serialize *obj* to the binary stream *fp*.

    Dispatches on the runtime type of *obj*. The check order matters:
    None is handled first, and bool must be tested before int because
    bool is an int subclass.

    Raises:
        Exception: for types with no msgpack encoding.
    """
    if obj is None:
        _pack_nil(obj, fp)
        return

    # Ordered dispatch table; first matching type wins
    for types, writer in (
        (bool, _pack_boolean),
        (int, _pack_integer),
        (float, _pack_float),
        (str, _pack_string),
        (bytes, _pack_binary),
        ((list, np.ndarray, tuple), _pack_array),
        (dict, _pack_map),
    ):
        if isinstance(obj, types):
            writer(obj, fp)
            return

    raise Exception(f"unsupported type: {str(type(obj))}")
|
||||
|
||||
|
||||
def packb(obj):
    """Serialize *obj* and return the packed result as bytes."""
    buffer = io.BytesIO()
    pack(obj, buffer)
    return buffer.getvalue()
|
181
leenkx/blender/lnx/lib/lz4.py
Normal file
181
leenkx/blender/lnx/lib/lz4.py
Normal file
@ -0,0 +1,181 @@
|
||||
"""
|
||||
Port of the Iron LZ4 compression module based on
|
||||
https://github.com/gorhill/lz4-wasm. Original license:
|
||||
|
||||
BSD 2-Clause License
|
||||
Copyright (c) 2018, Raymond Hill
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
"""
|
||||
import numpy as np
|
||||
from numpy import uint8, int32, uint32
|
||||
|
||||
|
||||
class LZ4RangeException(Exception):
    """Raised when an input buffer exceeds the maximum encodable size."""
    pass
|
||||
|
||||
|
||||
class LZ4:
    """LZ4 block encoder ported from gorhill/lz4-wasm (see module header)."""

    # Lazily-allocated 64K-entry match hash table, reused (and reset)
    # across encode() calls. Shared at class level, so encode() is not
    # safe for concurrent use from multiple threads.
    hash_table = None

    @staticmethod
    def encode_bound(size: int) -> int:
        """Return the worst-case encoded size for *size* input bytes.

        Returns 0 when *size* exceeds the 0x7E000000 input limit.
        """
        return 0 if size > 0x7E000000 else size + (size // 255 | 0) + 16

    @staticmethod
    def encode(b: bytes) -> bytes:
        """Compress *b* with the LZ4 block format and return the result.

        Raises:
            LZ4RangeException: if *b* is 0x7E000000 bytes or larger.
        """
        i_buf: np.ndarray = np.frombuffer(b, dtype=uint8)
        i_len = i_buf.size

        if i_len >= 0x7E000000:
            raise LZ4RangeException("Input buffer is too large")

        # "The last match must start at least 12 bytes before end of block"
        last_match_pos = i_len - 12

        # "The last 5 bytes are always literals"
        last_literal_pos = i_len - 5

        if LZ4.hash_table is None:
            LZ4.hash_table = np.full(shape=65536, fill_value=-65536, dtype=int32)

        # Reset stale positions from any previous encode() call
        LZ4.hash_table.fill(-65536)

        o_len = LZ4.encode_bound(i_len)
        o_buf = np.full(shape=o_len, fill_value=0, dtype=uint8)
        i_pos = 0       # read cursor into i_buf
        o_pos = 0       # write cursor into o_buf
        anchor_pos = 0  # start of the pending literal run

        # Sequence-finding loop
        while True:
            ref_pos = int32(0)
            m_offset = 0
            # Seed the rolling 4-byte window; the first shift in the inner
            # loop pulls in i_buf[i_pos + 3] to complete it
            sequence = uint32(
                i_buf[i_pos] << 8 | i_buf[i_pos + 1] << 16 | i_buf[i_pos + 2] << 24
            )

            # Match-finding loop
            while i_pos <= last_match_pos:
                # Conversion to uint32 is mandatory to ensure correct
                # unsigned right shift (compare with .hx implementation)
                sequence = uint32(
                    uint32(sequence) >> uint32(8) | i_buf[i_pos + 3] << 24
                )
                # Multiplicative hash of the 4-byte window into 16 bits
                hash_val = (sequence * 0x9E37 & 0xFFFF) + (
                    uint32(sequence * 0x79B1) >> uint32(16)
                ) & 0xFFFF
                ref_pos = LZ4.hash_table[hash_val]
                LZ4.hash_table[hash_val] = i_pos
                m_offset = i_pos - ref_pos
                # Verify the candidate byte-by-byte (hash may collide) and
                # that the offset fits in the 16-bit match-offset field
                if (
                    m_offset < 65536
                    and i_buf[ref_pos + 0] == (sequence & 0xFF)
                    and i_buf[ref_pos + 1] == ((sequence >> uint32(8)) & 0xFF)
                    and i_buf[ref_pos + 2] == ((sequence >> uint32(16)) & 0xFF)
                    and i_buf[ref_pos + 3] == ((sequence >> uint32(24)) & 0xFF)
                ):
                    break

                i_pos += 1

            # No match found
            if i_pos > last_match_pos:
                break

            # Match found: l_len literals since the anchor, then a match
            l_len = i_pos - anchor_pos
            m_len = i_pos
            i_pos += 4
            ref_pos += 4
            # Extend the match as far as allowed
            while i_pos < last_literal_pos and i_buf[i_pos] == i_buf[ref_pos]:
                i_pos += 1
                ref_pos += 1

            m_len = i_pos - m_len
            # Token low nibble: match length - 4, or 15 when it overflows
            # into extra length bytes
            token = m_len - 4 if m_len < 19 else 15

            # Write token, length of literals if needed
            if l_len >= 15:
                o_buf[o_pos] = 0xF0 | token
                o_pos += 1
                l = l_len - 15
                while l >= 255:
                    o_buf[o_pos] = 255
                    o_pos += 1
                    l -= 255
                o_buf[o_pos] = l
                o_pos += 1
            else:
                o_buf[o_pos] = (l_len << 4) | token
                o_pos += 1

            # Write literals
            while l_len > 0:
                l_len -= 1
                o_buf[o_pos] = i_buf[anchor_pos]
                o_pos += 1
                anchor_pos += 1

            # Defensive: m_len is always >= 4 here (i_pos advanced by at
            # least 4 above), so this break should be unreachable
            if m_len == 0:
                break

            # Write offset of match (16-bit little-endian)
            # NOTE(review): m_offset can exceed 255; storing it into a
            # uint8 slot relies on NumPy's wrap-around integer conversion,
            # which newer NumPy versions reject with OverflowError — confirm
            # against the NumPy version shipped with the SDK.
            o_buf[o_pos + 0] = m_offset
            o_buf[o_pos + 1] = m_offset >> 8
            o_pos += 2

            # Write length of match if needed
            if m_len >= 19:
                l = m_len - 19
                while l >= 255:
                    o_buf[o_pos] = 255
                    o_pos += 1
                    l -= 255

                o_buf[o_pos] = l
                o_pos += 1

            anchor_pos = i_pos

        # Last sequence is literals only
        l_len = i_len - anchor_pos
        if l_len >= 15:
            o_buf[o_pos] = 0xF0
            o_pos += 1
            l = l_len - 15
            while l >= 255:
                o_buf[o_pos] = 255
                o_pos += 1
                l -= 255

            o_buf[o_pos] = l
            o_pos += 1

        else:
            o_buf[o_pos] = l_len << 4
            o_pos += 1

        while l_len > 0:
            l_len -= 1
            o_buf[o_pos] = i_buf[anchor_pos]
            o_pos += 1
            anchor_pos += 1

        # Trim the over-allocated output buffer to the bytes actually written
        return np.resize(o_buf, o_pos).tobytes()
|
328
leenkx/blender/lnx/lib/make_datas.py
Normal file
328
leenkx/blender/lnx/lib/make_datas.py
Normal file
@ -0,0 +1,328 @@
|
||||
import lnx.utils
|
||||
from lnx import assets
|
||||
|
||||
def parse_context(
    c: dict,
    sres: dict,
    asset,
    defs: list[str],
    vert: list[str] = None,
    frag: list[str] = None,
):
    """Parse one shader context from the json (`c`) and append the result
    to ``sres["contexts"]``.

    Shader file names are reduced to their base name (no directory, only
    the last extension stripped) and registered in `asset` when missing.
    Each referenced shader stage is then parsed for vertex elements,
    uniforms and constants via `parse_shader`.

    @param c Context dict from the shader json
    @param sres Output shader data dict with a "contexts" list
    @param asset List of shader asset names, appended to in place
    @param defs A list of set defines for the preprocessor
    @param vert Pre-read vertex shader lines; read from
                c["vertex_shader"] when None
    @param frag Pre-read fragment shader lines; read from
                c["fragment_shader"] when None
    """
    con = {
        "name": c["name"],
        "constants": [],
        "texture_units": [],
        "vertex_elements": [],
    }
    sres["contexts"].append(con)

    # Names — rsplit(".", 1) strips only the final extension, split("/")
    # keeps only the file name
    con["vertex_shader"] = c["vertex_shader"].rsplit(".", 1)[0].split("/")[-1]
    if con["vertex_shader"] not in asset:
        asset.append(con["vertex_shader"])

    con["fragment_shader"] = c["fragment_shader"].rsplit(".", 1)[0].split("/")[-1]
    if con["fragment_shader"] not in asset:
        asset.append(con["fragment_shader"])

    # Optional stages are only present when listed in the json
    if "geometry_shader" in c:
        con["geometry_shader"] = c["geometry_shader"].rsplit(".", 1)[0].split("/")[-1]
        if con["geometry_shader"] not in asset:
            asset.append(con["geometry_shader"])

    if "tesscontrol_shader" in c:
        con["tesscontrol_shader"] = (
            c["tesscontrol_shader"].rsplit(".", 1)[0].split("/")[-1]
        )
        if con["tesscontrol_shader"] not in asset:
            asset.append(con["tesscontrol_shader"])

    if "tesseval_shader" in c:
        con["tesseval_shader"] = c["tesseval_shader"].rsplit(".", 1)[0].split("/")[-1]
        if con["tesseval_shader"] not in asset:
            asset.append(con["tesseval_shader"])

    if "color_attachments" in c:
        con["color_attachments"] = c["color_attachments"]
        # Resolve the "_HDR" placeholder against the active defines
        for i in range(len(con["color_attachments"])):
            if con["color_attachments"][i] == "_HDR":
                con["color_attachments"][i] = "RGBA32" if "_LDR" in defs else "RGBA64"

    # Params — render-state keys copied verbatim when present in the json
    params = [
        "depth_write",
        "compare_mode",
        "cull_mode",
        "blend_source",
        "blend_destination",
        "blend_operation",
        "alpha_blend_source",
        "alpha_blend_destination",
        "alpha_blend_operation",
        "color_writes_red",
        "color_writes_green",
        "color_writes_blue",
        "color_writes_alpha",
        "conservative_raster",
    ]

    for p in params:
        if p in c:
            con[p] = c[p]

    # Parse shaders — NOTE(review): shader paths are opened as given, so
    # they are presumably relative to the current working directory at
    # build time; confirm against callers
    if vert is None:
        with open(c["vertex_shader"], encoding="utf-8") as f:
            vert = f.read().splitlines()
    parse_shader(sres, c, con, defs, vert, True)  # Parse attribs for vertex shader

    if frag is None:
        with open(c["fragment_shader"], encoding="utf-8") as f:
            frag = f.read().splitlines()
    parse_shader(sres, c, con, defs, frag, False)

    if "geometry_shader" in c:
        with open(c["geometry_shader"], encoding="utf-8") as f:
            geom = f.read().splitlines()
        parse_shader(sres, c, con, defs, geom, False)

    if "tesscontrol_shader" in c:
        with open(c["tesscontrol_shader"], encoding="utf-8") as f:
            tesc = f.read().splitlines()
        parse_shader(sres, c, con, defs, tesc, False)

    if "tesseval_shader" in c:
        with open(c["tesseval_shader"], encoding="utf-8") as f:
            tese = f.read().splitlines()
        parse_shader(sres, c, con, defs, tese, False)
|
||||
|
||||
|
||||
def parse_shader(
    sres, c: dict, con: dict, defs: list[str], lines: list[str], parse_attributes: bool
):
    """Parses the given shader to get information about the used vertex
    elements, uniforms and constants. This information is later used in
    Iron to check what data each shader requires.

    Results are appended in place to `con["vertex_elements"]`,
    `con["texture_units"]` and `con["constants"]`.

    @param defs A list of set defines for the preprocessor
    @param lines The list of lines of the shader file
    @param parse_attributes Whether to parse vertex elements
    """
    vertex_elements_parsed = False
    vertex_elements_parsing = False

    # Stack of the state of all preprocessor conditions for the current
    # line. If there is a `False` in the stack, at least one surrounding
    # condition is false and the line must not be parsed
    stack: list[bool] = []

    if not parse_attributes:
        vertex_elements_parsed = True

    for line in lines:
        line = line.lstrip()

        # Preprocessor
        if line.startswith("#if"):  # if, ifdef, ifndef
            s = line.split(" ")[1]
            found = s in defs
            if line.startswith("#ifndef"):
                found = not found
            stack.append(found)
            continue

        if line.startswith("#else"):
            stack[-1] = not stack[-1]
            continue

        if line.startswith("#endif"):
            stack.pop()
            continue

        # Skip lines if the stack contains at least one preprocessor
        # condition that is not fulfilled
        skip = False
        for condition in stack:
            if not condition:
                skip = True
                break
        if skip:
            continue

        # Vertex attribute declarations, e.g. "in vec3 pos;"
        if not vertex_elements_parsed and line.startswith("in "):
            vertex_elements_parsing = True
            s = line.split(" ")
            con["vertex_elements"].append(
                {
                    # Last char of the type gives the component count,
                    # e.g. "vec3" -> "float3"
                    "data": "float" + s[1][-1:],
                    "name": s[2][:-1],  # [:-1] to get rid of the semicolon
                }
            )

        # Stop the vertex element parsing if no other vertex elements
        # follow directly (assuming all vertex elements are positioned
        # directly after each other apart from empty lines and comments)
        if (
            vertex_elements_parsing
            and len(line) > 0
            and not line.startswith("//")
            and not line.startswith("in ")
        ):
            vertex_elements_parsed = True

        if line.startswith("uniform ") or line.startswith(
            "//!uniform"
        ):  # Uniforms included from header files
            s = line.split(" ")
            # Examples:
            # uniform sampler2D myname;
            # uniform layout(RGBA8) image3D myname;
            if s[1].startswith("layout"):
                ctype = s[2]
                cid = s[3]
                if cid[-1] == ";":
                    cid = cid[:-1]
            else:
                ctype = s[1]
                cid = s[2]
                if cid[-1] == ";":
                    cid = cid[:-1]

            found = False  # Uniqueness check
            if (
                ctype.startswith("sampler")
                or ctype.startswith("image")
                or ctype.startswith("uimage")
            ):  # Texture unit
                for tu in con["texture_units"]:
                    if tu["name"] == cid:
                        # Texture already present
                        found = True
                        break
                if not found:
                    if cid[-1] == "]":  # Array of samplers - sampler2D mySamplers[2]
                        # Add individual units - mySamplers[0], mySamplers[1]
                        # NOTE(review): int(cid[-2]) reads a single digit,
                        # so sampler arrays with 10+ elements are not
                        # handled — confirm whether that can occur
                        for i in range(int(cid[-2])):
                            tu = {"name": cid[:-2] + str(i) + "]"}
                            con["texture_units"].append(tu)
                    else:
                        tu = {"name": cid}
                        con["texture_units"].append(tu)
                        if ctype.startswith("image") or ctype.startswith("uimage"):
                            tu["is_image"] = True

                        check_link(c, defs, cid, tu)

            else:  # Constant
                if cid.find("[") != -1:  # Float arrays
                    cid = cid.split("[")[0]
                    ctype = "floats"
                for const in con["constants"]:
                    if const["name"] == cid:
                        found = True
                        break
                if not found:
                    const = {"type": ctype, "name": cid}
                    con["constants"].append(const)

                    check_link(c, defs, cid, const)
|
||||
|
||||
|
||||
def check_link(source_context: dict, defs: list[str], cid: str, out: dict):
    """Checks whether the uniform/constant with the given name (`cid`)
    has a link stated in the json (`source_context`) that can be safely
    included based on the given defines (`defs`). If that is the case,
    the found link is written to the `out` dictionary.

    Multiple links may share a name; the first one whose conditions are
    satisfied wins. *out* is left untouched when no valid link exists.

    @param source_context Context dict from the shader json ("links" key)
    @param defs A list of set defines for the preprocessor
    @param cid Name of the uniform/constant to look up
    @param out Destination dict for the found link
    """
    for link in source_context["links"]:
        if link["name"] != cid:
            continue

        # Optionally only use link if at least one of the given defines is set
        if "ifdef" in link and not any(d in defs for d in link["ifdef"]):
            continue

        # Optionally only use link if none of the given defines are set
        if "ifndef" in link and any(d in defs for d in link["ifndef"]):
            continue

        out["link"] = link["link"]
        break
|
||||
|
||||
|
||||
def make(
    res: dict, base_name: str, json_data: dict, fp, defs: list[str], make_variants: bool
):
    """Build the shader data entry for one shader pass and append it to
    ``res["shader_datas"]``.

    Optionally writes a shader variant (the original source prefixed with
    a ``#define``) to the build's compiled shaders directory and parses it
    as an extra context, then parses every regular context from the json.

    @param res Output dict with a "shader_datas" list
    @param base_name Name of the shader pass; also keys into
                     assets.shader_passes_assets
    @param json_data Parsed shader json with "contexts" and optionally
                     "variants"
    @param fp Accepted but not used by this function (kept for the
              caller's signature)
    @param defs A list of set defines for the preprocessor
    @param make_variants Whether to emit the first define-variant, if any
    """
    sres = {"name": base_name, "contexts": []}
    res["shader_datas"].append(sres)
    asset = assets.shader_passes_assets[base_name]

    vert = None
    frag = None
    # Only the FIRST listed variant is considered here
    has_variants = "variants" in json_data and len(json_data["variants"]) > 0
    if make_variants and has_variants:
        d = json_data["variants"][0]
        if d in defs:
            # Write shader variant with define
            c = json_data["contexts"][0]
            # split("\n", 1)[1] drops the original "#version" line so it
            # can be re-emitted above the injected #define
            with open(c["vertex_shader"], encoding="utf-8") as f:
                vert = f.read().split("\n", 1)[1]
            vert = "#version 450\n#define " + d + "\n" + vert

            with open(c["fragment_shader"], encoding="utf-8") as f:
                frag = f.read().split("\n", 1)[1]
            frag = "#version 450\n#define " + d + "\n" + frag

            with open(
                lnx.utils.get_fp_build()
                + "/compiled/Shaders/"
                + base_name
                + d
                + ".vert.glsl",
                "w",
                encoding="utf-8",
            ) as f:
                f.write(vert)

            with open(
                lnx.utils.get_fp_build()
                + "/compiled/Shaders/"
                + base_name
                + d
                + ".frag.glsl",
                "w",
                encoding="utf-8",
            ) as f:
                f.write(frag)

            # Add context variant — a shallow copy pointing at the newly
            # written variant shader files
            c2 = c.copy()
            c2["vertex_shader"] = base_name + d + ".vert.glsl"
            c2["fragment_shader"] = base_name + d + ".frag.glsl"
            c2["name"] = c["name"] + d
            parse_context(c2, sres, asset, defs, vert.splitlines(), frag.splitlines())

    for c in json_data["contexts"]:
        parse_context(c, sres, asset, defs)
|
33
leenkx/blender/lnx/lib/server.py
Normal file
33
leenkx/blender/lnx/lib/server.py
Normal file
@ -0,0 +1,33 @@
|
||||
import atexit
|
||||
import http.server
|
||||
import socketserver
|
||||
import subprocess
|
||||
|
||||
haxe_server = None
|
||||
|
||||
|
||||
def run_tcp(port: int, do_log: bool):
    """Serve the current working directory over HTTP on *port* (blocking).

    Prints "Server already running" when the socket cannot be bound
    (e.g. the address is already in use).

    @param port TCP port to bind on all interfaces
    @param do_log Whether request log lines are printed
    """

    class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
        def log_message(self, format, *args):
            # Gate the default handler logging behind do_log
            if do_log:
                print(format % args)

    try:
        http_server = socketserver.TCPServer(("", port), HTTPRequestHandler)
        http_server.serve_forever()
    except OSError:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Socket/bind failures raise OSError.
        print("Server already running")
|
||||
|
||||
|
||||
def run_haxe(haxe_path, port=6000):
    """Start a background Haxe compilation server if none is running yet.

    The process handle is kept in the module-global ``haxe_server`` and
    the server is killed automatically at interpreter exit.

    @param haxe_path Path to the haxe executable
    @param port Port passed to `haxe --wait`
    """
    global haxe_server
    if haxe_server is not None:
        # Already started — nothing to do
        return
    haxe_server = subprocess.Popen([haxe_path, "--wait", str(port)])
    atexit.register(kill_haxe)
|
||||
|
||||
|
||||
def kill_haxe():
    """Terminate the background Haxe server, if one was started."""
    global haxe_server
    if haxe_server is None:
        # Nothing to clean up
        return
    haxe_server.kill()
    haxe_server = None
|
Reference in New Issue
Block a user