Encoding works!

This commit is contained in:
2021-08-25 13:08:17 -07:00
parent a2ede26f46
commit 81fc8a8579
11 changed files with 351 additions and 462 deletions

View File

@@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"source": [
"# just returns a random string\r\n",
"import random\r\n",
"import string\r\n",
"from neutrino.encode import *\r\n",
"def random_string(length):\r\n",
" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))\r\n",
"\r\n",
"# create test scene\r\n",
"test_scene = new_scene(name = \"Neutrino Test Scene\", cache = True)\r\n",
"\r\n",
"# populate assets\r\n",
"asset_names = []\r\n",
"for i in range(10):\r\n",
" name = random_string(8)\r\n",
" add_asset(test_scene, name, \"Assets/TestAsset.obj\")\r\n",
" asset_names.append(name)\r\n",
"\r\n",
"# populate objects in scene\r\n",
"for i in range(50):\r\n",
" spawn_object(test_scene, random_string(8), random.choice(asset_names))\r\n",
"\r\n",
"save_scene(test_scene, False)"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 2,
"source": [
"import json\r\n",
"from neutrino.decode import *\r\n",
"\r\n",
"imported_data = json.load(open(\"NeutrinoTestScene.json\"))\r\n",
"\r\n",
"for key, value in imported_data[\"graph\"][\"scene\"].items():\r\n",
" value = (imported_data[\"cache\"][\"names\"][value[0][1:]], value[1])\r\n",
" print(value)"
],
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"['@0xa', {'@0x1': ['@0x1', 'OBK1NYBK'], '@0x4': '*0x9', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KXKKGWH1'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'FP8K8N8K'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MOGF2L64'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'Z8EFSXJH'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1ZMPRUS6'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'QVQSXH1U'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'Q0F9YKF3'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'RS1HURTI'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '9IT2UDE7'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'JJZ9VA2P'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'PE6Y5NBE'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '41VJPUWO'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3D63B4QE'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '5JZIUIZ9'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ZD6DD8E1'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MIRYB8QW'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '9HEY01NR'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3EK2Y8LS'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KJYNHF9B'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1WUOY60X'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'W2L3HTUI'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'K4L6USDR'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ZF5RNV1N'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '62KC64SW'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1QCUVQII'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '0IVRI09E'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'J8J492ET'], '@0x4': '*0x9', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MJJTBO2L'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '5QAA9XJZ'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '21GNFNFG'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '132YJS72'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'E7GT5ZZ0'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '26DIERE1'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KTSB2GMU'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ETRS6ZK2'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '8X2ZYUGQ'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'L09LNFXL'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '4MHVZCEE'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'YV5R9UAL'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '0BRGWQ75'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'W53N74LW'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'S1PLWUWH'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'I7GJKHL6'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'IPZGY627'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3LAE7CYV'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'SUPYOPZB'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '2TSTYW4P'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'E47HYX2O'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KE36JYS7'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n"
]
}
],
"metadata": {}
}
],
"metadata": {
"orig_nbformat": 4,
"language_info": {
"name": "python",
"version": "3.7.8",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.8 64-bit"
},
"interpreter": {
"hash": "57baa5815c940fdaff4d14510622de9616cae602444507ba5d0b6727c008cbd6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

Binary file not shown.

30
python/neutrino/decode.py Normal file
View File

@@ -0,0 +1,30 @@
import json
def uncache_scene(in_data):
    """Expand a cached neutrino scene back into plain data.

    Replaces every "#<key>" object pointer with the cached value it
    refers to (after first resolving pointers nested inside the cache
    itself), then substitutes every "@<key>" name pointer with the
    original name string.

    in_data -- dict with "meta", "graph" and "cache" sections, as
               produced by save_scene() with caching enabled
    returns -- the fully expanded {"meta", "graph"} dictionary
    """
    pure_data = {
        "meta": in_data["meta"],
        "graph": in_data["graph"]
    }
    raw_json = json.dumps(pure_data)
    # resolve object pointers that appear inside other cached objects
    raw_cache = json.dumps(in_data["cache"])
    for key, value in in_data["cache"]["objects"].items():
        pointer = "#" + key
        raw_cache = raw_cache.replace(f'"{pointer}"', json.dumps(value))
    unpacked_object_cache = json.loads(raw_cache)
    # objects: iterate the "objects" table (not the whole cache dict),
    # otherwise the "#<key>" pointers in the graph are never substituted
    for key, value in unpacked_object_cache["objects"].items():
        pointer = "#" + key
        raw_json = raw_json.replace(f'"{pointer}"', json.dumps(value))
    # names
    for key, value in in_data["cache"]["names"].items():
        raw_json = raw_json.replace("@" + key, value)
    out_data = json.loads(raw_json)
    return out_data

189
python/neutrino/encode.py Normal file
View File

@@ -0,0 +1,189 @@
import json
# create a new scene graph
def new_scene(name, cache = True):
    """Build an empty neutrino scene structure.

    name  -- human-readable scene name stored in the meta block
    cache -- when True, save_scene() deduplicates repeated values into
             the "cache" section before writing
    """
    meta = {
        "name": ("name", name),
        "scale": ("float", 1.0),
        "asset_path": ("path", "./")
    }
    graph = {"scene": {}, "assets": {}}
    empty_cache = {"names": {}, "objects": {}}
    # bookkeeping that is stripped out again on save
    internal = {
        "cache": cache,
        "max_object_key": {"index": 0},
        "max_name_key": {"index": 0}
    }
    return {
        "meta": meta,
        "graph": graph,
        "cache": empty_cache,
        "internal": internal
    }
# write the data to a JSON file
def save_scene(data, readable = False):
    """Serialize a scene to "<Name>.json" (spaces stripped from the name).

    data     -- scene dict from new_scene(); its "internal" section is
                excluded from the output but left intact on the caller's copy
    readable -- pretty-print with 4-space indentation when True
    """
    # deduplicate repeated values first when caching is enabled
    if data["internal"]["cache"]:
        cache_scene(data)
    # shallow copy so deleting "internal" does not touch the caller's dict
    exported = data.copy()
    del exported["internal"]
    filename = data["meta"]["name"][1].replace(" ", "") + ".json"
    indent = 4 if readable else None
    with open(filename, "w") as outfile:
        json.dump(exported, outfile, indent = indent)
# get a new indexed object key and track it
def new_key(index):
    """Advance the counter inside ``index`` and return the new key as a
    hex string (first call on a fresh counter yields "0x1")."""
    index["index"] += 1
    return hex(index["index"])
# returns a cached name key from a string
def name_key(data, name):
    """Return a "@<hex>" pointer for ``name``, interning it in the name
    cache on first use; with caching disabled the name passes through
    unchanged."""
    if not data["internal"]["cache"]:
        return name
    names = data["cache"]["names"]
    # reuse the existing pointer when this name was seen before
    pointer = ""
    for existing_key, existing_name in names.items():
        if existing_name == name:
            pointer = existing_key
    # otherwise mint a new key and record the name
    if pointer == "":
        pointer = new_key(data["internal"]["max_name_key"])
        names[pointer] = name
    return "@" + pointer
# add an asset to the graph
def add_asset(data, name, path):
    """Register an asset (display name + file path) under a fresh object key."""
    # intern the field/type names in the same order as object creation
    k_name = name_key(data, "name")
    type_name = name_key(data, "name")
    k_file = name_key(data, "file")
    type_path = name_key(data, "path")
    asset_data = {
        k_name: (type_name, name),
        k_file: (type_path, path)
    }
    # add the asset to the graph
    data["graph"]["assets"][new_key(data["internal"]["max_object_key"])] = (name_key(data, "asset"), asset_data)
# add an object to the scene
def spawn_object(data, name, asset):
    """Place a named instance of ``asset`` in the scene with an identity
    transform, linking it to the asset entry via a "*<key>" pointer."""
    k_name = name_key(data, "name")
    type_name = name_key(data, "name")
    k_asset = name_key(data, "asset")
    k_transform = name_key(data, "transform")
    type_transform = name_key(data, "transform")
    # identity transform: zero position/rotation, unit scale
    identity = {
        name_key(data, "position"): (name_key(data, "vec3"), [0.0, 0.0, 0.0]),
        name_key(data, "rotation"): (name_key(data, "vec3"), [0.0, 0.0, 0.0]),
        name_key(data, "scale"): (name_key(data, "vec3"), [1.0, 1.0, 1.0])
    }
    object_data = {
        k_name: (type_name, name),
        k_asset: "",
        k_transform: (type_transform, identity)
    }
    # resolve the asset reference by matching the asset's display name
    for asset_key, asset_entry in data["graph"]["assets"].items():
        if asset_entry[1][name_key(data, "name")][1] == asset:
            object_data[k_asset] = f"*{asset_key}"
    # add the object to the scene
    data["graph"]["scene"][new_key(data["internal"]["max_object_key"])] = (name_key(data, "object"), object_data)
# recursively cache a single typeval tuple object
def cache_typeval(cache, typeval):
    """Walk a (type, value-dict) tuple and replace each member value with
    a "#<key>" pointer into cache["objects"], deduplicating repeats.

    cache   -- dict with "key_index" (counter fed to new_key) and
               "objects" (value-hash -> {"key", "value", "count"})
    typeval -- a (type-pointer, {field: value}) tuple; mutated in place

    NOTE(review): deduplication keys on hash(str(value)); presumably
    distinct values never collide — confirm for large scenes.
    """
    # ignore if not typeval
    if type(typeval) == tuple:
        for key, value in typeval[1].items():
            # refuse to cache pointers (that's just... that would just be a nightmare)
            if type(value) == str:
                is_pointer = ("*" in value)
            else:
                is_pointer = False
            if not is_pointer:
                # cache member objects if it's a dictionary object
                if type(value[1]) == dict:
                    cache_typeval(cache, value)
                # hash the (possibly already pointer-rewritten) value
                value_hash = hash(str(value))
                # track in cache
                if value_hash not in cache["objects"]:
                    # first occurrence: mint a pointer and store the value
                    cache_pointer = new_key(cache["key_index"])
                    cache["objects"][value_hash] = {"key": cache_pointer, "value": value, "count": 1}
                else:
                    # repeat occurrence: reuse the pointer, bump the count
                    cache_pointer = cache["objects"][value_hash]["key"]
                    cache["objects"][value_hash]["count"] += 1
                # replace real value with hash
                typeval[1][key] = "#" + cache_pointer
# if there's only one instance of a certain value, convert it back to the original value and destroy the cached version
def uncache_typeval(cache, typeval):
    """Inline cache entries that are referenced exactly once.

    After cache_typeval(), every non-pointer member of ``typeval[1]`` is
    a "#<key>" string. Entries with count <= 1 gain nothing from caching,
    so their original value is restored in place and the cache entry is
    deleted; shared entries (count > 1) are left as pointers.

    cache   -- pointer-key -> {"value", "count"} table; mutated in place
    typeval -- a (type-pointer, {field: value}) tuple; mutated in place
    """
    for key, value in typeval[1].items():
        # refuse to cache pointers (that's just... that would just be a nightmare)
        if type(value) == str:
            is_pointer = ("*" in value)
        else:
            is_pointer = False
        if not is_pointer:
            # recurse into member objects if it's a dictionary object
            if type(value[1]) == dict:
                uncache_typeval(cache, value)
            # note: a dead `value_hash = hash(str(value))` was removed here;
            # this pass keys the cache by pointer, not by value hash
            cache_key = value.replace("#", "")
            # check if it occurs only once
            if cache[cache_key]["count"] <= 1:
                # replace the cache pointer in the scene data with its original value
                typeval[1][key] = cache[cache_key]["value"]
                # delete this object from the cache
                del cache[cache_key]
# cache the scene
def cache_scene(data):
    """Deduplicate repeated typeval objects across the scene and asset
    graphs into data["cache"]["objects"], keyed by "#<hex>" pointers.
    Values that occur only once are inlined back in place."""
    containers = [
        data["graph"]["scene"],
        data["graph"]["assets"]
    ]
    # first pass: hash every value and swap it for a "#<key>" pointer
    hash_cache = {"key_index": {"index": 0}, "objects": {}}
    for container in containers:
        for entry in container.values():
            cache_typeval(hash_cache, entry)
    # re-key the cache by pointer instead of by value hash
    key_cache = {
        record["key"]: {"value": record["value"], "count": record["count"]}
        for record in hash_cache["objects"].values()
    }
    # second pass: inline anything that occurred only once
    for container in containers:
        for entry in container.values():
            uncache_typeval(key_cache, entry)
    # keep only the values for the exported, neutrino-usable cache
    data["cache"]["objects"] = {key: record["value"] for key, record in key_cache.items()}