Encoding works!

Skye Terran 2021-08-25 13:08:17 -07:00
parent a2ede26f46
commit 81fc8a8579
11 changed files with 351 additions and 462 deletions


@@ -1,245 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 217,
"source": [
"import json\r\n",
"\r\n",
"# create a new scene graph\r\n",
"def new_scene(name):\r\n",
" # create empty neutrino data\r\n",
" data = {\r\n",
" \"meta\": {\r\n",
" \"name\": (\"name\", name),\r\n",
" \"scale\": (\"float\", 1.0),\r\n",
" \"asset_path\": (\"path\", \"./\"),\r\n",
" },\r\n",
" \"graph\": {\r\n",
" \"scene\": {},\r\n",
" \"assets\": {}\r\n",
" },\r\n",
" \"internal\": {\r\n",
" \"max_object_key\": {\"index\": 0},\r\n",
" \"max_cache_key\": {\"index\": 0}\r\n",
" }\r\n",
" }\r\n",
"\r\n",
" # return that empty data\r\n",
" return data\r\n",
"\r\n",
"# write the data to a JSON file\r\n",
"def save_scene(data, readable):\r\n",
" # create working copy of the scene data\r\n",
" clean_data = data.copy()\r\n",
"\r\n",
" # get rid of internal data (not to be exported)\r\n",
" del clean_data[\"internal\"]\r\n",
" \r\n",
" filename = data[\"meta\"][\"name\"][1].replace(\" \", \"\") + \".json\"\r\n",
" with open(filename, \"w\") as outfile:\r\n",
" if readable:\r\n",
" json.dump(clean_data, outfile, indent = 4)\r\n",
" else:\r\n",
" json.dump(clean_data, outfile)\r\n",
"\r\n",
"# get a new indexed object key and track it\r\n",
"def new_key(index):\r\n",
" # get the indexed key\r\n",
" key = hex(index[\"index\"] + 1)\r\n",
"\r\n",
" # index the max key\r\n",
" index[\"index\"] += 1\r\n",
"\r\n",
" return key\r\n",
"\r\n",
"# add an asset to the graph\r\n",
"def add_asset(data, name, path):\r\n",
" asset_data = {\r\n",
" \"name\": (\"name\", name),\r\n",
" \"file\": (\"path\", path)\r\n",
" }\r\n",
" \r\n",
" # add the asset to the graph\r\n",
" data[\"graph\"][\"assets\"][new_key(data[\"internal\"][\"max_object_key\"])] = (\"asset\", asset_data)\r\n",
"\r\n",
"# add an object to the scene\r\n",
"def spawn_object(data, name, asset):\r\n",
" object_data = {\r\n",
" \"name\": (\"name\", name),\r\n",
" \"asset\": \"\",\r\n",
" \"trans\": (\"trans\", {\r\n",
" \"position\": (\"vec3\", [0.0, 0.0, 0.0]),\r\n",
" \"rotation\": (\"vec3\", [0.0, 0.0, 0.0]),\r\n",
" \"scale\": (\"vec3\", [1.0, 1.0, 1.0])\r\n",
" })\r\n",
" }\r\n",
"\r\n",
" # get an asset key by the provided name\r\n",
" for key, value in data[\"graph\"][\"assets\"].items():\r\n",
" if value[1][\"name\"][1] == asset:\r\n",
" object_data[\"asset\"] = f\"*{key}\"\r\n",
"\r\n",
" # add the object to the scene\r\n",
" data[\"graph\"][\"scene\"][new_key(data[\"internal\"][\"max_object_key\"])] = (\"object\", object_data)"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "markdown",
"source": [
"### Implement SPORC for storage/memory optimization\r\n",
"(Single-Pointer Objective Relational Cache)"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 218,
"source": [
"# recursively cache a single typeval tuple object\r\n",
"def cache_typeval(cache, typeval):\r\n",
" # ignore if not typeval\r\n",
" if type(typeval) == tuple:\r\n",
" for key, value in typeval[1].items():\r\n",
" # refuse to cache pointers (that's just... that would just be a nightmare)\r\n",
" if type(value) == str:\r\n",
" is_pointer = (\"*\" in value)\r\n",
" else:\r\n",
" is_pointer = False\r\n",
" if not is_pointer:\r\n",
" # cache member objects if it's a dictionary object\r\n",
" if type(value[1]) == dict:\r\n",
" cache_typeval(cache, value)\r\n",
"\r\n",
" value_hash = hash(str(value))\r\n",
"\r\n",
" # track in cache\r\n",
" if value_hash not in cache[\"objects\"]:\r\n",
" cache_pointer = new_key(cache[\"key_index\"])\r\n",
" cache[\"objects\"][value_hash] = {\"key\": cache_pointer, \"value\": value, \"count\": 1}\r\n",
" else:\r\n",
" cache_pointer = cache[\"objects\"][value_hash][\"key\"]\r\n",
" cache[\"objects\"][value_hash][\"count\"] += 1\r\n",
"\r\n",
" # replace real value with hash\r\n",
" typeval[1][key] = \"#\" + cache_pointer\r\n",
"\r\n",
"# if there's only one instance of a certain value, convert it back to the original value and destroy the cached version\r\n",
"def uncache_typeval(cache, typeval):\r\n",
" for key, value in typeval[1].items():\r\n",
" # refuse to cache pointers (that's just... that would just be a nightmare)\r\n",
" if type(value) == str:\r\n",
" is_pointer = (\"*\" in value)\r\n",
" else:\r\n",
" is_pointer = False\r\n",
" if not is_pointer:\r\n",
" # cache member objects if it's a dictionary object\r\n",
" if type(value[1]) == dict:\r\n",
" uncache_typeval(cache, value)\r\n",
"\r\n",
" value_hash = hash(str(value))\r\n",
"\r\n",
" # check if it occurs only once\r\n",
" cache_key = value.replace(\"#\", \"\")\r\n",
" if cache[cache_key][\"count\"] <= 1:\r\n",
" # replace the cache pointer in the scene data with its original value\r\n",
" typeval[1][key] = cache[cache_key][\"value\"]\r\n",
"\r\n",
" # delete this object from the cache\r\n",
" del cache[cache_key]\r\n",
"\r\n",
"# cache the scene\r\n",
"def cache_scene(data):\r\n",
" # add the cache object to the scene data\r\n",
" data[\"cache\"] = {}\r\n",
"\r\n",
" containers = [\r\n",
" data[\"graph\"][\"scene\"],\r\n",
" data[\"graph\"][\"assets\"]\r\n",
" ]\r\n",
"\r\n",
" # build a cache of value hashes and pointers\r\n",
" hash_cache = {\"key_index\": {\"index\": 0}, \"objects\": {}}\r\n",
" for objects in containers:\r\n",
" for key, value in objects.items():\r\n",
" cache_typeval(hash_cache, value)\r\n",
"\r\n",
" # create a cache hashed with pointer keys instead of value hashes\r\n",
" key_cache = {}\r\n",
" for key, value in hash_cache[\"objects\"].items():\r\n",
" key_cache[value[\"key\"]] = {\"value\": value[\"value\"], \"count\": value[\"count\"]}\r\n",
"\r\n",
" # prune the cache to only redirect repeat values\r\n",
" for objects in containers:\r\n",
" for key, value in objects.items():\r\n",
" uncache_typeval(key_cache, value)\r\n",
"\r\n",
" # create a serialized cache usable by neutrino\r\n",
" serial_cache = {}\r\n",
" for key, value in key_cache.items():\r\n",
" serial_cache[key] = value[\"value\"]\r\n",
"\r\n",
" # add that cache to the neutrino scene data\r\n",
" data[\"cache\"] = serial_cache"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 219,
"source": [
"# just returns a random string\r\n",
"import random\r\n",
"import string\r\n",
"def random_string(length):\r\n",
" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))\r\n",
"\r\n",
"# create test scene\r\n",
"test_scene = new_scene(\"Neutrino Test Scene\")\r\n",
"\r\n",
"# populate assets\r\n",
"asset_names = []\r\n",
"for i in range(10):\r\n",
" name = random_string(8)\r\n",
" add_asset(test_scene, name, \"Assets/TestAsset.obj\")\r\n",
" asset_names.append(name)\r\n",
"\r\n",
"# populate objects in scene\r\n",
"for i in range(50):\r\n",
" spawn_object(test_scene, random_string(8), random.choice(asset_names))\r\n",
"\r\n",
"cache_scene(test_scene)\r\n",
"save_scene(test_scene, False)"
],
"outputs": [],
"metadata": {}
}
],
"metadata": {
"orig_nbformat": 4,
"language_info": {
"name": "python",
"version": "3.7.8",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.8 64-bit"
},
"interpreter": {
"hash": "57baa5815c940fdaff4d14510622de9616cae602444507ba5d0b6727c008cbd6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
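The SPORC cells above work by hashing each typeval's string form, storing the first occurrence in a cache, and rewriting every later occurrence as a "#" pointer. A minimal self-contained sketch of that mechanism (function and variable names here are illustrative, not the notebook's API):

# Minimal sketch of the SPORC idea: hash repeated values, keep one copy,
# and point at it with "#<key>". Names are illustrative only.
def cache_repeats(objects):
    cache, by_hash, index = {}, {}, 0
    for obj in objects:
        for field, value in obj.items():
            h = hash(str(value))
            if h not in by_hash:
                key = hex(index)
                index += 1
                by_hash[h] = key
                cache[key] = value
            obj[field] = "#" + by_hash[h]
    return cache

scene = [{"scale": [1.0, 1.0, 1.0]}, {"scale": [1.0, 1.0, 1.0]}]
cache = cache_repeats(scene)
print(scene)  # [{'scale': '#0x0'}, {'scale': '#0x0'}]
print(cache)  # {'0x0': [1.0, 1.0, 1.0]}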


@@ -1,135 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 86,
"source": [
"file = open(\"testB.neu\")\r\n",
"dirty_blob = file.read()\r\n",
"\r\n",
"# get rid of comments and leading/trailing whitespace\r\n",
"lines = dirty_blob.split(\"\\n\")\r\n",
"for i, line in enumerate(lines):\r\n",
" if line.strip()[:2] == \"//\":\r\n",
" del lines[i]\r\n",
"\r\n",
"for i, line in enumerate(lines):\r\n",
" lines[i] = line.strip()\r\n",
"\r\n",
"tidy_blob = \" \".join(lines)"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 87,
"source": [
"data = []\r\n",
"\r\n",
"# get blocks\r\n",
"for line in lines:\r\n",
" object = {}\r\n",
" if \"=\" in line:\r\n",
" keytype = line.split(\" \")[0].split(\":\")\r\n",
" key = keytype[0]\r\n",
" object[\"key\"] = key\r\n",
" if len(keytype) > 1:\r\n",
" type = keytype[1]\r\n",
" object[\"type\"] = type\r\n",
" data.append(object)"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 88,
"source": [
"words = tidy_blob.split(\" \")"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 89,
"source": [
"def extract_objects(keywords):\r\n",
" blocks = []\r\n",
" depth = -1\r\n",
" i = -1\r\n",
" last = \"\"\r\n",
" for word in keywords:\r\n",
" #print(f\"Word: {word} | Depth: {depth} | i: {i}\")\r\n",
" if word == \"{\":\r\n",
" depth += 1\r\n",
" if depth == 1:\r\n",
" #print(\"New block of depth 1\")\r\n",
" blocks.append({\"key\": last, \"object\": []})\r\n",
" i += 1\r\n",
" elif word == \"}\":\r\n",
" depth -= 1\r\n",
" else:\r\n",
" last = word\r\n",
" if depth >= 1:\r\n",
" blocks[i][\"object\"].append(word)\r\n",
" return blocks\r\n",
"\r\n",
"data = extract_objects(words)\r\n",
"temp = []\r\n",
"for blob in data:\r\n",
" temp.append(extract_objects(blob[\"object\"]))\r\n",
"\r\n",
" \r\n",
"\r\n",
"temp"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"[[{'key': 'aa', 'object': ['{', 'aaa']},\n",
" {'key': 'ab', 'object': ['{', 'aba']},\n",
" {'key': 'ac', 'object': ['{', 'abb']}],\n",
" [{'key': 'ba', 'object': ['{']},\n",
" {'key': 'bb', 'object': ['{']},\n",
" {'key': 'bc', 'object': ['{']}],\n",
" [{'key': 'ba', 'object': ['{']},\n",
" {'key': 'bb', 'object': ['{']},\n",
" {'key': 'bc', 'object': ['{']}]]"
]
},
"metadata": {},
"execution_count": 89
}
],
"metadata": {}
}
],
"metadata": {
"orig_nbformat": 4,
"language_info": {
"name": "python",
"version": "3.7.8",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.8 64-bit"
},
"interpreter": {
"hash": "57baa5815c940fdaff4d14510622de9616cae602444507ba5d0b6727c008cbd6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
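The final cell's extract_objects pulls out blocks by tracking brace depth across the word stream; a block is collected whenever depth reaches 1, so each recorded object also keeps its opening "{" (visible in the output above). A self-contained rerun on a toy string, with the function copied from the cell above:

# Self-contained rerun of the brace-depth parser from the cell above.
def extract_objects(keywords):
    blocks, depth, i, last = [], -1, -1, ""
    for word in keywords:
        if word == "{":
            depth += 1
            if depth == 1:                      # a new top-level block opens
                blocks.append({"key": last, "object": []})
                i += 1
        elif word == "}":
            depth -= 1
        else:
            last = word
        if depth >= 1:                          # record words inside a block
            blocks[i]["object"].append(word)
    return blocks

words = "graph { scene { a b } assets { c } }".split(" ")
print(extract_objects(words))
# [{'key': 'scene', 'object': ['{', 'a', 'b']},
#  {'key': 'assets', 'object': ['{', 'c']}]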

File diff suppressed because one or more lines are too long


@@ -1,81 +0,0 @@
// & declares a key pointing to object instantiated by that declaration (in this case, we're using integers in hex)
// keys are NOT indices - ultimately they should be ingested as keys to a dictionary containing each object in the Neutrino file
// * gets a reference to a key's object (Neutrino will try to preserve this reference in the target software whenever possible)
// # gets a copy from a cached object's key (Neutrino will simply replace this reference with a copy of its value - this is simply to consolidate repetitive data in-exchange)
// overall structure:
/*
meta = {},
graph = {
scene = {},
assets = {}
},
cache = {}
*/
meta = {
//
},
graph = {
// note - "scene" and "assets" are essentially namespaces
// the scene itself
scene = {
&0: object = {
name: string = "SM_LargeWindow_A",
mesh: mesh = *2,
transform: trans = #b
},
&1: object = {
name: string = "SM_LargeWindow_A2",
mesh: mesh = *2,
transform: trans = #b
}
},
// assets used by the scene
assets = {
&2: mesh {
source: path = "/Assets/Props/LargeWindowA.obj",
materials: [mat] = [*4]
},
&4: mat {
name: string = "Simple Glass",
parent: shader = *7,
// "shader.props" is a subtype of the "shader" type, which is just a namespace that keeps it from being mixed up with other "props" subtypes (like "mesh.props", etc.)
parameters: shader.props = {
albedo: tex = *6,
roughness: float = 0.15,
normal: vec4 = #a
}
},
&5: mat {
name: string = "Blockout Grey",
parent: shader = *7,
parameters: shader.props = {
albedo: vec4 = (0.5, 0.5, 0.5, 1.0),
roughness: float = 0.85,
normal: vec4 = #a
}
},
&6: tex {
source: path = "/Assets/Textures/T_WindowGrime.png"
},
&7: shader {
source: path = "/Assets/Shaders/PBRBasic.wgsl"
}
},
},
// anonymous objects shared by multiple other objects
cache = {
// this vector is common as a position or rotation value
&8: vec3 = (0.0, 0.0, 0.0),
// this vector is common as a scale value
&9: vec3 = (1.0, 1.0, 1.0),
// this vector is common as a normal value
&a: vec4 = (0.5, 0.5, 1.0, 1.0),
// this transform is common because it's the default
&b: trans = {
position: vec3 = #8,
rotation: vec3 = #8,
scale: vec3 = #9
}
}
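For the # copy semantics described above, a loader pastes a copy of the cached value wherever the pointer appears. A hypothetical sketch of that resolution (the helper and data shapes are assumptions, not repo code), using the cache entries from this file:

# Hypothetical sketch: expand "#" copy-pointers against the cache above.
import copy

cache = {
    "8": ("vec3", [0.0, 0.0, 0.0]),
    "9": ("vec3", [1.0, 1.0, 1.0]),
    "b": ("trans", {"position": "#8", "rotation": "#8", "scale": "#9"}),
}

def expand(value):
    # a "#key" string becomes a deep copy of the cached value
    if isinstance(value, str) and value.startswith("#"):
        return copy.deepcopy(expand(cache[value[1:]]))
    if isinstance(value, tuple):
        return (value[0], expand(value[1]))
    if isinstance(value, dict):
        return {k: expand(v) for k, v in value.items()}
    return value

print(expand("#b"))
# ('trans', {'position': ('vec3', [0.0, 0.0, 0.0]),
#            'rotation': ('vec3', [0.0, 0.0, 0.0]),
#            'scale': ('vec3', [1.0, 1.0, 1.0])})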


@@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"source": [
"# just returns a random string\r\n",
"import random\r\n",
"import string\r\n",
"from neutrino.encode import *\r\n",
"def random_string(length):\r\n",
" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))\r\n",
"\r\n",
"# create test scene\r\n",
"test_scene = new_scene(name = \"Neutrino Test Scene\", cache = True)\r\n",
"\r\n",
"# populate assets\r\n",
"asset_names = []\r\n",
"for i in range(10):\r\n",
" name = random_string(8)\r\n",
" add_asset(test_scene, name, \"Assets/TestAsset.obj\")\r\n",
" asset_names.append(name)\r\n",
"\r\n",
"# populate objects in scene\r\n",
"for i in range(50):\r\n",
" spawn_object(test_scene, random_string(8), random.choice(asset_names))\r\n",
"\r\n",
"save_scene(test_scene, False)"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 2,
"source": [
"import json\r\n",
"from neutrino.decode import *\r\n",
"\r\n",
"imported_data = json.load(open(\"NeutrinoTestScene.json\"))\r\n",
"\r\n",
"for key, value in imported_data[\"graph\"][\"scene\"].items():\r\n",
" value = (imported_data[\"cache\"][\"names\"][value[0][1:]], value[1])\r\n",
" print(value)"
],
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"['@0xa', {'@0x1': ['@0x1', 'OBK1NYBK'], '@0x4': '*0x9', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KXKKGWH1'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'FP8K8N8K'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MOGF2L64'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'Z8EFSXJH'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1ZMPRUS6'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'QVQSXH1U'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'Q0F9YKF3'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'RS1HURTI'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '9IT2UDE7'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'JJZ9VA2P'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'PE6Y5NBE'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '41VJPUWO'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3D63B4QE'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '5JZIUIZ9'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ZD6DD8E1'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MIRYB8QW'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '9HEY01NR'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3EK2Y8LS'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KJYNHF9B'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1WUOY60X'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'W2L3HTUI'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'K4L6USDR'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ZF5RNV1N'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '62KC64SW'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '1QCUVQII'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '0IVRI09E'], '@0x4': '*0x3', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'J8J492ET'], '@0x4': '*0x9', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'MJJTBO2L'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '5QAA9XJZ'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '21GNFNFG'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '132YJS72'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'E7GT5ZZ0'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '26DIERE1'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KTSB2GMU'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'ETRS6ZK2'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '8X2ZYUGQ'], '@0x4': '*0x5', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'L09LNFXL'], '@0x4': '*0x4', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '4MHVZCEE'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'YV5R9UAL'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '0BRGWQ75'], '@0x4': '*0x6', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'W53N74LW'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'S1PLWUWH'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'I7GJKHL6'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'IPZGY627'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '3LAE7CYV'], '@0x4': '*0x8', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'SUPYOPZB'], '@0x4': '*0x7', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', '2TSTYW4P'], '@0x4': '*0xa', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'E47HYX2O'], '@0x4': '*0x1', '@0x5': '#0x4'}]\n",
"['@0xa', {'@0x1': ['@0x1', 'KE36JYS7'], '@0x4': '*0x2', '@0x5': '#0x4'}]\n"
]
}
],
"metadata": {}
}
],
"metadata": {
"orig_nbformat": 4,
"language_info": {
"name": "python",
"version": "3.7.8",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3.7.8 64-bit"
},
"interpreter": {
"hash": "57baa5815c940fdaff4d14510622de9616cae602444507ba5d0b6727c008cbd6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
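Each printed record is name-keyed: reading the output against the order in which encode.py's name_key assigns keys, '@0xa' is 'object', '@0x1' is 'name', '@0x4' is 'asset', and '@0x5' is 'transform'; '*0x1' through '*0xa' reference the ten assets, and every record shares '#0x4', the cached default transform. A small sketch that resolves one record (the names table is inferred from encode.py's key order, not dumped by the notebook):

# Resolve "@" name keys in one printed record; the mapping below is inferred
# from the order name_key assigns keys, not read from the file.
names = {"0x1": "name", "0x4": "asset", "0x5": "transform", "0xa": "object"}

def resolve(v):
    if isinstance(v, str) and v.startswith("@"):
        return names[v[1:]]
    if isinstance(v, list):
        return [resolve(x) for x in v]
    if isinstance(v, dict):
        return {resolve(k): resolve(x) for k, x in v.items()}
    return v

record = ['@0xa', {'@0x1': ['@0x1', 'OBK1NYBK'], '@0x4': '*0x9', '@0x5': '#0x4'}]
print(resolve(record))
# ['object', {'name': ['name', 'OBK1NYBK'], 'asset': '*0x9', 'transform': '#0x4'}]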

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

Binary file not shown.

python/neutrino/decode.py (Normal file, 30 lines)

@@ -0,0 +1,30 @@
import json

# rebuild full scene data from a cached neutrino file
def uncache_scene(in_data):
    pure_data = {
        "meta": in_data["meta"],
        "graph": in_data["graph"]
    }
    raw_json = json.dumps(pure_data)

    # expand "#" pointers nested inside the object cache itself
    raw_cache = json.dumps(in_data["cache"]["objects"])
    for key, value in in_data["cache"]["objects"].items():
        pointer = "#" + key
        raw_cache = raw_cache.replace(f'"{pointer}"', json.dumps(value))
    unpacked_object_cache = json.loads(raw_cache)

    # objects: splice each cached value back in place of its "#" pointer
    for key, value in unpacked_object_cache.items():
        pointer = "#" + key
        raw_json = raw_json.replace(f'"{pointer}"', json.dumps(value))

    # names: swap "@" name keys back to their original strings
    for key, value in in_data["cache"]["names"].items():
        raw_json = raw_json.replace("@" + key, value)

    out_data = json.loads(raw_json)
    return out_data
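
A usage sketch for uncache_scene, assuming the NeutrinoTestScene.json written by the test notebook above:

# Usage sketch for uncache_scene (file name taken from the test notebook).
import json
from neutrino.decode import uncache_scene

with open("NeutrinoTestScene.json") as infile:
    packed = json.load(infile)

scene = uncache_scene(packed)   # expands "#" object pointers and "@" name keys
print(scene["meta"]["name"])    # ['name', 'Neutrino Test Scene']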

python/neutrino/encode.py (Normal file, 189 lines)

@@ -0,0 +1,189 @@
import json

# create a new scene graph
def new_scene(name, cache = True):
    # create empty neutrino data
    data = {
        "meta": {
            "name": ("name", name),
            "scale": ("float", 1.0),
            "asset_path": ("path", "./")
        },
        "graph": {
            "scene": {},
            "assets": {}
        },
        "cache": {
            "names": {},
            "objects": {}
        },
        "internal": {
            "cache": cache,
            "max_object_key": {"index": 0},
            "max_name_key": {"index": 0}
        }
    }

    # return that empty data
    return data

# write the data to a JSON file
def save_scene(data, readable = False):
    # cache the scene
    if data["internal"]["cache"]:
        cache_scene(data)

    # create working copy of the scene data
    clean_data = data.copy()

    # get rid of internal data (not to be exported)
    del clean_data["internal"]

    filename = data["meta"]["name"][1].replace(" ", "") + ".json"
    with open(filename, "w") as outfile:
        if readable:
            json.dump(clean_data, outfile, indent = 4)
        else:
            json.dump(clean_data, outfile)

# get a new indexed object key and track it
def new_key(index):
    # get the indexed key
    key = hex(index["index"] + 1)

    # index the max key
    index["index"] += 1

    return key

# returns a cached name key from a string
def name_key(data, name):
    if data["internal"]["cache"]:
        name_pointer = ""

        # retrieve the proper key if it exists
        for key, value in data["cache"]["names"].items():
            if value == name:
                name_pointer = key

        # if the name pointer is still empty, make a new key and add it to the cache
        if name_pointer == "":
            name_pointer = new_key(data["internal"]["max_name_key"])
            data["cache"]["names"][name_pointer] = name

        return "@" + name_pointer
    else:
        return name

# add an asset to the graph
def add_asset(data, name, path):
    asset_data = {
        name_key(data, "name"): (name_key(data, "name"), name),
        name_key(data, "file"): (name_key(data, "path"), path)
    }

    # add the asset to the graph
    data["graph"]["assets"][new_key(data["internal"]["max_object_key"])] = (name_key(data, "asset"), asset_data)

# add an object to the scene
def spawn_object(data, name, asset):
    object_data = {
        name_key(data, "name"): (name_key(data, "name"), name),
        name_key(data, "asset"): "",
        name_key(data, "transform"): (name_key(data, "transform"), {
            name_key(data, "position"): (name_key(data, "vec3"), [0.0, 0.0, 0.0]),
            name_key(data, "rotation"): (name_key(data, "vec3"), [0.0, 0.0, 0.0]),
            name_key(data, "scale"): (name_key(data, "vec3"), [1.0, 1.0, 1.0])
        })
    }

    # get an asset key by the provided name
    for key, value in data["graph"]["assets"].items():
        if value[1][name_key(data, "name")][1] == asset:
            object_data[name_key(data, "asset")] = f"*{key}"

    # add the object to the scene
    data["graph"]["scene"][new_key(data["internal"]["max_object_key"])] = (name_key(data, "object"), object_data)

# recursively cache a single typeval tuple object
def cache_typeval(cache, typeval):
    # ignore if not typeval
    if type(typeval) == tuple:
        for key, value in typeval[1].items():
            # refuse to cache pointers (that's just... that would just be a nightmare)
            if type(value) == str:
                is_pointer = ("*" in value)
            else:
                is_pointer = False
            if not is_pointer:
                # cache member objects if it's a dictionary object
                if type(value[1]) == dict:
                    cache_typeval(cache, value)

                value_hash = hash(str(value))

                # track in cache
                if value_hash not in cache["objects"]:
                    cache_pointer = new_key(cache["key_index"])
                    cache["objects"][value_hash] = {"key": cache_pointer, "value": value, "count": 1}
                else:
                    cache_pointer = cache["objects"][value_hash]["key"]
                    cache["objects"][value_hash]["count"] += 1

                # replace real value with hash
                typeval[1][key] = "#" + cache_pointer

# if there's only one instance of a certain value, convert it back to the original value and destroy the cached version
def uncache_typeval(cache, typeval):
    for key, value in typeval[1].items():
        # refuse to cache pointers (that's just... that would just be a nightmare)
        if type(value) == str:
            is_pointer = ("*" in value)
        else:
            is_pointer = False
        if not is_pointer:
            # cache member objects if it's a dictionary object
            if type(value[1]) == dict:
                uncache_typeval(cache, value)

            value_hash = hash(str(value))

            # check if it occurs only once
            cache_key = value.replace("#", "")
            if cache[cache_key]["count"] <= 1:
                # replace the cache pointer in the scene data with its original value
                typeval[1][key] = cache[cache_key]["value"]

                # delete this object from the cache
                del cache[cache_key]

# cache the scene
def cache_scene(data):
    containers = [
        data["graph"]["scene"],
        data["graph"]["assets"]
    ]

    # build a cache of value hashes and pointers
    hash_cache = {"key_index": {"index": 0}, "objects": {}}
    for objects in containers:
        for key, value in objects.items():
            cache_typeval(hash_cache, value)

    # create a cache hashed with pointer keys instead of value hashes
    key_cache = {}
    for key, value in hash_cache["objects"].items():
        key_cache[value["key"]] = {"value": value["value"], "count": value["count"]}

    # prune the cache to only redirect repeat values
    for objects in containers:
        for key, value in objects.items():
            uncache_typeval(key_cache, value)

    # create a serialized cache usable by neutrino
    serial_cache = {}
    for key, value in key_cache.items():
        serial_cache[key] = value["value"]

    # add that cache to the neutrino scene data
    data["cache"]["objects"] = serial_cache