+ # --- Tail of a write/export method; its `def` line is above this chunk. ---
+ # Progress task covering the file-writing phase (weight 1.0).
+ task = ctx.task("Writing files", 1.0)
+ if collection:
+ # Collection export: write everything into one file; optionally skip
+ # references whose individual target file already exists on disk.
+ existing = None
+ if skip_existing:
+ # Filter keeps resources that do NOT yet exist under `path`.
+ existing = lambda r: not os.path.exists(os.path.join(path, r.name))
+ scene_res.write_collection(out_fn, filter=existing)
+ else:
+ # Separate-files export: the scene file itself, then every referenced
+ # resource as its own file next to it.
+ scene_res.write_to_file(out_fn)
+ for r in scene_res.collect_references():
+ r.write_to_file(os.path.join(path, r.name))
+
+ def export_scene_resources(self, ctx, scene, resources):
+ """Export the resources needed by every prototype object of `scene`
+ into the `resources` dict, delegating the per-object work to
+ DataExporter.export_resources. `ctx` is the export/progress context."""
+ from .export import DataExporter
+ data_exporter = DataExporter()
+
+ data_exporter.export_resources(ctx, [p.object for p in scene.prototypes], resources)
+
+ def export_scene(self, scene, resources):
+ """Build and return a "<name>.scene" Resource describing `scene`.
+
+ If the scene has a background set, or has both opaque and blended
+ instances, an "ordered" scene is produced containing sub-scenes
+ (background reference, then a "simple" scene for opaque instances,
+ then a "zsorted" scene for blended ones). Otherwise a single flat
+ scene is produced whose type depends on which instance list is
+ non-empty. Referenced object resources are looked up in `resources`
+ by "<prototype name>.object"."""
+ from .datafile import Resource, Statement, Token
+ scene_res = Resource(scene.name+".scene", "scene")
+
+ if scene.background_set or (scene.instances and scene.blended_instances):
+ # Composite case: an ordered scene holding sub-scenes in draw order.
+ scene_res.statements.append(Statement("type", Token("ordered")))
+ if scene.background_set:
+ # The background set is itself a scene resource, referenced first.
+ scene_res.statements.append(scene_res.create_reference_statement("scene", resources[scene.background_set.name+".scene"]))
+
+ if scene.instances:
+ # Opaque instances go into a nested "simple" sub-scene.
+ st = Statement("scene")
+ st.sub.append(Statement("type", Token("simple")))
+ self.add_instances(scene_res, st.sub, scene.instances, resources)
+ scene_res.statements.append(st)
+
+ if scene.blended_instances:
+ # Blended instances need back-to-front sorting -> "zsorted".
+ st = Statement("scene")
+ st.sub.append(Statement("type", Token("zsorted")))
+ self.add_instances(scene_res, st.sub, scene.blended_instances, resources)
+ scene_res.statements.append(st)
+ else:
+ # Flat case: only one kind of instances (or none); at most one of the
+ # two add_instances calls below appends anything.
+ scene_type = "zsorted" if scene.blended_instances else "simple"
+ scene_res.statements.append(Statement("type", Token(scene_type)))
+
+ self.add_instances(scene_res, scene_res.statements, scene.instances, resources)
+ self.add_instances(scene_res, scene_res.statements, scene.blended_instances, resources)
+
+ return scene_res
+
+ def add_instances(self, scene_res, statements, instances, resources):
+ """Append one "object" reference statement per instance to `statements`.
+
+ Each statement references the instance's prototype object resource
+ (looked up as "<prototype name>.object" in `resources`) and carries a
+ nested transform statement. `scene_res` is the owning Resource,
+ needed to create cross-resource references."""
+ from .datafile import Statement
+
+ for i in instances:
+ obj_res = resources[i.prototype.name+".object"]
+ st = scene_res.create_reference_statement("object", obj_res)
+ if i.name:
+ # NOTE(review): presumably Statement.append adds the instance
+ # name as an extra argument of the statement — confirm against
+ # the datafile module.
+ st.append(i.name)
+
+ st.sub.append(self.create_transform_statement(i))
+ statements.append(st)
+
+ def create_transform_statement(self, instance):
+ """Build a "transform" Statement from the instance's world matrix:
+ position (translation), rotation (euler in degrees for euler
+ rotation modes, axis-angle otherwise) and scale."""
+ from .datafile import Statement
+
+ st = Statement("transform")
+
+ loc = instance.matrix_world.to_translation()
+ st.sub.append(Statement("position", *tuple(loc)))
+
+ quat = instance.matrix_world.to_quaternion()
+ if instance.rotation_mode in ('XYZ', 'XZY', 'YXZ', 'YZX', 'ZXY', 'ZYX'):
+ # Angles are converted from radians to degrees for the data file.
+ # `math` is assumed to be imported at module level (not visible in
+ # this chunk).
+ # NOTE(review): quat.to_euler() is called without passing
+ # instance.rotation_mode, so the decomposition order may not match
+ # the object's euler mode for non-default orders — confirm whether
+ # to_euler(instance.rotation_mode) was intended.
+ angles = [a*180/math.pi for a in quat.to_euler()]
+ st.sub.append(Statement("euler", *angles));
+ else:
+ # Non-euler modes (quaternion / axis-angle) are written as
+ # angle-in-degrees followed by the rotation axis.
+ st.sub.append(Statement("rotation", quat.angle*180/math.pi, *tuple(quat.axis)))
+
+ scale = instance.matrix_world.to_scale()
+ st.sub.append(Statement("scale", *tuple(scale)))
+
+ return st
+
+ def export_sequence_resources(self, scene, resources):
+ """Export light and lighting resources needed by the scene's render
+ sequence into `resources`.
+
+ Lights are gathered from the scene and its whole background_set
+ chain, de-duplicated, and exported once each as "<name>.light".
+ A "<scene name>.lightn" lighting resource is then built holding the
+ ambient color and a reference to every light."""
+ from .datafile import Resource, Statement, Token
+
+ # Collect lights from this scene and all chained background sets.
+ lights = []
+ s = scene
+ while s:
+ lights += s.lights
+ s = s.background_set
+
+ # Background sets may share lights with the scene; keep each one once.
+ from .util import make_unique
+ lights = make_unique(lights)
+
+ from .export_light import LightExporter
+ light_exporter = LightExporter()
+ for l in lights:
+ light_name = l.name+".light"
+ if light_name not in resources:
+ resources[light_name] = light_exporter.export_light(l)
+
+ lighting_name = scene.name+".lightn"
+ if lighting_name not in resources:
+ lighting_res = Resource(lighting_name, "lighting")
+ lighting_res.statements.append(Statement("ambient", *tuple(scene.ambient_light)))
+ for l in lights:
+ lighting_res.statements.append(lighting_res.create_reference_statement("light", resources[l.name+".light"]))
+
+ resources[lighting_name] = lighting_res
+
+ def export_sequence(self, scene, resources):
+ """Build a "<scene name>.seq" sequence Resource describing the render
+ passes for `scene`: clear, optional environment-map / sky / shadow-map
+ effects, the main content steps, and postprocessors (ambient
+ occlusion, bloom, color curve).
+
+ NOTE(review): this chunk appears to end mid-method — no return
+ statement is visible here, unlike export_scene."""
+ from .datafile import Resource, Statement, Token
+ seq_res = Resource(scene.name+".seq", "sequence")
+
+ if scene.use_hdr:
+ seq_res.statements.append(Statement("hdr", True))
+
+ # Clear to transparent black, depth 1.0, before rendering content.
+ self.add_clear(seq_res.statements, (0.0, 0.0, 0.0, 0.0), 1.0)
+
+ # The scene resource (exported earlier) is the renderable "content".
+ scene_res = resources[scene.name+".scene"]
+ seq_res.statements.append(seq_res.create_reference_statement("renderable", "content", scene_res))
+
+ lighting_res = resources[scene.name+".lightn"]
+
+ # Walk the scene and its background_set chain, collecting feature
+ # flags, shadow-casting lights, and shadow-casting instances.
+ any_opaque = False
+ any_blended = False
+ use_ibl = False
+ use_shadow = False
+ shadowed_lights = []
+ shadow_casters = []
+ s = scene
+ while s:
+ if s.instances:
+ any_opaque = True
+ if s.blended_instances:
+ any_blended = True
+ if s.use_ibl:
+ use_ibl = True
+ if s.use_shadow:
+ use_shadow = True
+ shadowed_lights += [l.data for l in s.lights if l.data.use_shadow]
+ for i in itertools.chain(s.instances, s.blended_instances):
+ o = i.prototype.object
+ # Only the first material slot is inspected to decide whether the
+ # instance casts shadows.
+ if o.material_slots and o.material_slots[0].material and o.material_slots[0].material.shadow_method!='NONE':
+ shadow_casters.append(i)
+ s = s.background_set
+
+ # Largest shadow maps first.
+ shadowed_lights.sort(key=lambda l:l.shadow_map_size, reverse=True)
+
+ # Tags selecting which instance groups the main steps render:
+ # "" = opaque pass, "blended" = blended pass.
+ main_tags = []
+ if any_opaque:
+ main_tags.append("")
+ if any_blended:
+ main_tags.append("blended")
+
+ # `content` names the renderable fed to the next stage; each effect
+ # below wraps the previous one and becomes the new content.
+ content = "content"
+ if use_ibl and scene.use_sky:
+ # Render the sky into an auxiliary sequence used as the environment
+ # map source for image-based lighting.
+ self.add_auxiliary_sequence(seq_res, "environment", "sky", ((0.0, 0.0, 0.0, 0.0), 1.0), main_tags, lighting_res)
+
+ st = Statement("effect", "environment")
+ st.sub.append(Statement("type", Token("environment_map")))
+ st.sub.append(Statement("size", 32))
+ st.sub.append(Statement("roughness_levels", 2))
+ st.sub.append(Statement("fixed_position", 0.0, 0.0, 0.0))
+ st.sub.append(Statement("content", content))
+ st.sub.append(Statement("environment", "environment_sequence"))
+
+ seq_res.statements.append(st)
+ content = "environment"
+
+ if scene.use_sky:
+ # Sky effect driven by the scene's sun light.
+ st = Statement("effect", "sky")
+ st.sub.append(Statement("type", Token("sky")))
+ st.sub.append(seq_res.create_reference_statement("sun", resources[scene.sun_light.name+".light"]))
+ st.sub.append(Statement("content", content))
+
+ seq_res.statements.append(st)
+ content = "sky"
+
+ if use_shadow:
+ # Auxiliary shadow render sequences: one tagged "shadow", one tagged
+ # "shadow_thsm" (used below for point lights — exact meaning of
+ # "thsm" not determinable from this chunk).
+ self.add_auxiliary_sequence(seq_res, "shadow", "content", (None, 1.0), ["shadow"], None)
+ self.add_auxiliary_sequence(seq_res, "thsm", "content", (None, 1.0), ["shadow_thsm"], None)
+
+ st = Statement("effect", "shadow_map")
+ st.sub.append(Statement("type", Token("shadow_map")))
+ st.sub.append(Statement("enable_for_method", "blended"))
+ st.sub.append(Statement("size", *self.compute_shadowmap_size(shadowed_lights)))
+ # Fit the shadow map around everything that actually casts shadows.
+ target, radius = self.compute_bounding_sphere(shadow_casters)
+ st.sub.append(Statement("target", *target))
+ st.sub.append(Statement("radius", radius))
+ st.sub.append(Statement("content", content))
+ st.sub.append(seq_res.create_reference_statement("lighting", lighting_res))
+ for l in shadowed_lights:
+ ss = seq_res.create_reference_statement("light", resources[l.name+".light"])
+ ss.sub.append(Statement("size", int(l.shadow_map_size)))
+ # Point lights render their shadows through the "thsm" sequence.
+ shadow_caster = "thsm_sequence" if l.type=='POINT' else "shadow_sequence"
+ ss.sub.append(Statement("shadow_caster", shadow_caster))
+ st.sub.append(ss)
+
+ seq_res.statements.append(st)
+ content = "shadow_map"
+
+ # Main render steps for the (possibly wrapped) content.
+ self.add_content_steps(seq_res, content, lighting_res, main_tags)
+
+ # Postprocessors, applied in order: AO, bloom (HDR only), color curve.
+ if scene.use_ao:
+ ss = Statement("postprocessor")
+ ss.sub.append(Statement("type", Token("ambient_occlusion")))
+ ss.sub.append(Statement("occlusion_radius", scene.ao_distance))
+ ss.sub.append(Statement("samples", scene.ao_samples))
+ seq_res.statements.append(ss)
+
+ if scene.use_hdr:
+ ss = Statement("postprocessor")
+ ss.sub.append(Statement("type", Token("bloom")))
+ seq_res.statements.append(ss)
+
+ # Final tone mapping: exposure adjustment and sRGB conversion.
+ ss = Statement("postprocessor")
+ ss.sub.append(Statement("type", Token("colorcurve")))
+ ss.sub.append(Statement("exposure_adjust", scene.exposure))
+ ss.sub.append(Statement("srgb"))
+ seq_res.statements.append(ss)