height(h),
hdr(d),
samples(0),
- target_ms(0),
- in_frame(false)
+ target_ms(0)
{
target[0] = 0;
target[1] = 0;
void Pipeline::setup_frame() const
{
- in_frame = true;
for(PassList::const_iterator i=passes.begin(); i!=passes.end(); ++i)
if(const Renderable *renderable = i->get_renderable())
renderable->setup_frame();
void Pipeline::finish_frame() const
{
- in_frame = false;
for(PassList::const_iterator i=passes.begin(); i!=passes.end(); ++i)
if(const Renderable *renderable = i->get_renderable())
renderable->finish_frame();
return;
Renderer renderer(camera);
+ setup_frame();
render(renderer, tag);
+ finish_frame();
}
void Pipeline::render(Renderer &renderer, const Tag &tag) const
if(tag.id)
return;
- bool was_in_frame = in_frame;
- if(!in_frame)
- setup_frame();
-
const Framebuffer *out_fbo = Framebuffer::current();
// This is a no-op but will ensure the related state gets restored
BindRestore restore_fbo(out_fbo);
postproc[i]->render(renderer, color, depth);
}
}
-
- if(!was_in_frame)
- finish_frame();
}
void Pipeline::create_targets(unsigned recreate)
class PostProcessor;
/**
-Encapsulates all of the information used to produce a complete image in the
-framebuffer. This is the highest level rendering class.
+Top-level content class. Typically a Pipeline is used as the content
+Renderable for a View, or for effects such as ShadowMap or EnvironmentMap.
A Pipeline contains a sequence of passes. Each pass has a Renderable along
with Lighting, Clipping, DepthTest and Blend states. Scenes can be used to
-organize Renderables within a pass. A Camera can be specified for the entire
-Pipeline.
-
-A Pipeline is also a Renderable itself. It will only respond to the default
-pass. The Renderables within the Pipeline will be invoked with whatever tags
-were specified when adding them.
-
-A Pipeline's render method should normally be called without a Renderer; it
-will create one itself, using the camera specified for the Pipeline. If a
-Renderer is passed, its camera will be used instead.
+organize Renderables within a pass.
PostProcessors can be applied after all of the passes in the Pipeline have been
rendered. Framebuffer objects are automatically used to pass render results to
unsigned samples;
RenderTarget *target[2];
RenderTarget *target_ms;
- mutable bool in_frame;
public:
Pipeline(unsigned, unsigned, bool = false);
void set_hdr(bool);
void set_multisample(unsigned);
- void set_camera(const Camera *);
// Deprecated
+ void set_camera(const Camera *);
Pass &add_pass(const Tag &tag);
void add_renderable(const Renderable &);
void add_renderable_for_pass(const Renderable &, const Tag &);
#include "camera.h"
#include "framebuffer.h"
#include "renderable.h"
+#include "renderer.h"
#include "view.h"
using namespace std;
window.signal_resize.connect(sigc::mem_fun(this, &View::window_resized));
}
+// Sets the camera used for rendering the content.  The raw pointer is
+// stored as-is — the View does not take ownership, so the caller must keep
+// the Camera alive.  The camera is later used to construct a Renderer in
+// render(), and (if non-null) receives aspect-ratio updates on window resize.
+void View::set_camera(Camera *c)
+{
+ camera = c;
+}
+
void View::set_content(const Renderable *r)
{
content = r;
{
target.clear(COLOR_BUFFER_BIT|DEPTH_BUFFER_BIT);
if(content)
- content->render();
+ {
+ Renderer renderer(camera);
+ content->setup_frame();
+ content->render(renderer);
+ content->finish_frame();
+ }
context.swap_buffers();
}
{
target.viewport(0, 0, w, h);
float aspect = static_cast<float>(w)/h;
+ if(camera)
+ camera->set_aspect(aspect);
for(list<Camera *>::iterator i=synced_cameras.begin(); i!=synced_cameras.end(); ++i)
(*i)->set_aspect(aspect);
}
Graphics::Window &window;
Graphics::GLContext &context;
Framebuffer ⌖
+ Camera *camera;
const Renderable *content;
std::list<Camera *> synced_cameras;
unsigned get_height() const { return window.get_height(); }
float get_aspect() const { return static_cast<float>(get_width())/get_height(); }
+ void set_camera(Camera *);
void set_content(const Renderable *);
+
+ // Deprecated
void synchronize_camera_aspect(Camera &);
void render();