#ifdef THREADED_PIPELINE

#include "config_pipeline.h"
#include "pipeline.h"

PipelineCyclerTrueImpl::
PipelineCyclerTrueImpl(CycleData *initial_data, Pipeline *pipeline) :
  _pipeline(pipeline),
  _dirty(0),
  _lock(this)
{
  if (_pipeline == nullptr) {
    _pipeline = Pipeline::get_render_pipeline();
  }

  _num_stages = _pipeline->get_num_stages();
  _data = new CycleDataNode[_num_stages];
  for (int i = 0; i < _num_stages; ++i) {
    _data[i]._cdata = initial_data;
  }

  _pipeline->add_cycler(this);
}
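// Note on the constructor above: every stage starts out sharing the single
// initial_data pointer, so a freshly created cycler is not dirty; the first
// write to a stage of a multi-stage pipeline is what forces a copy.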
/**
 * Makes a deep copy of the cycler, preserving the pointerwise equivalence of
 * the stages: stages that shared data in the original share data in the copy.
 */
PipelineCyclerTrueImpl::
PipelineCyclerTrueImpl(const PipelineCyclerTrueImpl &copy) :
  _pipeline(copy._pipeline),
  _dirty(0),
  _lock(this)
{
  ReMutexHolder holder(_lock);
  ReMutexHolder holder2(copy._lock);

  _num_stages = _pipeline->get_num_stages();
  nassertv(_num_stages == copy._num_stages);
  _data = new CycleDataNode[_num_stages];

  // The map collapses duplicate source pointers to a single copy each.
  typedef pmap<CycleData *, PT(CycleData) > Pointers;
  Pointers pointers;

  for (int i = 0; i < _num_stages; ++i) {
    PT(CycleData) &new_pt = pointers[copy._data[i]._cdata];
    if (new_pt == nullptr) {
      new_pt = copy._data[i]._cdata->make_copy();
    }
    _data[i]._cdata = new_pt.p();
  }

  _pipeline->add_cycler(this);
  if (copy._dirty) {
    _pipeline->add_dirty_cycler(this);
  }
}
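// For example, if stages 0 and 1 of the source shared one CycleData while
// stage 2 held a different one, the copy ends up with two distinct CycleData
// objects arranged the same way, not three independent copies; this keeps the
// copied cycler's dirty state meaningful.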
/**
 * Copies the data pointers from the other cycler, again preserving the
 * pointerwise equivalence between stages.
 */
void PipelineCyclerTrueImpl::
operator = (const PipelineCyclerTrueImpl &copy) {
  ReMutexHolder holder1(_lock);
  ReMutexHolder holder2(copy._lock);
  nassertv(get_parent_type() == copy.get_parent_type());

  typedef pmap<CycleData *, PT(CycleData) > Pointers;
  Pointers pointers;

  for (int i = 0; i < _num_stages; ++i) {
    PT(CycleData) &new_pt = pointers[copy._data[i]._cdata];
    if (new_pt == nullptr) {
      new_pt = copy._data[i]._cdata->make_copy();
    }
    _data[i]._cdata = new_pt.p();
  }

  if (copy._dirty && !_dirty) {
    _pipeline->add_dirty_cycler(this);
  }
}
PipelineCyclerTrueImpl::
~PipelineCyclerTrueImpl() {
  ReMutexHolder holder(_lock);
  _pipeline->remove_cycler(this);

  delete[] _data;
  _data = nullptr;
  _num_stages = 0;
}
/**
 * Elevates the data at the indicated stage to write mode, performing a
 * copy-on-write if anyone else is still holding the old pointer.
 */
CycleData *PipelineCyclerTrueImpl::
write_stage(int pipeline_stage, Thread *current_thread) {
  _lock.acquire(current_thread);

  nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {
    _lock.release();
    return nullptr;
  }

  CycleData *old_data = _data[pipeline_stage]._cdata;

  // Only the first writer of this stage considers a copy; nested writers
  // reuse whatever pointer the first writer settled on.
  if (_data[pipeline_stage]._writes_outstanding == 0) {
    if (old_data->get_node_ref_count() != 1) {
      // Someone else is holding the pointer: copy-on-write.
      _data[pipeline_stage]._cdata = old_data->make_copy();
      if (pipeline_cat.is_debug()) {
        pipeline_cat.debug()
          << "Copy-on-write a: " << old_data << " becomes "
          << _data[pipeline_stage]._cdata << "\n";
      }

      // Some stages now point to different data, so the cycler is dirty.
      if (!_dirty && _num_stages != 1) {
        _pipeline->add_dirty_cycler(this);
      }
    }
  }

  ++(_data[pipeline_stage]._writes_outstanding);
  return _data[pipeline_stage]._cdata;
}
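// Illustrative caller pattern (a sketch only; CData, _cycler and _value are
// hypothetical names, but client code normally reaches the write_stage()
// family through the CycleDataWriter helper rather than calling it directly):
//
//   PipelineCycler<CData> _cycler;                    // owned by some object
//   {
//     CycleDataWriter<CData> cdata(_cycler, current_thread);
//     cdata->_value = new_value;                      // mutate the writable copy
//   }  // leaving scope releases the write, decrementing _writes_outstanding
//
// The _writes_outstanding counter is what lets nested writers in the same
// locked scope share the copy made by the outermost writer.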
/**
 * As write_stage(), but also propagates the new pointer to upstream stages
 * that were sharing the old one (or, with force_to_0, all the way to stage 0).
 */
CycleData *PipelineCyclerTrueImpl::
write_stage_upstream(int pipeline_stage, bool force_to_0, Thread *current_thread) {
  _lock.acquire(current_thread);

  nassertd(pipeline_stage >= 0 && pipeline_stage < _num_stages) {
    _lock.release();
    return nullptr;
  }

  CycleData *old_data = _data[pipeline_stage]._cdata;

  if (old_data->get_ref_count() != 1 || force_to_0) {
    // Count the references held outside this cycler: everything except our
    // own, minus the upstream stages that share the same pointer.
    int external_count = old_data->get_ref_count() - 1;
    int k = pipeline_stage - 1;
    while (k >= 0 && _data[k]._cdata == old_data) {
      --k;
      --external_count;
    }

    if (external_count > 0 && _data[pipeline_stage]._writes_outstanding == 0) {
      // Outside references remain: copy-on-write, and install the new
      // pointer in this stage and the matching upstream stages.
      PT(CycleData) new_data = old_data->make_copy();
      if (pipeline_cat.is_debug()) {
        pipeline_cat.debug()
          << "Copy-on-write b: " << old_data << " becomes "
          << new_data << "\n";
      }

      k = pipeline_stage - 1;
      while (k >= 0 && (_data[k]._cdata == old_data || force_to_0)) {
        nassertr(_data[k]._writes_outstanding == 0, nullptr);
        _data[k]._cdata = new_data.p();
        --k;
      }

      _data[pipeline_stage]._cdata = new_data;

      if (k >= 0 || pipeline_stage + 1 < _num_stages) {
        // Some stage still holds a different pointer, so the cycler is dirty.
        if (!_dirty) {
          _pipeline->add_dirty_cycler(this);
        }
      }

    } else if (k >= 0 && force_to_0) {
      // No outside references, so no copy is needed, but force_to_0 requires
      // the pointer to extend all the way back to stage 0.
      while (k >= 0) {
        nassertr(_data[k]._writes_outstanding == 0, nullptr);
        _data[k]._cdata = old_data;
        --k;
      }
    }
  }

  ++(_data[pipeline_stage]._writes_outstanding);
  return _data[pipeline_stage]._cdata;
}
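// A sketch of the upstream propagation, assuming the last stage is stage 2
// and CycleData "B" has no holders outside the cycler:
//
//   write_stage_upstream(2, force_to_0=true) on stages (A, B, B)
//     -> no outside references, so no copy; stages become (B, B, B)
//
//   write_stage_upstream(2, force_to_0=true) on stages (A, A, A)
//     -> nothing to do; the pointer already reaches stage 0
//
// This is how a modification can be made to appear retroactively in earlier
// pipeline stages instead of waiting for it to cycle forward.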
/**
 * Cycles the data between frames.  The caller must hold the lock, and the
 * cycler is expected to be dirty.
 */
PT(CycleData) PipelineCyclerTrueImpl::
cycle() {
  // Pull the last stage's pointer out; it is handed back to the caller.
  PT(CycleData) last_val;
  last_val.swap(_data[_num_stages - 1]._cdata);
  last_val->node_unref_only();

  nassertr(_lock.debug_is_locked(), last_val);
  nassertr(_dirty, last_val);

  int i;
  for (i = _num_stages - 1; i > 0; --i) {
    nassertr(_data[i]._writes_outstanding == 0, last_val);
    _data[i]._cdata = _data[i - 1]._cdata;
  }

  for (i = 1; i < _num_stages; ++i) {
    if (_data[i]._cdata != _data[i - 1]._cdata) {
      // Still dirty.
      return last_val;
    }
  }

  // Every stage now shares the same pointer; no longer dirty.
  _dirty = 0;
  return last_val;
}
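// Worked example: with three stages holding pointers (A, A, B), one call to
// cycle() returns B and leaves the stages as (A, A, A).  Every stage then
// shares the same pointer, so _dirty is cleared; had the stages still
// differed, the cycler would remain dirty and be cycled again next frame.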
/**
 * Changes the number of stages in the cycler.  The lock must be held.
 */
void PipelineCyclerTrueImpl::
set_num_stages(int num_stages) {
  nassertv(_lock.debug_is_locked());

  if (num_stages <= _num_stages) {
    // Don't bother to reallocate the array smaller; just release the data
    // held by the stages being dropped.
    for (int i = num_stages; i < _num_stages; ++i) {
      nassertv(_data[i]._writes_outstanding == 0);
      _data[i]._cdata.clear();
    }
    _num_stages = num_stages;

  } else {
    // Growing: reallocate the array, seeding the new stages with the last
    // stage's data pointer.
    CycleDataNode *new_data = new CycleDataNode[num_stages];
    int i;
    for (i = 0; i < _num_stages; ++i) {
      nassertv(_data[i]._writes_outstanding == 0);
      new_data[i]._cdata = _data[i]._cdata;
    }
    for (i = _num_stages; i < num_stages; ++i) {
      new_data[i]._cdata = _data[_num_stages - 1]._cdata;
    }
    delete[] _data;

    _num_stages = num_stages;
    _data = new_data;
  }
}
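// Note that stages added by set_num_stages() are seeded with the last
// stage's data pointer rather than left empty, so a pipeline that grows
// behaves as if the new stages had been cycling the same data all along.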
#ifdef DEBUG_THREADS
void PipelineCyclerTrueImpl::CyclerMutex::
output(std::ostream &out) const {
  out << "CyclerMutex ";
  _cycler->cheat()->output(out);
}
#endif  // DEBUG_THREADS

#endif  // THREADED_PIPELINE