#ifndef SRC_VAULT_GEAR_BLOCK_FLOW_H_
#define SRC_VAULT_GEAR_BLOCK_FLOW_H_

namespace blockFlow {
config()  const
  {
    static const CONF configInstance;
    return configInstance;
  }

framesPerEpoch()  const
  {
    return config().EPOCH_SIZ / config().ACTIVITIES_PER_FRAME;
  }

initialFrameRate()  const
  {
    return config().INITIAL_STREAMS * config().REFERENCE_FPS;
  }

initialEpochStep()  const
  { /* … */ }

/* reserve allocation headroom for two duty cycles */
initialEpochCnt()  const
  {
    return util::max (2*_raw(config().DUTY_CYCLE) / _raw(initialEpochStep()), 2u);
  }

averageEpochs()  const
  {
    return util::max (initialEpochCnt(), 6u);
  }

boostFactor()  const
  {
    return config().BOOST_FACTOR;
  }
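/* Illustration (not part of the original header): how the derived figures above
 * relate numerically.  All configuration values below are hypothetical stand-ins;
 * the step = framesPerEpoch / fps relation mirrors the spacing computation
 * in announceAdditionalFlow() further down. */
#include <algorithm>
#include <cstdio>

int main()
{
    const double EPOCH_SIZ            = 100;    // hypothetical slots per Epoch
    const double ACTIVITIES_PER_FRAME = 2;      // hypothetical
    const double INITIAL_STREAMS      = 2;      // hypothetical
    const double REFERENCE_FPS        = 25;     // hypothetical
    const double DUTY_CYCLE           = 0.05;   // hypothetical pre-roll of 50 ms

    double framesPerEpoch   = EPOCH_SIZ / ACTIVITIES_PER_FRAME;    // 50 frames
    double initialFrameRate = INITIAL_STREAMS * REFERENCE_FPS;     // 50 fps
    double initialEpochStep = framesPerEpoch / initialFrameRate;   // 1 s covered per Epoch
    double initialEpochCnt  = std::max (2*DUTY_CYCLE / initialEpochStep, 2.0);
    double averageEpochs    = std::max (initialEpochCnt, 6.0);

    std::printf ("step=%.2fs  headroom=%.0f Epochs  averaging window=%.0f Epochs\n",
                 initialEpochStep, initialEpochCnt, averageEpochs);
}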
using RawIter = typename ALO::iterator;
using SIZ     = typename ALO::Extent::SIZ;
next = this + (Epoch::SIZ() - 1);
ENSURE (next != this);
deadline()
  {
    return data_.condition.dead;
  }

bool
isAlive (Time deadline)
  {
    return data_.condition.isHold()
        or not data_.condition.isDead (deadline);
  }

filledSlots()  const
  {
    const Activity* firstAllocPoint{this + (Epoch::SIZ()-1)};
    return firstAllocPoint - next;
  }

claimNextSlot()
  {
    REQUIRE (hasFreeSlot());
    /* … */
  }

Time deadline()  { return Time{gate().deadline()}; }

getFillFactor()
  {
    return double(gate().filledSlots()) / (SIZ()-1);
  }
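/* Illustration (not part of the original header): a minimal, self-contained model
 * of the slot-claiming scheme above -- one metadata "gate" record occupying the
 * first slot, payload slots handed out from the back of the block towards it.
 * The names MiniEpoch and Slot are invented for this sketch. */
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdio>

struct Slot { long payload = 0; };

template<size_t SIZ>
struct MiniEpoch
  {
    std::array<Slot,SIZ> slots{};       // slots[0] plays the role of the gate
    Slot* next = &slots[SIZ-1];         // start of the next allocation (cf. the gate ctor)

    bool   hasFreeSlot() const { return next > &slots[0]; }
    size_t filledSlots() const { return (&slots[SIZ-1]) - next; }

    Slot*
    claimNextSlot()
      {
        assert (hasFreeSlot());
        return next--;                  // hand out slots back-to-front
      }
  };

int main()
{
    MiniEpoch<10> epoch;
    while (epoch.hasFreeSlot())
        epoch.claimNextSlot()->payload = 42;
    std::printf ("filled %zu of %zu payload slots\n", epoch.filledSlots(), size_t(10-1));
}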
implantInto (RawIter storageSlot)
  {
    Epoch& target = static_cast<Epoch&> (*storageSlot);
    /* … */
  }

setup (RawIter storageSlot, Time deadline)
  {
    Epoch& newEpoch{implantInto (storageSlot)};
    newEpoch.gate().deadline() = deadline;
    /* … */
  }
template<class CONF = blockFlow::DefaultConfig>

constexpr static size_t EPOCH_SIZ = CONF::EPOCH_SIZ;

using Extent = typename Allocator::Extent;

using Strategy::config;
return static_cast<Epoch&> (extent);

Epoch* curr_{nullptr};

accessEpoch()
  {
    return RawIter::checkPoint()? & asEpoch (RawIter::yield())
                                 : nullptr;
  }

, curr_{accessEpoch()}

RawIter::validatePos (curr_);
curr_ = accessEpoch();

expandAlloc (size_t cnt =1)
  {
    RawIter::expandAlloc (cnt);
    curr_ = accessEpoch();
  }
, epochStep_{Strategy::initialEpochStep()}

adjustEpochStep (double factor)
  {
    double stretched = _raw(epochStep_) * factor;
    gavl_time_t microTicks (floor (stretched));
    /* … */
  }
template<typename...ARGS>
Activity&
create (ARGS&& ...args)
  {
    return *new(claimSlot()) Activity{std::forward<ARGS> (args)...};
  }

Time currDeadline()  const { return epoch_->deadline(); }
bool hasFreeSlot()   const { return epoch_->gate().hasFreeSlot(); }

claimSlot()
  {
    while (not (epoch_ and
                epoch_->gate().hasFreeSlot()))
      { // current Epoch missing or exhausted: open a new Epoch beyond the last deadline
        auto lastDeadline = flow_->lastEpoch().deadline();
        epoch_.expandAlloc();
        Epoch::setup (epoch_, lastDeadline + flow_->getEpochStep());
      }
    return epoch_->gate().claimNextSlot();
  }
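/* Illustration (not part of the original header): the claimSlot() pattern above,
 * modelled with a plain std::deque of fixed-size blocks -- when the current block
 * has no free slot left, a new block is opened beyond the last deadline.
 * All names and numbers are invented for this sketch. */
#include <cstddef>
#include <cstdio>
#include <deque>

struct Block
  {
    long deadline = 0;
    size_t used = 0;
    static constexpr size_t CAPACITY = 4;           // hypothetical slots per block
    bool hasFreeSlot() const { return used < CAPACITY; }
    void claimNextSlot()     { ++used; }
  };

int main()
{
    const long epochStep = 1000;                     // hypothetical Epoch spacing
    std::deque<Block> pool {Block{epochStep}};

    auto claimSlot = [&]
      {
        while (not pool.back().hasFreeSlot())        // exhausted: open a new block
            pool.push_back (Block{pool.back().deadline + epochStep});
        pool.back().claimNextSlot();
      };

    for (int i=0; i<10; ++i)
        claimSlot();
    std::printf ("blocks in use: %zu\n", pool.size());   // 10 slots at 4 per block -> 3
}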
Epoch::setup (alloc_.begin(), deadline + Time{epochStep_});
return AllocatorHandle{alloc_.begin(), this};

if (firstEpoch().deadline() >= deadline)
    return AllocatorHandle{alloc_.begin(), this};

if (lastEpoch().deadline() < deadline)
  { // requested deadline beyond the last Epoch: append as many new Epochs as needed
    TimeVar lastDeadline = lastEpoch().deadline();
    auto distance = _raw(deadline) - _raw(lastDeadline);
    ENSURE (not nextEpoch);
    auto requiredNew = distance / _raw(epochStep_);
    ___sanityCheckAlloc (requiredNew);
    if (distance % _raw(epochStep_) > 0)
        ++requiredNew;                               // round up to cover the remainder
    nextEpoch.expandAlloc (requiredNew);
    for ( ; 0 < requiredNew; --requiredNew)
      {
        lastDeadline += epochStep_;
        Epoch::setup (nextEpoch, lastDeadline);
        if (deadline <= lastDeadline)
          {
            ENSURE (requiredNew == 1);
            return AllocatorHandle{nextEpoch, this};
          }
        /* … */
      }
    NOTREACHED ("Logic of counting new Epochs");
  }

/* otherwise: search the existing Epochs for the first one covering the deadline */
if (epochIt->deadline() >= deadline)
    return AllocatorHandle{epochIt, this};

NOTREACHED ("Inconsistency in BlockFlow Epoch deadline organisation");
if (isnil (alloc_)
    or firstEpoch().deadline() > deadline)
  return;

for (Epoch& epoch : allEpochs())
  {
    if (epoch.gate().isAlive (deadline))
      break;                                 // reached the first Epoch still in use
    auto currDeadline = epoch.deadline();
    auto epochDuration = currDeadline - updatePastDeadline(currDeadline);
    markEpochUnderflow (epochDuration, epoch.getFillFactor());
  }
if (epochStep_ > _cache_timeStep_cutOff)
    adjustEpochStep (_cache_boostFactorOverflow);
auto interpolate = [&](auto f, auto v1, auto v2) { return f*v2 + (1-f)*v1; };

fillFactor /= config().TARGET_FILL;
auto THRESH = config().DAMP_THRESHOLD;
double adjust =
    fillFactor > THRESH? fillFactor
                       : interpolate (1 - fillFactor/THRESH, fillFactor, Strategy::boostFactor());

double contribution = double(_raw(actualLen)) / _raw(epochStep_) / adjust;

auto N = Strategy::averageEpochs();
double avgFactor = (contribution + N-1) / N;
adjustEpochStep (avgFactor);
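/* Illustration (not part of the original header): the capacity regulation above as a
 * standalone computation.  TARGET_FILL, DAMP_THRESHOLD, BOOST_FACTOR and the observed
 * values are hypothetical stand-ins. */
#include <cstdio>

int main()
{
    const double TARGET_FILL    = 0.9;    // hypothetical: aim at 90% Epoch usage
    const double DAMP_THRESHOLD = 0.1;    // hypothetical: damp regulation for nearly empty Epochs
    const double BOOST_FACTOR   = 1.1;    // hypothetical capacity boost
    const double N              = 6;      // averaging window, cf. averageEpochs()

    double epochStep  = 1.0;              // current Epoch spacing (seconds)
    double actualLen  = 0.8;              // observed duration of the cleaned-up Epoch
    double fillFactor = 0.45;             // observed fraction of used slots

    auto interpolate = [](double f, double v1, double v2){ return f*v2 + (1-f)*v1; };

    fillFactor /= TARGET_FILL;
    double adjust = fillFactor > DAMP_THRESHOLD
                      ? fillFactor
                      : interpolate (1 - fillFactor/DAMP_THRESHOLD, fillFactor, BOOST_FACTOR);

    double contribution = actualLen / epochStep / adjust;   // relative change suggested by this Epoch
    double avgFactor    = (contribution + N-1) / N;         // damped by the moving average
    epochStep *= avgFactor;                                  // cf. adjustEpochStep (avgFactor)

    std::printf ("adjusted epochStep = %.3f s\n", epochStep); // 1.100 s: spacing grows after underflow
}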
currFps += additionalFps;
TimeVar adaptedSpacing = Strategy::framesPerEpoch() / currFps;
epochStep_ = util::max (adaptedSpacing, _cache_timeStep_cutOff);
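/* Illustration (not part of the original header): adapting the Epoch spacing to an
 * announced additional frame stream, with the lower cut-off applied.
 * All numbers are hypothetical. */
#include <algorithm>
#include <cstdio>

int main()
{
    const double framesPerEpoch = 50;      // hypothetical, cf. Strategy::framesPerEpoch()
    const double timeStepCutOff = 0.05;    // hypothetical lower bound on the spacing (seconds)

    double currFps       = 50;             // currently assumed aggregate frame rate
    double additionalFps = 200;            // newly announced additional flow
    currFps += additionalFps;

    double adaptedSpacing = framesPerEpoch / currFps;               // 0.2 s
    double epochStep = std::max (adaptedSpacing, timeStepCutOff);   // cut-off keeps it >= 0.05 s

    std::printf ("new epochStep = %.3f s\n", epochStep);
}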
firstEpoch()
  {
    REQUIRE (not isnil (alloc_));
    return asEpoch(*alloc_.begin());
  }

lastEpoch()
  {
    REQUIRE (not isnil (alloc_));
    return asEpoch(*alloc_.last());
  }

return alloc_.begin();
updatePastDeadline (TimeVar newDeadline)
  {
    /* … */
      pastDeadline_ = newDeadline - epochStep_;
    TimeVar previous = pastDeadline_;
    pastDeadline_ = newDeadline;
    return previous;
  }
___sanityCheckAlloc (size_t newBlockCnt)
  {
    if (newBlockCnt > blockFlow::BLOCK_EXPAND_SAFETY_LIMIT)
      throw err::Fatal{"Deadline expansion causes allocation of "
                      + util::showSize(newBlockCnt)
                      + " blocks > " + util::showSize(blockFlow::BLOCK_EXPAND_SAFETY_LIMIT)
                      , err::LUMIERA_ERROR_CAPACITY};
  }
Time first()       { return flow_.firstEpoch().deadline(); }
Time last()        { return flow_.lastEpoch().deadline();  }
size_t cntEpochs() { return watch(flow_.alloc_).active();  }
size_t poolSize()  { return watch(flow_.alloc_).size();    }
for (Epoch& epoch : flow_.allEpochs())
    /* … for each Activity `act` within this Epoch … */
    if (util::isSameObject (act, someActivity))
        return epoch.deadline();
if (isnil (flow_.alloc_))
    return "";
auto deadlines = flow_.allEpochs()
                      .transform([](Epoch& a){ return TimeValue{a.deadline()}; });
return util::join (deadlines, "|");
size_t cnt{0};
for (auto& epoch : flow_.allEpochs())
    cnt += epoch.gate().filledSlots();
return cnt;
static const Time ANYTIME
border condition marker value. ANYTIME <= any time value
Allocation scheme for the Scheduler, based on Epoch(s).
a mutable time value, behaving like a plain number, allowing copy and re-accessing ...
Record to describe an Activity, to happen within the Scheduler's control flow.
iterator begin()
iterate over all the currently active Extents
special definitions for the Scheduler activity language
specifically rigged GATE Activity, used for managing Epoch metadata
const Duration DUTY_CYCLE
typical relaxation time or average pre-roll to deadline
auto explore(IT &&srcSeq)
start building an IterExplorer by suitably wrapping the given iterable source.
void openNew(size_t cnt=1)
claim next cnt extents, possibly allocate.
Policy template to mix into the BlockFlow allocator, providing the parametrisation for self-regulatio...
Framerate specified as frames per second.
const size_t OVERLOAD_LIMIT
load factor over normal use where to assume saturation and limit throughput
const size_t BLOCK_EXPAND_SAFETY_LIMIT
Parametrisation of Scheduler memory management scheme
Any copy and copy construction prohibited.
iterator last()
positioned to the last / latest storage extent opened
Allocation Extent holding scheduler Activities to be performed altogether before a common deadline...
static const gavl_time_t SCALE
Number of micro ticks (µs) per second as basic time scale.
Local handle to allow allocating a collection of Activities, all sharing a common deadline...
void markEpochOverflow()
Notify and adjust Epoch capacity as consequence of exhausting an Epoch.
Time updatePastDeadline(TimeVar newDeadline)
Lumiera's internal time value datatype.
const double BOOST_FACTOR
adjust capacity by this factor on Epoch overflow/underflow events
Lightweight yet safe parametrisation of memory management.
TimeValue find(Activity &someActivity)
find out in which Epoch the given Activity was placed
Derived specific exceptions within Lumiera's exception hierarchy.
static const size_t EPOCH_SIZ
Number of storage slots to fit into one »Epoch«
const double DAMP_THRESHOLD
do not account for (almost) empty Epochs to avoid overshooting regulation
Mix-Ins to allow or prohibit various degrees of copying and cloning.
const size_t REFERENCE_FPS
frame rate to use as reference point to relate DUTY_CYCLE and default counts
void dropOld(size_t cnt)
discard oldest cnt extents
void announceAdditionalFlow(FrameRate additionalFps)
provide a hint to the self-regulating allocation scheme.
Tiny helper functions and shortcuts to be used everywhere. Consider this header to be effectively incl...
Parametrisation tuned for Render Engine performance.
Activity & create(ARGS &&...args)
Main API operation: allocate a new Activity record.
const double TARGET_FILL
aim at using this fraction of Epoch space on average (slightly below 100%)
Duration timeStep_cutOff() const
prevent stalling Epoch progression when reaching saturation
boost::rational< int64_t > FSecs
rational representation of fractional seconds
static Epoch & asEpoch(Extent &extent)
void markEpochUnderflow(TimeVar actualLen, double fillFactor)
On clean-up of past Epochs, the actual fill factor is checked to guess an Epoch duration to make opti...
Memory management scheme for cyclically used memory extents.
size_t initialEpochCnt() const
reserve allocation headroom for two duty cycles
Basic set of definitions and includes commonly used together (Vault).
const size_t ACTIVITIES_PER_FRAME
how many Activity records are typically used to implement a single frame
static const Time NEVER
border condition marker value. NEVER >= any time value
Adapt the access to the raw storage to present the Extents as Epoch; also caches the address resoluti...
auto setup(FUN &&workFun)
Helper: setup a Worker-Pool configuration for the test.
Decorator-Adapter to make a »state core« iterable as Lumiera Forward Iterator.
Duration is the internal Lumiera time metric.
void discardBefore(Time deadline)
Clean-up all storage related to activities before the given deadline.
const size_t INITIAL_STREAMS
Number of streams with REFERENCE_FPS to expect for normal use.
Building tree expanding and backtracking evaluations within hierarchical scopes.
double boostFactorOverflow() const
reduced logarithmically, since overflow is detected on individual allocations
a family of time value like entities and their relationships.
basic constant internal time value.
size_t cntElm()
count all currently active allocated elements
Vault-Layer implementation namespace root.
std::string allEpochs()
render deadlines of all currently active Epochs
AllocatorHandle until(Time deadline)
initiate allocations for activities to happen until some deadline
Descriptor for a piece of operational logic performed by the scheduler.