using Extent = BlockFlow::Extent;

const size_t EXTENT_SIZ      = Extent::SIZ();
const size_t AVERAGE_EPOCHS  = Strategy{}.averageEpochs();
const double TARGET_FILL     = Strategy{}.config().TARGET_FILL;
const double ACTIVITIES_P_FR = Strategy{}.config().ACTIVITIES_PER_FRAME;
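
// Placing a single Activity allots one Epoch; its deadline lies one epoch step
// beyond the requested deadline. After the (elided) clean-up the allocator is
// empty again.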
Time deadline = randTime();
// ...
CHECK (1 == watch(bFlow).cntElm());
CHECK (1 == watch(bFlow).cntEpochs());
CHECK (watch(bFlow).first() > deadline);
CHECK (watch(bFlow).first() - deadline == bFlow.getEpochStep());
// ...
CHECK (0 == watch(bFlow).cntEpochs());
CHECK (0 == watch(bFlow).cntElm());
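
// Storage layout: an Extent is a fixed-size block of Activity records.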
Extent& extent = *alloc.begin();
CHECK (extent.size() == Extent::SIZ::value);
CHECK (sizeof(extent) == extent.size() * sizeof(Activity));
CHECK (showType<Extent::value_type>() == "vault::gear::Activity"_expect);

extent[55].data_.feed.one = 555555555555555;
// ...
CHECK (epoch[55].data_.feed.one == 555555555555555);
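
// An Epoch is an overlay interpretation of such an Extent: slot 0 is used as the
// EpochGate, which holds the deadline and a cursor for claiming further slots.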
Epoch::EpochGate& gate = epoch.gate();
CHECK (isSameObject (gate, epoch[0]));
CHECK (isSameObject (epoch[0], extent[0]));
CHECK (Time{gate.deadline()} == Time(0,10));
CHECK (Time{gate.deadline()} == Time{epoch[0].data_.condition.dead});

CHECK (isSameObject (*gate.next, epoch[extent.size()-1]));
CHECK (0 == gate.filledSlots());
CHECK (0 == epoch.getFillFactor());

epoch[extent.size()-1].data_.timing.instant = Time{5,5};
// ...
CHECK (isSameObject (timeStart, epoch[extent.size()-1]));
// ...
CHECK (epoch[extent.size()-1].data_.timing.instant != Time(5,5));
CHECK (epoch[extent.size()-1].data_.timing.instant == Time::NEVER);

CHECK (timeStart.data_.timing.instant == Time::NEVER);
CHECK (timeStart.data_.timing.quality == 0);
// ...
CHECK (isSameObject (*gate.next, epoch[extent.size()-2]));
CHECK (1 == gate.filledSlots());
CHECK (gate.hasFreeSlot());

CHECK (epoch.getFillFactor() == double(gate.filledSlots()) / (EXTENT_SIZ-1));
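// note: slot 0 is occupied by the EpochGate itself, so only EXTENT_SIZ-1 slots
// can hold payload Activities, which explains the denominator above.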

for (uint i=extent.size()-2; i>1; --i)
    gate.claimNextSlot();

CHECK (isSameObject (*gate.next, epoch[1]));
CHECK (gate.filledSlots() == EXTENT_SIZ-2);
CHECK (gate.hasFreeSlot());

gate.claimNextSlot();

CHECK (not gate.hasFreeSlot());
CHECK (isSameObject (*gate.next, epoch[0]));
CHECK (gate.filledSlots() == EXTENT_SIZ-1);
CHECK (epoch.getFillFactor() == 1);

CHECK (gate.deadline() == Time(0,10));
CHECK (    gate.isAlive (Time(0,5)));
CHECK (    gate.isAlive (Time(999,9)));
CHECK (not gate.isAlive (Time(0,10)));
CHECK (not gate.isAlive (Time(1,10)));
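// deadline semantics: the Epoch counts as "alive" for any time strictly before
// its deadline (10s in this example); at or beyond the deadline it may be discarded.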

CHECK (watch(bFlow).allEpochs() == "10s200ms"_expect);
CHECK (watch(bFlow).find(a1) == "10s200ms"_expect);

CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
CHECK (watch(bFlow).find(a3) == "11s"_expect);

CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
CHECK (watch(bFlow).find(a2) == "10s600ms"_expect);

CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
CHECK (watch(bFlow).find(a0) == "10s200ms"_expect);

for (uint i=1; i<EXTENT_SIZ; ++i)
    // ...(body elided)...

CHECK (allocHandle.currDeadline() == Time(400,10));
CHECK (not allocHandle.hasFreeSlot());

auto& a4 = allocHandle.create();
CHECK (allocHandle.currDeadline() == Time(600,10));
CHECK (allocHandle.hasFreeSlot());
CHECK (watch(bFlow).find(a4) == "10s600ms"_expect);

for (uint i=1; i<EXTENT_SIZ; ++i)
    // ...(body elided)...

CHECK (allocHandle.currDeadline() == Time(800,10));
CHECK (allocHandle.hasFreeSlot());

auto& a5 = bFlow.until(Time{220,10}).create();
CHECK (watch(bFlow).find(a5) == "10s800ms"_expect);

for (uint i=2; i<EXTENT_SIZ; ++i)
    // ...(body elided)...

CHECK (allocHandle.currDeadline() == Time(0,11));
CHECK (not allocHandle.hasFreeSlot());

auto& a6 = bFlow.until(Time{850,10}).create();
CHECK (watch(bFlow).find(a6) == "11s192ms"_expect);
CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms"_expect);

auto& a7 = bFlow.until(Time{500,11}).create();
CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms|11s384ms|11s576ms"_expect);
CHECK (watch(bFlow).find(a7) == "11s576ms"_expect);

CHECK (watch(bFlow).cntElm() == 8 + EXTENT_SIZ-1 + EXTENT_SIZ-1 + EXTENT_SIZ-2);
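// element count: the eight Activities created individually above, plus the slots
// apparently claimed by the three fill loops (EXTENT_SIZ-1, EXTENT_SIZ-1 and
// EXTENT_SIZ-2 iterations respectively).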

CHECK (bFlow.getEpochStep() == "≺192ms≻"_expect);
// ...
CHECK (bFlow.getEpochStep() == "≺218ms≻"_expect);
CHECK (watch(bFlow).allEpochs() == "11s|11s192ms|11s384ms|11s576ms"_expect);

auto& a8 = bFlow.until(Time{500,10}).create();
CHECK (watch(bFlow).find(a8) == "11s192ms"_expect);


CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP);
// ...
CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW);
// ...
CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW*BOOST_OVERFLOW);

TimeVar dur1 = INITIAL_EPOCH_STEP;
// ...
TimeVar dur2 = INITIAL_EPOCH_STEP * BOOST_OVERFLOW;
// ...
double goal1 = double(_raw(dur1)) / (fac1/TARGET_FILL);
double goal2 = double(_raw(dur2)) / (fac2/TARGET_FILL);
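
// goal1/goal2 model the desired Epoch duration: the observed duration, scaled by
// TARGET_FILL relative to the (elided) fill factors fac1/fac2; the allocator is
// expected to approach this goal, smoothed as a moving average over AVERAGE_EPOCHS.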
auto movingAverage = [&](TimeValue old, double contribution)
                        {
                          auto N = AVERAGE_EPOCHS;
                          auto averageTicks = double(_raw(old))*(N-1)/N + contribution/N;
                          return TimeValue{gavl_time_t (floor (averageTicks))};
                        };

TimeVar step = bFlow.getEpochStep();
// ...
CHECK (bFlow.getEpochStep() == movingAverage(step, goal1));

step = bFlow.getEpochStep();
// ...
CHECK (bFlow.getEpochStep() == movingAverage(step, goal2));


Duration initialStep{bFlow.getEpochStep()};
size_t initialFPS = Strategy{}.initialFrameRate();
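// presumably announceAdditionalFlow() is invoked in the elided lines below; the
// Epoch step is then expected to shrink in proportion to the announced extra load: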
// ...
CHECK (bFlow.getEpochStep() * 2 == initialStep);
// ...
CHECK (bFlow.getEpochStep() * 4 == initialStep);


const size_t FPS = 200;
const size_t TICK_P_S = FPS * ACTIVITIES_P_FR;
const gavl_time_t RUN = _raw(Time{0,0,3});
Offset BASE_DEADLINE{FSecs{1,2}};
Offset SPREAD_DEAD{FSecs{2,100}};
const uint INVOKE_LAG = _raw(Time{250,0}) /STP;
const uint CLEAN_UP   = _raw(Time{100,0}) /STP;
const uint INSTANCES  = RUN /STP;
const uint MAX_TIME   = INSTANCES + INVOKE_LAG + 2*CLEAN_UP;

using TestData = vector<pair<TimeVar, size_t>>;
using Subjects = vector<reference_wrapper<Activity>>;

TestData testData{INSTANCES};
for (size_t i=0; i<INSTANCES; ++i)
  {
    const size_t SPREAD   = 2*_raw(SPREAD_DEAD);
    const size_t MIN_DEAD = _raw(BASE_DEADLINE) - _raw(SPREAD_DEAD);
    // ...
    auto& [t,r] = testData[i];
    // ...(values for t and r are filled in here; elided)...
  }

Subjects subject{INSTANCES, std::ref(dummy)};
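
// Test harness: runTest walks MAX_TIME virtual time steps; at each step it allocates
// a new Activity from the prepared test data (while data lasts), and INVOKE_LAG steps
// later it invokes, and thereby consumes, the Activity allocated back then,
// accumulating a checksum for later verification.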
auto runTest = [&](auto allocate, auto invoke) -> size_t
  {
    // ...
    for (size_t i=0; i<MAX_TIME; ++i)
      {
        // ...
        auto const& data = testData[i];
        subject[i] = allocate (data.first, data.second);

        if (INVOKE_LAG <= i and i-INVOKE_LAG < INSTANCES)
            checksum += invoke (subject[i-INVOKE_LAG]);
      }
    // ...
  };
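
// helper to measure the wall-clock time of one complete test run; the measurement is
// related to the number of INSTANCES processed (presumably yielding a per-instance figure).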
auto benchmark = [INSTANCES](auto invokeTest)
                    {
                      return lib::test::benchmarkTime (invokeTest, INSTANCES);
                    };
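
// Variant 1 ("noAlloc"): Activities are constructed via placement-new into a
// pre-allocated buffer, so no dynamic allocation happens during the timed run;
// this serves as the baseline. (The enclosing noAlloc test lambda, analogous to
// the variants below, is elided in this excerpt.)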
vector<Activity> storage{INSTANCES};
auto allocate = [i=0, &storage](Time, size_t check) mutable -> Activity&
                  {
                    return *new(&storage[i++]) Activity{check, size_t{55}};
                  };
auto invoke = [](Activity& feedActivity)
                  {
                    return feedActivity.data_.feed.one;
                  };

sum1 = runTest (allocate, invoke);
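
// Variant 2 ("heapAlloc"): each Activity is allocated individually with new and
// deleted again after being consumed.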
auto heapAlloc = [&]{
        auto allocate = [](Time, size_t check) mutable -> Activity&
                          {
                            return *new Activity{check, size_t{55}};
                          };
        auto invoke = [](Activity& feedActivity)
                          {
                            size_t check = feedActivity.data_.feed.one;
                            delete &feedActivity;
                            // ...
                          };
        sum2 = runTest (allocate, invoke);
      };
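
// Variant 3 ("sharedAlloc"): Activities are managed through std::shared_ptr
// (the allocation body is elided in this excerpt).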
vector<std::shared_ptr<Activity>> manager{INSTANCES};
auto sharedAlloc = [&]{
        auto allocate = [&, i=0](Time, size_t check) mutable -> Activity&
                          {
                            // ...
                          };
        auto invoke = [&, i=0](Activity& feedActivity) mutable
                          {
                            size_t check = feedActivity.data_.feed.one;
                            // ...
                          };
        sum3 = runTest (allocate, invoke);
      };
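
// Variant 4 ("blockFlow"): allocation through the BlockFlow allocator under test;
// the deadline passed to until() selects the target Epoch, and every CLEAN_UP steps
// the (elided) clean-up presumably discards Epochs whose deadline has passed.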
auto blockFlowAlloc = [&]{
        auto allocHandle = blockFlow.until (Time{BASE_DEADLINE});
        auto allocate = [&, j=0](Time t, size_t check) mutable -> Activity&
                          {
                            // ...
                            allocHandle = blockFlow.until (t);
                            // ...
                            return allocHandle.create (check, size_t{55});
                          };
        auto invoke = [&, i=0](Activity& feedActivity) mutable
                          {
                            size_t check = feedActivity.data_.feed.one;
                            if (i % CLEAN_UP == 0)
                                // ...(clean-up elided)...
                            // ...
                          };
        sum4 = runTest (allocate, invoke);
      };


auto time_noAlloc     = benchmark(noAlloc);
auto time_heapAlloc   = benchmark(heapAlloc);
auto time_sharedAlloc = benchmark(sharedAlloc);

cout << "\n\n■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■" << endl;

auto time_blockFlow = benchmark(blockFlowAlloc);
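
// expected Epoch step for this load: the time span of one Epoch's worth of frames at
// the given FPS, reduced to 9/10 (presumably reflecting the configured target fill).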
Duration expectStep{FSecs{blockFlow.framesPerEpoch(), FPS} * 9/10};

cout << "\n___Microbenchmark____"
     << "\nnoAlloc : "     << time_noAlloc
     << "\nheapAlloc : "   << time_heapAlloc
     << "\nsharedAlloc : " << time_sharedAlloc
     << "\nblockFlow : "   << time_blockFlow
     << "\n_____________________\n"
     << "\ninstances.... " << INSTANCES
     << "\nfps.......... " << FPS
     << "\nActivities/s. " << TICK_P_S
     << "\nEpoch(expect) " << expectStep
     << "\nEpoch (real) "  << blockFlow.getEpochStep()
     << "\ncnt Epochs... " << watch(blockFlow).cntEpochs()
     << "\nalloc pool... " << watch(blockFlow).poolSize();

CHECK (sum1 == sum2);
CHECK (sum1 == sum3);
CHECK (sum1 == sum4);

CHECK (expectStep - blockFlow.getEpochStep() < Time(10,0));
CHECK (watch(blockFlow).cntEpochs() < 8);
CHECK (time_blockFlow < 800);