// NOTE(review): extraction artefact — the original source line numbers ("47", "77", …)
// are fused into the text of this chunk; all tokens below are kept verbatim.
// Timing is measured as double microseconds on the monotonic steady_clock.
47 #ifndef LIB_INCIDENCE_COUNT_H 48 #define LIB_INCIDENCE_COUNT_H 77 using TIMING_SCALE = std::micro;
78 using Clock = std::chrono::steady_clock;
// Instance: a time point as produced by Clock::now(); Dur: fractional microseconds.
80 using Instance = decltype(Clock::now());
81 using Dur = std::chrono::duration<double, TIMING_SCALE>;
// Each thread records its own Sequence of Inc events;
// the Recording holds one Sequence per observed thread.
91 using Sequence = vector<Inc>;
92 using Recording = vector<Sequence>;
// Atomic counter handing out a distinct slot / thread-ID to each recording thread.
96 std::atomic_uint8_t slotID_{0};
// Thread-safe allocation: relaxed ordering suffices, since only uniqueness
// of the returned ID matters, not ordering relative to other memory accesses.
102 return slotID_.fetch_add(+1, std::memory_order_relaxed);
// Guard against exhausting the uint8_t slot-ID space.
109 ASSERT (threadID < std::numeric_limits<uint8_t>::max(),
"WOW -- so many threads?");
// Retrieve the per-thread event sequence for the given slot,
// growing the recording on demand when a new thread ID appears.
// NOTE(review): original lines 115, 117, 120-121 (braces and the loop body
// that actually grows rec_) are missing from this chunk — do not infer them.
114 getMySequence(uint8_t threadID)
116 if (threadID >= rec_.size())
118 rec_.reserve (threadID+1);
119 for (
size_t i = rec_.size(); i < threadID+1u; ++i)
122 return rec_[threadID];
// Record one incidence event (enter or leave) into the calling thread's sequence.
// NOTE(review): the braces of this function body (original lines 127 and beyond)
// are missing from this chunk.
126 addEntry(uint8_t caseID,
bool isLeave)
// The slot ID doubles as index into the per-thread recording.
128 uint8_t threadID{getMySlot()};
129 Sequence& seq = getMySequence(threadID);
// Construct the event in place and stamp it with the monotonic clock.
130 Inc& incidence = seq.emplace_back();
131 incidence.when = Clock::now();
132 incidence.thread = threadID;
133 incidence.caseID = caseID;
134 incidence.isLeave = isLeave;
// Pre-allocation hints, to avoid reallocations while measurements are running.
// NOTE(review): the bodies (original lines 144-150, 152-153, 155 ff.) are
// largely missing from this chunk; only the loop over all sequences is visible.
143 expectThreads(uint8_t cnt)
151 expectIncidents(
size_t cnt)
154 for (Sequence& s : rec_)
163 void markEnter(uint8_t caseID =0) { addEntry(caseID,
false); }
164 void markLeave(uint8_t caseID =0) { addEntry(caseID,
true); }
// Aggregated statistics computed from the raw recording.
172 size_t activationCnt{0};
// aggregated time over all cases
173 double cumulatedTime{0};
// compounded time of thread activity
174 double activeTime{0};
// overall timespan of the observation
175 double coveredTime{0};
// amortised concurrency within the covered timespan
176 double avgConcurrency{0};
// per-case and per-thread activation counters...
177 vector<size_t> caseCntr{};
179 vector<size_t> thrdCntr{};
// ...and activity times, plus the time spent at each concurrency
// level (index = number of concurrently active threads)
180 vector<double> caseTime{};
181 vector<double> thrdTime{};
182 vector<double> concTime{};
// Bounds-checked element access into a statistics vector.
// NOTE(review): original line 189 — the fallback branch of the ?: operator,
// presumably a default-constructed VAL for out-of-range idx — is missing here.
184 template<
typename VAL>
186 access (vector<VAL>
const& data,
size_t idx)
188 return idx < data.size()? data[idx]
191 size_t cntCase (
size_t id)
const {
return access (caseCntr,
id); }
192 size_t cntThread(
size_t id)
const {
return access (thrdCntr,
id); }
193 double timeCase (
size_t id)
const {
return access (caseTime,
id); }
194 double timeThread(
size_t id)
const {
return access (thrdTime,
id); }
195 double timeAtConc(
size_t id)
const {
return access (concTime,
id); }
// evaluate(): merge all per-thread sequences into one time-ordered timeline,
// then sweep it once, accumulating per-case / per-thread activity times and
// the time spent at each concurrency level.
// NOTE(review): numerous original lines (221, 224, 226, 233-235, 238-244, 247,
// 249, 253, 255, 259, 263, 265-267, 270, 273-276, 279-284, 286) are missing
// from this chunk; commentary is limited to what the visible code shows.
// Guard: an empty recording yields the default (zeroed) statistics.
219 size_t numThreads = rec_.size();
220 if (numThreads == 0)
return stat;
// Total event count over all sequences (used to pre-size the timeline).
222 size_t numEvents =
explore(rec_)
223 .transform([](Sequence& seq){
return seq.size(); })
225 if (numEvents == 0)
return stat;
// Copy every event into one flat timeline and order by timestamp;
// stable_sort keeps the relative order of events with equal timestamps.
227 timeline.reserve(numEvents);
228 for (Sequence& seq : rec_)
229 for (
Inc& event : seq)
230 timeline.emplace_back(event);
231 std::stable_sort (timeline.begin(), timeline.end()
232 ,[](
Inc const& l,
Inc const& r) {
return l.when < r.when; }
// Sweep state: activation depth per case and per thread;
// concTime gets one bucket per possible concurrency level 0..numThreads.
236 vector<int> active_case;
237 vector<int> active_thrd(numThreads);
240 stat.concTime.resize (numThreads+1);
245 Instance prev = timeline.front().when;
246 for (
Inc& event : timeline)
// Grow the per-case accounting on first sight of a new caseID.
248 if (event.caseID >= stat.
caseCntr.size())
250 active_case .resize (event.caseID+1);
251 stat.
caseCntr.resize (event.caseID+1);
252 stat.
caseTime.resize (event.caseID+1);
// Attribute the time slice since the previous event to every currently
// active case (weighted by activation depth) and thread...
254 Dur timeSlice =
event.when - prev;
256 for (uint i=0; i < stat.
caseCntr.size(); ++i)
257 stat.
caseTime[i] += active_case[i] * timeSlice.count();
// (original line 259 is missing — presumably a guard so that only
//  active threads accumulate time; TODO confirm against full source)
258 for (uint i=0; i < numThreads; ++i)
260 stat.
thrdTime[i] += timeSlice.count();
// ...and to the bucket of the current concurrency level,
// i.e. the number of threads active during this slice.
261 size_t concurr =
explore(active_thrd).filter([](
int a){
return 0 < a; }).count();
262 ENSURE (concurr <= numThreads);
264 stat.concTime[concurr] += timeSlice.count();
// Leave events decrement the activation depths (which must be positive)...
268 ASSERT (0 < active_case[event.caseID]);
269 ASSERT (0 < active_thrd[event.thread]);
271 --active_case[
event.caseID];
272 --active_thrd[
event.thread];
// ...enter events increment them and count one activation.
// (the branching on event.isLeave itself is on missing lines — TODO confirm)
277 ++active_case[
event.caseID];
278 ++active_thrd[
event.thread];
281 ++stat.activationCnt;
// Wrap-up: covered timespan and sanity checks — enter/leave events must pair up.
285 Dur covered = timeline.back().when - timeline.front().when;
287 stat.eventCnt = timeline.size();
288 ENSURE (0 < stat.activationCnt);
289 ENSURE (stat.eventCnt % 2 == 0);
auto explore(IT &&srcSeq)
Start building an IterExplorer by suitably wrapping the given iterable source.
double cumulatedTime
aggregated time over all cases
Any copying and copy construction is prohibited.
Implementation namespace for support and library code.
double avgConcurrency
amortised concurrency in timespan
Mix-Ins to allow or prohibit various degrees of copying and cloning.
uint8_t allocateNextSlot()
threadsafe allocation of thread/slotID
double coveredTime
overall timespan of observation
vector< double > caseTime
aggregated time per case
vector< size_t > thrdCntr
counting activations per thread
A recorder for concurrent incidences.
double activeTime
compounded time of thread activity
vector< size_t > caseCntr
counting activations per case
vector< double > thrdTime
time of activity per thread
Statistic evaluate()
Visit all data captured thus far, construct a unified timeline and then compute a statistics evaluation.
Building tree expanding and backtracking evaluations within hierarchical scopes.