631
631
newSeries.pathExpression = newName
632
632
results.append(newSeries)
637
def hitcount(requestContext, seriesList, intervalString):
  """Estimate hit counts from a list of time series.

  This function assumes the values in each time series represent
  hits per second. It calculates hits per some larger interval
  such as per day or per hour. This function is like summarize(),
  except that it compensates automatically for different time scales
  (so that a similar graph results from using either fine-grained
  or coarse-grained records) and handles rarely-occurring events
  gracefully.

  requestContext  -- render-request context dict (unused here, kept for
                     the common function signature)
  seriesList      -- list of TimeSeries whose values are hits/second
  intervalString  -- bucket size as a time-offset string, e.g. "1day"

  Returns a list of new TimeSeries, one per input series, whose values
  are total hits per interval (None for intervals with no data).
  """
  results = []
  delta = parseTimeOffset(intervalString)
  # timedelta only stores days + seconds; flatten to whole seconds.
  interval = int(delta.seconds + (delta.days * 86400))

  for series in seriesList:
    step = int(series.step)
    bucket_count = int(math.ceil(float(series.end - series.start) / interval))
    buckets = [[] for _ in range(bucket_count)]
    # Align buckets to series.end so the most recent bucket is complete;
    # newStart may fall before series.start (negative bucket indices are
    # discarded below).
    newStart = int(series.end - bucket_count * interval)

    for i, value in enumerate(series):
      # Skip gaps in the data: None values carry no hits and would
      # raise a TypeError in the arithmetic below.
      if value is None:
        continue

      start_time = int(series.start + i * step)
      start_bucket, start_mod = divmod(start_time - newStart, interval)
      end_time = start_time + step
      end_bucket, end_mod = divmod(end_time - newStart, interval)

      if end_bucket >= bucket_count:
        # The last datapoint may extend past series.end; clamp it into
        # the final bucket and credit it with a full interval there.
        end_bucket = bucket_count - 1
        end_mod = interval

      if start_bucket == end_bucket:
        # All of the hits go to a single bucket.
        if start_bucket >= 0:
          buckets[start_bucket].append(value * (end_mod - start_mod))

      else:
        # Spread the hits among 2 or more buckets, pro-rated by the
        # fraction of the step that falls inside each bucket.
        if start_bucket >= 0:
          buckets[start_bucket].append(value * (interval - start_mod))
        hits_per_bucket = value * interval
        for j in range(start_bucket + 1, end_bucket):
          buckets[j].append(hits_per_bucket)
        if end_mod > 0:
          buckets[end_bucket].append(value * end_mod)

    # Empty buckets mean "no data", not zero hits.
    newValues = [(sum(bucket) if bucket else None) for bucket in buckets]
    newName = 'hitcount(%s, "%s")' % (series.name, intervalString)
    newSeries = TimeSeries(newName, newStart, series.end, interval, newValues)
    newSeries.pathExpression = newName
    results.append(newSeries)

  return results