tvaLib
analysis.py
Go to the documentation of this file.
1 #!/usr/bin/env python
2 # tvaLib Copyright (c) 2012-2016 Paul G. St-Aubin
3 # Ecole Polytechnique de Montreal, McGill University
4 # Python 2.7; (dt) Spyder Windows 10 64-bit; ipython Ubuntu 15.04 64-bit
5
8 import os, sys, math
9 from copy import deepcopy
10 from inspect import getfile as inspect_getfile
11 from inspect import currentframe as inspect_currentframe
12
# Hard dependency: numpy. 0o101 (octal, == 65) preserves the original 0101
# project error code; raise Exception([...]) replaces the Python-2-only
# "raise Exception, [...]" syntax and ImportError replaces the overly broad
# Exception catch.
try: import numpy as np
except ImportError: raise Exception([0o101, 'Numpy is not installed.'])
15
# When run as a script rather than imported: announce, then put the package
# root (two directories up from this file) on sys.path so the absolute
# "lib.tools" import below resolves.
if __name__ == '__main__':
    print('Analysis library loaded directly.')
    sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(inspect_getfile(inspect_currentframe())))))
19 import lib.tools as tvaLib
20
21
def rateString(obs, total):
    ''' Take observations and total sample and format a string like "10/100 (10%)"

        Returns 'None' when total is zero (no sample to rate against). '''
    if(total == 0): return 'None'
    # Round float observations for display only; the percentage below is
    # computed from the raw value. isinstance() also accepts float
    # subclasses (e.g. numpy floats), unlike the original type() check.
    if(isinstance(obs, float)): obs_ = round(obs,1)
    else: obs_ = obs
    return str(obs_)+'/'+str(total)+' ('+str(round(float(obs)/float(total)*100,1))+'%)'
28
29
30 class Measures():
31  ''' The most basic analysis container contains individual values, e.g. one
32  value for each observation, or each site analysis. This could contain,
33  for example the mean speed at a site, or, the total flow, etc. '''
34  def __init__(self, prototype=None):
35  self.values = []
36  self.prototype = prototype
37  return
38
    # Container protocol: delegate length and indexing to the values list.
    def __len__(self): return len(self.values)
    def __getitem__(self, i): return self.values[i]
41
    def add(self, *args):
        ''' Append a fresh deep copy of the prototype as a new analysis slot;
            False when no prototype was configured. '''
        if(self.prototype == None): return False
        self.values.append(deepcopy(self.prototype))
        # NOTE(review): a statement consuming *args (likely attaching metadata
        # to the new value) appears to be elided from this extraction --
        # confirm against the original source.
        return True
47
48  def increment(self, *args, **kwargs):
49  value = self.values[-1].increment(*args, **kwargs)
50  return value
51
53  return_ = []
54  for i in range(len(self.values)):
55  if(self.values[i].value): return_.append(i)
56  return return_
57  def getMeasures(self, round_=-1):
58  ''' Return measure of all analyses '''
59  indeces = self.getNonZeroValueIndeces()
60  measures = []
61  for i in indeces:
62  measures.append(self.values[i].value)
63  if(round_>=0 and isinstance(round_, int)):
64  measures = [round(x, round_) for x in measures]
65  return measures
66  def getDepths(self, round_=-1):
67  ''' Return depths of all analyses '''
68  indeces = self.getNonZeroValueIndeces()
69  depths = []
70  for i in indeces:
71  depths.append(self.values[i].depth)
72  if(round_>=0 and isinstance(round_, int)):
73  depths = [round(x, round_) for x in depths]
74  return depths
    def getMeasuresAggregatedBySite(self, round_=-1):
        ''' Return measure of all sites (one combined value per site). '''
        indeces = self.getNonZeroValueIndeces()
        siteIds = self.getSiteIds()
        measures = []
        for s in siteIds:
            localmeasure = 0
            localDepth = 0
            # Fold every analysis belonging to site s into one combined value.
            # NOTE(review): localDepth is never accumulated inside this loop,
            # and the argument grouping differs from incrementByMean's
            # combineMean([mean1, mean2], [depth1, depth2]) usage -- this
            # looks like a transposition; confirm against tvaLib.Math.
            for i in indeces:
                if(self.values[i].metadata.idx == s): localmeasure = tvaLib.Math.combineMean([localmeasure,localDepth], [self.values[i].value, self.values[i].depth])
            # Sites with a falsy combined measure are silently dropped.
            if(localmeasure): measures.append(localmeasure)
        if(round_>=0 and isinstance(round_, int)):
            measures = [round(x, round_) for x in measures]
        return measures
    def getSiteIds(self, NonZeroOnly=False):
        ''' Return list of site IDs, de-duplicated; with NonZeroOnly, values
            whose measure is 0 are excluded. '''
        ids = []
        for i in self.values:
            if(NonZeroOnly and i.value == 0): continue
            # NOTE(review): the statement collecting each id (presumably
            # ids.append(i.metadata.idx)) appears to be elided from this
            # extraction -- confirm against the original source.
        ids = tvaLib.unique(ids)
        return ids
97
98  def getSiteRankBySaIx(self, saIx):
99  try: return self.getSiteIdRanking().index(saIx)
100  except: return '-'
101
103  ''' Return ranked id aggregated by sites '''
104  measures = self.getMeasuresAggregatedBySite()
105  return sorted(range(len(measures)), key=measures.__getitem__)
106  def getSiteIdRanking(self):
107  ''' Return ranked id aggregated by sites '''
108  indeces = self.getSiteRankingByIndex()
109  siteIds = self.getSiteIds(NonZeroOnly=True)
110  return [siteIds[x] for x in indeces]
111  def getSiteRankingByMeasure(self, round_=-1):
112  ''' Return sorted measure aggregated by sites '''
113  return sorted(self.getMeasuresAggregatedBySite(round_))
114
    def getMeasureRankingByIndex(self, style='verbose'):
        ''' Return ranked index of all analyses, ascending by measure.
            style=='verbose' returns metadata names; any other style returns
            the raw value indices. Implicitly returns None when there are no
            non-zero measures. '''
        measures = self.getMeasures()
        siteIndeces = self.getNonZeroValueIndeces()
        if(measures and siteIndeces):
            # Sort both lists in lockstep, keyed on the measures.
            measures, siteIndeces = zip(*sorted(zip(measures, siteIndeces)))
            if(style=='verbose'): return [self.values[x].metadata.name for x in siteIndeces]
            else: return [x for x in siteIndeces]
123  def getMeasureRankingByMeasure(self, round_=-1):
124  ''' Return sorted measure of all analyses '''
125  return sorted(self.getMeasures(round_))
126
127  def printResult(self,label='default', round_=-1):
128  print '============================================'
129  print 'Ranking by '+label+' aggregated by site'
130  print self.getSiteIdRanking()
131  print self.getSiteRankingByMeasure(round_)
132  print 'Ranking by '+label+' for all analyses'
133  print self.getMeasureRankingByIndex()
134  print self.getMeasureRankingByMeasure(round_)
135
137  ''' This analysis contains a large number of observations. '''
139  return_ = []
140  for i in range(len(self.values)):
141  #print self.values[i].binData
142  if(self.values[i].binData): return_.append(i)
143  return return_
144  def getBins(self):
145  ''' Return bins of all analyses '''
146  indeces = self.getNonZeroValueIndeces()
147  bins = []
148  for i in indeces:
149  bins.append(self.values[i].binStops)
150  return bins
151  def getFrequencies(self):
152  ''' Return frequencies of all analyses '''
153  indeces = self.getNonZeroValueIndeces()
154  frequencies = []
155  for i in indeces:
156  frequencies.append(self.values[i].binData)
157  return frequencies
158  def getSumFrequency(self):
159  freq = self.getFrequencies()
160  for i in range(len(freq)):
161  if(i==0): sumFreq = freq[i]
162  else: sumFreq = [x+y for x,y in zip(sumFreq,freq[i])]
163  return sumFreq
    def getSTDEVofBins(self):
        ''' This function returns the standard deviation on each bin from the
            sample (freqSamples).
            Returns False as soon as any frequency entry is empty or not a
            list; otherwise one population standard deviation per analysis. '''
        return_ = []
        for data in self.getFrequencies():
            if(not data or type(data) is not list): return False
            # NOTE(review): under Python 2 this is integer division when the
            # frequencies are ints, so the mean is truncated -- confirm.
            mean = sum(data)/len(data)
            squareOfResult=[]
            for obs in data:
                squareOfResult.append(math.pow(obs-mean,2))
            # Population (divide by N), not sample, standard deviation.
            return_.append(math.sqrt(sum(squareOfResult)/len(squareOfResult)))
        return return_
176  def getMAXIMAofBins(self, mode='max'):
177  ''' This function returns the min/max outliers on each bin from the
178  sample (freqSamples). '''
179  return_ = []
180  for data in self.getFrequencies():
181  if(not data or type(data) is not list): return False
182  if(mode=='min'): return_.append(min(data))
183  else: return_.append(max(data))
184  return return_
185
    def clusterByKStest(self, nklusters):
        ''' Cluster distributions by k-test distances into nklusters groups

            Input:
            ======
            self: should be a correctly instantiated, non-empty list of
                  frequency objects
            nklusters: number of cluster centroids (equivalent to k in k-means)

            Output:
            =======
            returns a one-dimensional array of corresponding cluster associated
            with each frequency object contained by this object
            Also overwrites any self.clusters

            '''
        if(not self.values): return False
        # Cannot have more centroids than non-empty distributions.
        if(sum([1 for x in self.values if x]) < nklusters): nklusters = sum([1 for x in self.values if x])

        # Search for centroids: for each distribution with a valid CDF,
        # compute its mean K-S distance to every other distribution. None
        # marks distributions without a CDF.
        meanKsDistances = []
        for distroNeedleIx in range(len(self.values)):
            if(not self.values[distroNeedleIx].getCDF()):
                meanKsDistances.append(None)
                continue
            ksDistances = []
            for distroStackIx in range(len(self.values)):
                if(distroNeedleIx == distroStackIx or not self.values[distroStackIx].getCDF()): continue
                ksDistances.append(tvaLib.Math.ksTest(self.values[distroNeedleIx].getCDF(),self.values[distroStackIx].getCDF()))
            if(len(ksDistances) == 0): meanKsDistances.append(0)
            else: meanKsDistances.append(sum(ksDistances)/float(len(ksDistances)))
        # Pick the nklusters indices with the smallest mean distance; the
        # slice offset by count(None) skips the None entries, which sort
        # first under Python 2 ordering.
        self.centroidIxs = sorted(range(len(meanKsDistances)), key=lambda i: meanKsDistances[i])[meanKsDistances.count(None):nklusters+meanKsDistances.count(None)]

        # Assign every distribution to its nearest centroid: centroids map to
        # their own cluster index, CDF-less distributions get None.
        self.clusters = []

        for distroNeedleIx in range(len(self.values)):
            if(not self.values[distroNeedleIx].getCDF()):
                self.clusters.append(None)
                continue
            ksDistances = []
            isCentroid = False
            for klusterStackIx in range(nklusters):
                if(distroNeedleIx == self.centroidIxs[klusterStackIx]):
                    isCentroid = True
                    break
                ksDistances.append(tvaLib.Math.ksTest(self.values[distroNeedleIx].getCDF(),self.values[self.centroidIxs[klusterStackIx]].getCDF()))
            if(isCentroid): self.clusters.append(klusterStackIx)
            else: self.clusters.append(ksDistances.index(min(ksDistances)))

        return self.clusters
237
239  def getBins(self):
240  ''' Return profile bins. '''
241  for value in self.values:
242  try: return value.bins
243  except: continue
244
245
    def getMean(self, min_up_coverage=0, min_down_coverage=0):
        ''' Per-bin weighted mean across all profiles whose up/down coverage
            strictly exceeds the given minima; bins whose combined weight is
            zero yield 0. '''
        mean = []
        for bIx in range(len(self.getBins())):
            # Both lists apply the same binData + coverage filters, so they
            # stay aligned value-for-weight.
            try: mean.append(tvaLib.Math.combineMean([value.binData[bIx] for value in self.values if value.binData and value.up_coverage > min_up_coverage and value.down_coverage > min_down_coverage], [value.binWeights[bIx] for value in self.values if value.binData and value.up_coverage > min_up_coverage and value.down_coverage > min_down_coverage]))
            except ZeroDivisionError: mean.append(0)
        return mean
252
    def getStdDev(self, min_up_coverage=0, min_down_coverage=0):
        ''' Per-bin combined standard deviation across profiles, filtered by
            coverage like getMean(); bins raising ZeroDivisionError yield 0. '''
        stdev = []
        for bIx in range(len(self.getBins())):
            # NOTE(review): the second (means) list filters only on
            # value.binData, while the first and third also filter on
            # coverage -- if any profile fails the coverage test the three
            # lists are misaligned; confirm whether the filters should match.
            try: stdev.append(tvaLib.Math.combineStdDev([value.binStdDev[bIx] for value in self.values if value.binData and value.up_coverage > min_up_coverage and value.down_coverage > min_down_coverage], [value.binData[bIx] for value in self.values if value.binData], [value.binWeights[bIx] for value in self.values if value.binData and value.up_coverage > min_up_coverage and value.down_coverage > min_down_coverage]))
            except ZeroDivisionError: stdev.append(0)
        return stdev
259
260
261
262
264  metadata = None
265
268  return True
269
271  def __init__(self):
272  self.value = 0.0
273  self.depth = 0
274  return
275
276  def increment(self, data, weights=1):
277  ''' data is a list of observations '''
278  if(type(data) is not list): data = [data]
279  if(type(weights) is not list): weights = [weights]
280  if(len(data) <= 0): return False
281  value = self.incrementByMean(data, weights)
282  return value
283
    def incrementByMean(self, data, weights=1):
        ''' Fold a new batch of observations into the running mean.

            data: a simple list (a scalar is wrapped)
            weights: optional per-observation weights; only applied when the
                list length matches data, otherwise the unweighted mean is
                taken silently.
            Returns the mean of this batch, or False for empty input. '''
        if(type(data) is not list): data = [data]
        if(weights and type(weights) is not list): weights = [weights]
        if(len(data) <= 0): return False
        if(weights and len(weights) == len(data)):
            sum_val = 0
            len_val = 0
            for i in range(len(data)):
                sum_val += data[i]*weights[i]
                len_val += weights[i]
            mean_val = sum_val/float(len_val)
        else:
            len_val = len(data)
            mean_val = sum(data)/float(len_val)
        # Combine the batch mean with the stored mean, weighted by depths.
        self.value = tvaLib.Math.combineMean([self.value, mean_val], [self.depth, len_val])
        # NOTE(review): depth grows by the raw observation count even in the
        # weighted branch (where len_val is the weight total) -- confirm
        # this asymmetry is intended.
        self.depth += len(data)
        return mean_val
302
303
305  def __init__(self):
306  ''' To get hourly values, use getCurve()[hour], etc. '''
307  self.values = [[] for x in range(1440)]
308  self.depths = [0 for x in range(1440)]
309  return
    def increment(self, data, startTimes, startTime, duration, framerate):
        ''' data: a list of measures.
            startTimes: a list of starttimes (in frames) corresponding to each
                measure in data (lists should be the same size)
            startTime: start time of measure recording (datetime-like, only
                .hour and .minute are read)
            duration: duration of measure recording (seconds, inferred from
                the /60.0 conversion below -- confirm)
            framerate: frames per second, used to convert startTimes
        '''
        if(len(data) <= 0): return False

        # Drop each measure into its minute-of-day slot.
        # NOTE(review): a recording running past midnight would push
        # timestampBin beyond index 1439 and raise IndexError -- confirm
        # inputs never span midnight.
        for measure,start in zip(data,startTimes):
            timestampBin = int((startTime.hour*60 + startTime.minute + start/60.0/framerate))
            self.values[timestampBin].append(measure)
        # Record coverage for every minute of the recording window.
        for bin in range(startTime.hour*60 + startTime.minute, int(startTime.hour*60 + startTime.minute + duration/60.0)):
            self.depths[bin] += 1
        return True
325  def getCurve(self, bins=24, type='rate', emptyVal=None, divideBy=1, divideByValueVariety=False, strictDepthCompleteness=False):
326  ''' 2 bins is am/pm, 24 bins is hourly, 96 bins is quarter hourly, etc.
327
328  divideByValueVariety might be handy if the measures are
329  categorical.
330
331  If strictDepthCompleteness is set to True, only return values for
332  bins with a complete depth coverage (in other words the entire
333  period has had data collected over that period). '''
334  if(divideByValueVariety): divideBy = len(list(set(tvaLib.flatten_list(self.values))))
335  returnList = []
336  returnDepth = []
337  for bin in range(bins):
338  if(strictDepthCompleteness and [] in self.values[(bin)*int(1440/bins):(bin+1)*int(1440/bins)]):
339  returnList.append(emptyVal)
340  returnDepth.append(0)
341  elif(self.values[(bin)*int(1440/bins):(bin+1)*int(1440/bins)].count([]) == len(self.values[(bin)*int(1440/bins):(bin+1)*int(1440/bins)])):
342  returnList.append(emptyVal)
343  returnDepth.append(0)
344  else:
345  returnList.append(tvaLib.flatten_list(self.values[(bin)*int(1440/bins):(bin+1)*int(1440/bins)]))
346  returnDepth.append(sum(self.depths[(bin)*int(1440/bins):(bin+1)*int(1440/bins)]))
347  for i,depth in zip(range(len(returnList)),returnDepth):
348  if(returnList[i] and returnList[i] != emptyVal):
349  if(type=='mean'): returnList[i] = sum(returnList[i])/float(len(returnList[i]))
350  elif(type=='std'): returnList[i] = np.std(returnList[i])
351  else:
352  if(depth): returnList[i] = len(returnList[i])/float(depth)*int(1440/bins)/float(divideBy)
353  else: returnList[i] = emptyVal
354  return returnList
355
356
    def increment(self, data, startTime, duration, framerate, mps_kmh=3.6):
        ''' data is a list of objects (trajectories). Each trajectory's mean
            of velocities.positions[2] (assumed per-frame speed -- confirm),
            scaled by framerate and mps_kmh, is dropped into the minute-of-day
            slot of the trajectory's first instant. '''
        if(len(data) <= 0): return False

        for datum in data:
            timestampBin = int((startTime.hour*60 + startTime.minute + datum.getFirstInstant()/60.0/framerate))
            self.values[timestampBin].append(sum(datum.velocities.positions[2])/float(len(datum.velocities.positions[2]))*mps_kmh*framerate)
        return True
366
368  ''' Pool together raw data. Usefull if the data is structured using a custom scheme. '''
369  def __init__(self):
370  self.value = []
371  return
372
    def increment(self, data):
        ''' Data is a list of observations; it is pooled as-is (nested, not
            flattened) and the accumulated pool is returned. '''
        self.value.append(data)
        return self.value
377
379  def __init__(self, binSize=0.25, xstart=0, xend=10):
380  self.binStops = [xstart]
381  self.binData = []
382  self.binSize = binSize
383  self.depth = 0
384  self.source = []
385
386  while(True):
387  xstart += self.binSize
388  self.binStops.append(xstart)
389  self.binData.append(0)
390  if(xstart > xend): break
391  return
392
    def increment(self, data, weights=None, method='byData', keepSource=False):
        ''' Increment data.

            Input:
            ======
            method=='byData':
                Data is a simple list of individual observations, histogrammed
                against the existing binStops.
            method=='byMeanHisto':
                Use this method if the histogram has been pre calculated;
                data is then a (histo, xs) pair of counts and bin stops.
            weights: optional per-observation weights (byData only)
            keepSource: when True, raw observations are also retained in
                self.source (byData only) '''
        if(len(data) <= 0): return False

        if(method=='byMeanHisto'):
            (histo,xs) = data
            if(self.depth == 0):
                # First histogram: adopt its binning wholesale.
                self.binStops = xs
                self.binSize = xs[1]-xs[0]
                self.binData = list(histo)
                self.depth = sum(histo)
                return True
            # Merging requires identical bin counts.
            if(len(histo) != len(self.binData)): return False
            # NOTE(review): this averages each stored bin (weight = current
            # depth) with the incoming bin (weight = the incoming histogram's
            # own total) rather than simply adding counts -- confirm this
            # depth-weighted merge is intended.
            for histIx in range(len(histo)):
                self.binData[histIx] = (self.binData[histIx]*self.depth+histo[histIx]*sum(histo))/(self.depth+sum(histo))
            self.depth += sum(histo)
        else:
            if(keepSource): self.source += data
            # Observations outside binStops are silently dropped by
            # np.histogram's range handling.
            if(weights): result = list(np.histogram(data, bins=self.binStops, weights=weights)[0])
            else: result = list(np.histogram(data, bins=self.binStops)[0])
            for i in range(len(result)):
                self.binData[i] += result[i]
            self.depth += len(data)
        return True
424
425
426  def verifyBinStops(self, data):
427  ''' This method is called to build/expand'''
428  data = sorted(data)
429  if(data[0] < self.binStops[-1] or data[-1] > self.binStops[-1]): return False
430  else: return True
431
432  def getFreq(self):
433  if(not sum(self.binData)): return False
434  return self.binData
435
436  def getPDF(self):
437  if(not sum(self.binData)): return False
438  return [x/float(sum(self.binData)) for x in self.binData]
439
440  def getCDF(self):
441  frequency = self.getPDF()
442  if(not frequency): return False
443  return_ = []
444  for i in range(len(frequency)):
445  return_.append(sum(frequency[0:i+1]))
446  return return_
447
448  def ksTestWith(self, refData):
449  thisData = self.getCDF()
450  if(isinstance(refData, Frequency)): refData = refData.getCDF()
451  if(not thisData or type(refData) != list or len(refData) != len(thisData)): return False
452  supremum = 0
453  for i in range(len(thisData)):
454  if(math.fabs(thisData[i] - refData[i]) > supremum): supremum = math.fabs(thisData[i] - refData[i])
455  return supremum
456
458  def __init__(self):
459  self.binData = []
460  self.bins = []
461  self.binWeights = []
462  self.binStdDev = []
463  self.binMins = []
464  self.binMaxs = []
465  self.up_coverage = 0
466  self.down_coverage = 0
467  return
468
469  def setBins(self, values): self.bins = values; return True
470
    def increment(self, data, weights, stdDevs=None, mins=None, maxs=None):
        ''' Increment data (per-bin profile statistics).

            data:      per-bin means
            weights:   per-bin weights (must match data's length on merges)
            stdDevs:   optional per-bin standard deviations
            mins/maxs: optional per-bin extrema '''
        if(len(data) <= 0): return False

        if(not self.binData):
            # First batch: adopt everything as-is (no copies are made).
            self.binData = data
            self.binWeights = weights
            if(stdDevs): self.binStdDev = stdDevs
            if(mins): self.binMins = mins
            if(maxs): self.binMaxs = maxs
        else:
            if(len(data) != len(self.binData) or len(data) != len(weights)): return False
            # Combine stddevs first, while binData/binWeights still hold the
            # previous values that the combination depends on.
            if(stdDevs): self.binStdDev = [tvaLib.Math.combineStdDev([stdDevs[ix], self.binStdDev[ix]], [data[ix], self.binData[ix]], [weights[ix], self.binWeights[ix]]) for ix in range(len(data))]
            self.binData = [tvaLib.Math.combineMean([data[ix], self.binData[ix]], [weights[ix], self.binWeights[ix]]) for ix in range(len(data))]
            if(mins): self.binMins = [m1 if m1 < m2 else m2 for m1,m2 in zip(mins, self.binMins)]
            if(maxs): self.binMaxs = [m1 if m1 > m2 else m2 for m1,m2 in zip(maxs, self.binMaxs)]
            for ix in range(len(data)): self.binWeights[ix] += weights[ix]
        return True
489
490
491
492
494  def increment(self, data, duration=3600):
495  ''' data is a generic rate (int) '''
496  if(not isinstance(data, list)): data = [data]
497  self.incrementByMean([x*float(3600/duration) for x in data])
498  return True
499
class Total(Measure):
    ''' Running total of a generic count. '''
    def increment(self, data):
        ''' data is a generic total (int). '''
        self.value = self.value + data
        return True
505
507  def increment(self, data, speed_conv):
508  ''' data is a list of objects (trajectories). '''
509  if(len(data) <= 0): return False
510
511  data_ = []
512  for datum in data:
513  data_.append(sum(datum.velocities.positions[2])/float(len(datum.velocities.positions[2]))*speed_conv)
514  mean_val = self.incrementByMean(data_)
515  return mean_val
516
518  def increment(self, data, duration=3600, location=0):
519  ''' data is a list of objects (trajectories). '''
520  if(len(data) <= 0): return False
521
522  data_ = [0 for i in range(len(self.metadata.site.alignments))]
523  for datum in data:
524  data_[datum.curvilinearPositions.getLanes()[location]] += 1
525  for i in range(len(data_)):
526  data_[i] = data_[i] * 3600/duration
527  if(len(filter(None, data_))): mean_val = self.incrementByMean(filter(None, data_))
528  else: mean_val = self.incrementByMean([0])
529  return mean_val
530
532  def increment(self, data):
533  ''' data is a list of objects (trajectories). '''
534  if(len(data) <= 0): return False
535  value = 0
536  for datum in data:
537  value += sum(datum.velocities.positions[2])/1000.0
538  self.depth += 1
539  self.value += value
540  return value
541
543  def __init__(self, thresh_ranges):
544  self.thresh_ranges = thresh_ranges
545  self.values = [0 for i in thresh_ranges]
546  self.weights = [[] for i in thresh_ranges]
547  self.total = 0
548  return
549  def increment(self, data, weights=None):
550  ''' data is a a list of indicators
551  weights is a list of weights (0-1) of equal size to data
552  '''
553  self.total += len(data)
554  for thresh in range(len(self.thresh_ranges)):
555  for datum in data:
556  if(datum <= self.thresh_ranges[thresh]):
557  self.values[thresh] += 1
558  if(weights):
559  for weight,datum in zip(weights,data):
560  if(datum <= self.thresh_ranges[thresh]):
561  self.weights[thresh].append(weight)
562  return True
563
    # Accessors: raw hit count at a threshold index, and that count scaled by
    # the mean weight of its contributing observations.
    def getValue(self, thresh): return self.values[thresh]
    # NOTE(review): raises ZeroDivisionError when no weights were recorded
    # for this threshold -- confirm callers guard against that.
    def getWeightedValue(self, thresh): return self.values[thresh]*sum(self.weights[thresh])/float(len(self.weights[thresh]))
566
567 #K-S test
def getNonZeroValueIndeces(self)
Definition: analysis.py:52
def __init__(self)
Definition: analysis.py:369
def verifyBinStops(self, data)
Definition: analysis.py:426
def getSiteIds(self, NonZeroOnly=False)
Definition: analysis.py:89
def increment(self, data, weights=None)
Definition: analysis.py:549
def increment(self, data, weights=1)
Definition: analysis.py:276
def increment(self, data)
Definition: analysis.py:532
def getSiteRankingByMeasure(self, round_=-1)
Definition: analysis.py:111
def getMean(self, min_up_coverage=0, min_down_coverage=0)
Definition: analysis.py:246
def getMeasuresAggregatedBySite(self, round_=-1)
Definition: analysis.py:75
def getSiteIdRanking(self)
Definition: analysis.py:106
def __init__(self)
Definition: analysis.py:271
Definition: analysis.py:266
def printResult(self, label='default', round_=-1)
Definition: analysis.py:127
def getCurve(self, bins=24, type='rate', emptyVal=None, divideBy=1, divideByValueVariety=False, strictDepthCompleteness=False)
Definition: analysis.py:325
def setBins(self, values)
Definition: analysis.py:469
def __init__(self, binSize=0.25, xstart=0, xend=10)
Definition: analysis.py:379
def getMAXIMAofBins(self, mode='max')
Definition: analysis.py:176
def __init__(self, thresh_ranges)
Definition: analysis.py:543
def increment(self, data, startTime, duration, framerate, mps_kmh=3.6)
Definition: analysis.py:358
def increment(self, data, duration=3600, location=0)
Definition: analysis.py:518
def __init__(self)
Definition: analysis.py:458
def increment(self, data)
Definition: analysis.py:373
def increment(self, data)
Definition: analysis.py:501
def ksTestWith(self, refData)
Definition: analysis.py:448
def getSiteRankingByIndex(self)
Definition: analysis.py:102
def getMeasureRankingByMeasure(self, round_=-1)
Definition: analysis.py:123
def __len__(self)
Definition: analysis.py:39
centroidIxs
Search for centroids: pick top nklusters according to minimum mean K-S-test with all others...
Definition: analysis.py:217
def increment(self, data, speed_conv)
Definition: analysis.py:507
def increment(self, data, weights, stdDevs=None, mins=None, maxs=None)
Definition: analysis.py:471
def __init__(self, prototype=None)
Definition: analysis.py:34
def incrementByMean(self, data, weights=1)
Definition: analysis.py:284
def rateString(obs, total)
Definition: analysis.py:22
def __getitem__(self, i)
Definition: analysis.py:40
def getSiteRankBySaIx(self, saIx)
Definition: analysis.py:98
Definition: analysis.py:42
def getWeightedValue(self, thresh)
Definition: analysis.py:565
def getSumFrequency(self)
Definition: analysis.py:158
def getMeasureRankingByIndex(self, style='verbose')
Definition: analysis.py:115
def clusterByKStest(self, nklusters)
Definition: analysis.py:186
def getDepths(self, round_=-1)
Definition: analysis.py:66
def increment(self, args, kwargs)
Definition: analysis.py:48
def getNonZeroValueIndeces(self)
Definition: analysis.py:138
def increment(self, data, startTimes, startTime, duration, framerate)
Definition: analysis.py:310
def getValue(self, thresh)
Definition: analysis.py:564
def increment(self, data, duration=3600)
Definition: analysis.py:494
def getFrequencies(self)
Definition: analysis.py:151
def increment(self, data, weights=None, method='byData', keepSource=False)
Definition: analysis.py:393
def getMeasures(self, round_=-1)
Definition: analysis.py:57
def getStdDev(self, min_up_coverage=0, min_down_coverage=0)
Definition: analysis.py:253
def getSTDEVofBins(self)
Definition: analysis.py:164