tvaLib
analysis.py
Go to the documentation of this file.
#!/usr/bin/env python
# tvaLib Copyright (c) 2012-2016 Paul G. St-Aubin
# Ecole Polytechnique de Montreal, McGill University
# Python 2.7; (dt) Spyder Windows 10 64-bit; ipython Ubuntu 15.04 64-bit

# Release tag and last-update date for this module; consumed later via
# tvaLib.Parse.versionFromHgCommit(version).
version = 'R2.3.0 u. 2017-03-22'

# Developer note: sample command-line invocation for this analysis script.
'''
-z I:\Video -d lund-scene.sqlite --analysis 2 -w -a --hli cg
'''

14 def main():
15  try:
16 
17 
18 
22  import sys, os, time, logging
23  from csv import writer as csv_writer
24  from csv import QUOTE_MINIMAL as csv_QUOTE_MINIMAL
25  from collections import OrderedDict
26  from site import addsitedir
27  from copy import deepcopy
28  from datetime import datetime
29  import cPickle as pickle
30  from warnings import filterwarnings
31  filterwarnings('ignore')
32 
33  #Add parent library
34  if(os.path.isfile(os.path.join(os.getcwd(), os.pardir, 'main.py'))):
35  addsitedir(os.path.join(os.getcwd(), os.pardir))
36  os.chdir(os.path.join(os.getcwd(), os.pardir))
37 
38  import include.config as tvaConfig
39  import include.local as tvaLocal
40 
41 
44  commands = tvaConfig.commands()
45  config = tvaConfig.Config(readonly=commands.configreadonly)
46 
47 
50  try: #Colorama, optional, required for Windows if coloured text is desired
51  from colorama import init as colorama_init
52  colorama_init(strip=False)
53  from colorama import Fore, Back, Style
54  except ImportError:
55  Fore = tvaConfig.Fore()
56  Back = tvaConfig.Back()
57  Style = tvaConfig.Style()
58  if(commands.logging or config.disable_colour):
59  config.disable_colour = True
60  Fore = tvaConfig.Fore(forceOff=True)
61  Back = tvaConfig.Back(forceOff=True)
62  Style = tvaConfig.Style(forceOff=True)
63 
64 
67  import include.runtime as tvaRuntime
68 
69 
72  if(not config.advanced_runtime): tvaRuntime.checkDependancies()
73 
74 
78  import matplotlib as mpl; mpl.use('Agg')
79  from scipy.stats import kruskal as scipy_stats_kruskal
80 
81 
86  if(not config.debug): oldstdout = sys.stdout;sys.stdout = tvaConfig.NullWriter()
87  try:
88  import lib.analysis as tvaAnalysis
89  import lib.filt as tvaFilter
90  import lib.scene as tvaScene
91  import lib.tools as tvaLib
92  import lib.vis as tvaVis
93  finally:
94  if(not config.debug): sys.stdout = oldstdout #Re-enable output
95 
96 
99  if(config.debug): logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] (%(threadName)-10s) %(message)s')
100 
101 
102 
105  if(commands.dir): config.dir = commands.dir
106  if(commands.dbn): config.dbn = commands.dbn
107  if(commands.language): config.language = commands.language
108  local = tvaLocal.Local(commands.language)
109  if(not commands.analysis): commands.analysis = 1
110  #Override module colour options
111  tvaLib.Fore = Fore;tvaLib.Back = Back;tvaLib.Style = Style
112  tvaRuntime.Fore = Fore;tvaRuntime.Back = Back;tvaRuntime.Style = Style
113 
114  sites = tvaScene.Sites(config)
115  site_analyses = tvaScene.SiteAnalyses(config, sites=sites)
116  analyses = tvaScene.Analyses(config)
117  cluster_specs = tvaScene.Clusters(config)
118 
119  if(commands.analysis): aIx = analyses.interpret(commands.analysis)[0]
120  else: aIx = 0
121  summaryFolderPrefix = 'SUMMARY'
122  clusterFolderPrefix = 'CLUSTER'
123  siteSummaryPrefix = 'site_summaries'
124  indDistPrefix = 'indicator_distributions'
125  minimumVersion = 'R2.2.0'
126  minimumVersionDepth = 3
127  freq_binsize = 0.25
128  labelTTCSampleSize = True
129  nklusters = 1 # no more than 6
130  per_site_sampling_size = 10000 # kruskal
131  intInsClassMethods_master = tvaConfig.InteractionClassificationMethods()
132  tvaVis.plotSettings(style=commands.fig_style, size=config.plot_text_size, family=config.font_family, verbose=commands.verbose)
133  config.version = tvaLib.Parse.versionFromHgCommit(version)
134  if(commands.depth >= 2):
135  distributionTypes = ['pdf','cdf']
136  predictionMethods_master = tvaConfig.PredictionMethods(verbose=commands.verbose, version=config.version)
137  agregationMethods_master = tvaConfig.AggMethods()
138  print('Running meta analysis version '+config.version+'.')
139 
140 
141 
144  if(commands.cluster):
145  if(commands.interactive): commands = tvaRuntime.interactiveClusterSelection(commands, config, cluster_specs, local)
146  csIxs = cluster_specs.interpret(commands.cluster)
147  else:
148  if(commands.interactive):
149  commands = tvaRuntime.interactiveAnalysisSelection(commands, config, analyses, local)
150  aIx = analyses.interpret(commands.analysis)[0]
151  csIxs = [None]
152  for csIx in csIxs:
153  #Cluster data containers
154  if(csIx is not None):
155  cluster = cluster_specs[csIx]
156  if(commands.depth >= 2):
157  cluster.results_sample_dump = [] #for kruskal
158  cluster.results_TTC_X = []
159  cluster.results_TTC_site_means = dict((distType, dict((agMethod.label_short, dict((pmMethod.label_short, []) for pmMethod in predictionMethods_master)) for agMethod in agregationMethods_master)) for distType in distributionTypes)
160  cluster.results_TTC_site = dict((distType, dict((agMethod.label_short, dict((pmMethod.label_short, []) for pmMethod in predictionMethods_master)) for agMethod in agregationMethods_master)) for distType in distributionTypes)
161  else: cluster = [None]
162 
163 
164 
167  for cIx in range(len(cluster)):
168  if(cluster[cIx] is None): print('Processing analysis '+analyses[aIx].name+' ...')
169  else:
170  if(not cluster.labels[cIx]): cluster.labels[cIx] = 'cl_'+str(cIx+1)
171  commands.analysis = str(cluster.analyses[cIx])
172  if(cluster.xref_dbname[cIx]):
173  sites = tvaScene.Sites(config, filename_overide=cluster.xref_dbname[cIx])
174  site_analyses = tvaScene.SiteAnalyses(config, filename_overide=cluster.xref_dbname[cIx], sites=sites)
175  analyses = tvaScene.Analyses(config, filename_overide=cluster.xref_dbname[cIx])
176  else:
177  sites = tvaScene.Sites(config)
178  site_analyses = tvaScene.SiteAnalyses(config, sites=sites)
179  analyses = tvaScene.Analyses(config)
180  print('Processing analysis cases for cluster '+str(cIx+1)+' using the cluster specification "'+cluster.name+'"...')
181  clusterFolder = clusterFolderPrefix+'_'+cluster.name
182  if(not os.path.isdir(os.path.join(sites.getBaseDirectory(), config.output_folder, clusterFolder))): os.makedirs(os.path.join(config.dir, config.output_folder, clusterFolder))
183 
184 
187 
188 
191  if(not commands.hli_only):
192  tvaLib.printTimeStamp('Parsing cached data... This operation may take several hours.')
193 
194  if(commands.analysis):
195  aIx = analyses.interpret(commands.analysis)[0]
196  commands.s_analysis = analyses[aIx].site_analyses
197  try: analysisFolder = summaryFolderPrefix+'_'+tvaLib.Parse.clean_file_name(analyses[aIx].name)
198  except: analysisFolder = summaryFolderPrefix
199  if(not os.path.isdir(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder))): os.makedirs(os.path.join(config.dir, config.output_folder, analysisFolder))
200 
201 
205  alignments = []
206  mhcs = []
207  masks = []
208  masks2 = []
209  zones = []
210  loops = []
211  bounds = []
212  homoCompletion = []
213  trackCompletion = []
214  trackOldestAge = []
215  annotCompletion = []
216  gtCompletion = []
217  clCompletion = []
218  serObjCompletion = []
219  objMinversions = []
220  objMaxversions = []
221  listOfExistingUserTypes = [None]+[x for x in range(len(local['userTypeNames']))]
222 
223 
224  if(commands.depth >= 1):
225  meanSpeeds = tvaAnalysis.Measures(tvaAnalysis.MeanSpeed())
226  inFlowPLPH = tvaAnalysis.Measures(tvaAnalysis.FlowRatePerLanePerHour())
227  hourly_flows = tvaAnalysis.Measures(tvaAnalysis.MeasureByHour())
228  hourly_speed = tvaAnalysis.Measures(tvaAnalysis.MeanSpeedByHour())
229  vehKmTraveled = tvaAnalysis.Measures(tvaAnalysis.VehKmTraveled())
230 
231 
232  if(commands.depth >= 2):
233  #[Label,[oldCompletion],[NotUsed],camIndCompletion,LowestVersion,HighestVersion]
234  predictionMethods = deepcopy(predictionMethods_master)
235  agregationMethods = deepcopy(agregationMethods_master)
236  thresholds = tvaLib.drange(0.25,2.0,0.25)
237 
238 
239  for pmIx in range(len(predictionMethods)):
240  predictionMethods[pmIx].completionBySeq = []
241  predictionMethods[pmIx].completion = []
242  predictionMethods[pmIx].minVersion = []
243  predictionMethods[pmIx].maxVersion = []
244  predictionMethods[pmIx].userPairsPH = tvaAnalysis.Measures(tvaAnalysis.Exposure())
245  predictionMethods[pmIx].userPairsWIndPH = tvaAnalysis.Measures(tvaAnalysis.Exposure())
246  predictionMethods[pmIx].userPairsBH = tvaAnalysis.Measures(tvaAnalysis.MeasureByHour())
247  predictionMethods[pmIx].userPairsWIndBH = tvaAnalysis.Measures(tvaAnalysis.MeasureByHour())
248  predictionMethods[pmIx].interactionsPH = tvaAnalysis.Measures(tvaAnalysis.Exposure())
249  predictionMethods[pmIx].meanTTC = [[OrderedDict([(agregationMethod.label_short, tvaAnalysis.Measures(tvaAnalysis.Measure())) for agregationMethod in agregationMethods]) for userType2 in listOfExistingUserTypes[listOfExistingUserTypes.index(userType1):]] for userType1 in listOfExistingUserTypes]
250  predictionMethods[pmIx].freqTTC = [[OrderedDict([(agregationMethod.label_short, tvaAnalysis.Frequencies(tvaAnalysis.Frequency(binSize=freq_binsize, xend=config.disp_timehorizon))) for agregationMethod in agregationMethods]) for userType2 in listOfExistingUserTypes[listOfExistingUserTypes.index(userType1):]] for userType1 in listOfExistingUserTypes]
251  predictionMethods[pmIx].countTTCThresh = [[OrderedDict([(agregationMethod.label_short, tvaAnalysis.Measures(tvaAnalysis.IndicatorThresholds(thresholds))) for agregationMethod in agregationMethods]) for userType2 in listOfExistingUserTypes[listOfExistingUserTypes.index(userType1):]] for userType1 in listOfExistingUserTypes]
252 
253  if(cluster[cIx] is not None): cluster.results_sample_dump.append([[[] for agregationMethod in agregationMethods] for predictionMethod in predictionMethods]) #for kruskal
254 
255 
258  if(commands.depth >= 2):
259  if(commands.export_csv):
260 
261  ttcExportFile = open(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'TTC_export.csv'), 'wb')
262  writer = csv_writer(ttcExportFile, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
263  headers = ['DBID', 'SITE_ID', 'CAM_ID', 'CAM_IX', 'ANALYSIS_IX', 'HOUR', 'TTC_VAL', 'TTC_PROB', 'MEAN_SPEED_PH', 'INFLOW_PH', 'INFLOW_PHPL', 'INTERINST_ANGLE', 'INTERINST_CLASS', 'PRED_METHOD','AGG_METHOD', 'OBJ1_NUM', 'OBJ2_NUM', 'OBJ1_TYPE', 'OBJ2_TYPE', 'MEAN_SPEED_OBJ1', 'MEAN_SPEED_OBJ2', 'INS_SPEED_OBJ1', 'INS_SPEED_OBJ2', 'FIVE_MINUTE_EXPOSURE', 'TWO_MINUTE_EXPOSURE', 'ONE_MINUTE_EXPOSURE', 'FIFTEEN_SECOND_EXPOSURE']
264  writer.writerow(headers)
265 
266  upsExportFile = open(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'UPS_export.csv'), 'wb')
267  writer = csv_writer(upsExportFile, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
268  headers = ['DBID', 'SITE_ID', 'CAM_ID', 'CAM_IX', 'ANALYSIS_IX', 'HOUR', 'MEAN_SPEED_PH', 'INFLOW_PH', 'INFLOW_PHPL', 'PRED_METHOD', 'OBJ1_NUM', 'OBJ2_NUM', 'MEAN_SPEED_OBJ1', 'MEAN_SPEED_OBJ2', 'FIVE_MINUTE_EXPOSURE', 'TWO_MINUTE_EXPOSURE', 'ONE_MINUTE_EXPOSURE', 'FIFTEEN_SECOND_EXPOSURE', 'MEDIAN_INT_ANGLE', 'TTC_VAL_MEDIAN', 'TTC_PROB_MEDIAN']
269  for amIx in range(1,len(agregationMethods)): headers += ['TTC_VAL_'+agregationMethods[amIx].label_short, 'TTC_PROB_'+agregationMethods[amIx].label_short]
270  writer.writerow(headers)
271 
274  if(commands.depth >= 2):
275  runtime_figures = {}
276  for predictionMethod in predictionMethods:
277  runtime_figures['timeseries_'+str(predictionMethod.label_short)] = tvaVis.interactionTimeseries(None, None, alpha=0.01, local=local, fig_name='TTC timeseries_'+str(predictionMethod.label_short), figsize=config.figsize, verbose=commands.verbose)
278 
279 
282 
283 
284 
288  saIxs = site_analyses.interpret(commands.s_analysis)
289  if(commands.verbose == 1): prog = tvaLib.ProgressBar(0, len(saIxs), 77)
290  for saIx in saIxs:
291  commands.s_analysis = saIx+1
292  if(commands.verbose == 1): prog.updateAmount(saIx)
293  elif(commands.verbose >= 2): print site_analyses[saIx].name
294 
295 
296  try:
297  if(site_analyses[saIx].site.alignments): alignments.append(True)
298  else: alignments.append(False)
299  except AttributeError: raise Exception, [4780, 'Malformed database entry for site-analysis #'+str(site_analyses[saIx].idx)+' '+site_analyses[saIx].name+'. Please correct this database error before proceeding.']
300  if(None not in [cam.camHeight for cam in site_analyses[saIx].site] and [] not in [cam.camOrigin.data for cam in site_analyses[saIx].site]):
301  mhcs.append(True)
302  else: mhcs.append(False)
303  if(site_analyses[saIx].site.getCombinedMasks()): masks.append(True)
304  else: masks.append(False)
305  if(False in [os.path.exists(cam.getFullMaskFilename()) for cam in site_analyses[saIx].cameras]): masks2.append(False)
306  else: masks2.append(True)
307  if(site_analyses[saIx].zone): zones.append(True)
308  else: zones.append(False)
309  if(site_analyses[saIx].virtual_loops.coordinates): loops.append(True)
310  else: loops.append(False)
311  if(site_analyses[saIx].xy_bounds): bounds.append(True)
312  else: bounds.append(False)
313  homoCompletion.append(sum([1 if sacam.getHomography() else 0 for sacam in site_analyses[saIx].cameras])/float(len(site_analyses[saIx].cameras)))
314 
315 
317  for sacam in site_analyses[saIx].cameras:
318  for seq in sacam:
319  if(os.path.exists(seq.getFullDataFilename()) and not os.path.getsize(seq.getFullDataFilename())): os.remove(seq.getFullDataFilename())
320  if(os.path.exists(seq.getFullGroundTruthFilename()) and not os.path.getsize(seq.getFullGroundTruthFilename())): os.remove(seq.getFullGroundTruthFilename())
321  if(os.path.exists(seq.getFullClassifiedFilename()) and not os.path.getsize(seq.getFullClassifiedFilename())): os.remove(seq.getFullClassifiedFilename())
322  # Update analysis data
323  try: trackCompletion.append(sum([sum([os.path.exists(seq.getFullDataFilename()) for seq in sacam]) for sacam in site_analyses[saIx].cameras])/float(site_analyses[saIx].getSequenceCount()))
324  except ZeroDivisionError: trackCompletion.append(1)
325  try: trackOldestAge.append(datetime.fromtimestamp(int(min(tvaLib.flatten_list([[os.path.getmtime(seq.getFullDataFilename()) for seq in sacam if os.path.exists(seq.getFullDataFilename())] for sacam in site_analyses[saIx].cameras])))))
326  except ValueError: trackOldestAge.append(None)
327  try: annotCompletion.append(sum([sum([os.path.exists(seq.getFullAnnotationFilename()) for seq in sacam]) for sacam in site_analyses[saIx].cameras])/float(site_analyses[saIx].getSequenceCount()))
328  except ZeroDivisionError: annotCompletion.append(1)
329  try: gtCompletion.append(sum([sum([os.path.exists(seq.getFullGroundTruthFilename()) for seq in sacam]) for sacam in site_analyses[saIx].cameras])/float(site_analyses[saIx].getSequenceCount()))
330  except ZeroDivisionError: gtCompletion.append(1)
331  try: clCompletion.append(sum([sum([os.path.exists(seq.getFullClassifiedFilename()) for seq in sacam]) for sacam in site_analyses[saIx].cameras])/float(site_analyses[saIx].getSequenceCount()))
332  except ZeroDivisionError: clCompletion.append(1)
333  try: serObjCompletion.append(sum([sum([os.path.exists(seq.getFullSerialisedFilename()) for seq in sacam]) for sacam in site_analyses[saIx].cameras])/float(site_analyses[saIx].getSequenceCount()))
334  except ZeroDivisionError: serObjCompletion.append(1)
335  objMinversions.append('')
336  objMaxversions.append('')
337 
338 
339  if(commands.depth >= 1):
340  meanSpeeds.add(site_analyses[saIx])
341  inFlowPLPH.add(site_analyses[saIx])
342  hourly_flows.add(site_analyses[saIx])
343  hourly_speed.add(site_analyses[saIx])
344  vehKmTraveled.add(site_analyses[saIx])
345 
346 
347  if(commands.depth >= 2):
348  for pmIx in range(len(predictionMethods)):
349  #serial_seq_userPairs[indicator][3].append(0)
350  predictionMethods[pmIx].completion.append(0)
351  predictionMethods[pmIx].minVersion.append('')
352  predictionMethods[pmIx].maxVersion.append('')
353  predictionMethods[pmIx].userPairsPH.add(site_analyses[saIx])
354  predictionMethods[pmIx].userPairsWIndPH.add(site_analyses[saIx])
355  predictionMethods[pmIx].userPairsBH.add(site_analyses[saIx])
356  predictionMethods[pmIx].userPairsWIndBH.add(site_analyses[saIx])
357  predictionMethods[pmIx].interactionsPH.add(site_analyses[saIx])
358  for agregationMethod in agregationMethods:
359  for utIx1 in range(len(listOfExistingUserTypes)):
360  for utIx2 in range(len(listOfExistingUserTypes[listOfExistingUserTypes.index(userType1):])):
361  predictionMethods[pmIx].meanTTC[utIx1][utIx2][agregationMethod.label_short].add(site_analyses[saIx])
362  predictionMethods[pmIx].freqTTC[utIx1][utIx2][agregationMethod.label_short].add(site_analyses[saIx])
363  predictionMethods[pmIx].countTTCThresh[utIx1][utIx2][agregationMethod.label_short].add(site_analyses[saIx])
364 
365 
366  siteIxs = tvaRuntime.targetSiteIxs(commands, sites, site_analyses[saIx])
367  for siteIx in siteIxs:
368 
369  camIxs = tvaRuntime.targetCameraIxs(commands, sites[siteIx], site_analyses[saIx])
370  for camIx in camIxs:
371 
372  fileIxs = tvaRuntime.targetSequenceIxs(commands, sites[siteIx][camIx], site_analyses[saIx])
373  for fileIx in fileIxs:
374 
375 
378  if(os.path.exists(sites[siteIx][camIx][fileIx].getFullSerialisedFilename())):
379  with open(sites[siteIx][camIx][fileIx].getFullSerialisedFilename(), 'rb') as input_data:
380  try:
381  s_version = pickle.load(input_data)
382  if(commands.verbose >= 2): print ' '+sites[siteIx][camIx].name+'/'+sites[siteIx][camIx][fileIx].name+'->objects ['+s_version+']'
383  if(tvaLib.Parse.versionIsMoreRecent(minimumVersion, s_version, minimumVersionDepth)): continue
384  objects = pickle.load(input_data)
385  except EOFError:
386  if(commands.verbose): tvaLib.printWarning('Serialised trajectory data for sequence '+sites[siteIx].name+'/'+sites[siteIx][camIx].name+'/'+sites[siteIx][camIx][fileIx].name+' is physically corrupt. Serialised data will be automatically flushed and this sequence will be skipped.', local['gen_warning'])
387  os.unlink(sites[siteIx][camIx][fileIx].getFullSerialisedFilename())
388  continue
389 
390 
391  objects, _ = tvaFilter.filtBoundingBox(objects, site_analyses[saIx].zone, containment_threshold=config.f_bb_containment_threshold, max_outside_dist=config.f_bb_max_outside_dist, loopback_verification_frames=config.f_bb_loopback_ver_frames, indent=12, verbose=commands.verbose)
392  reportedAlignIdxs = tvaFilter.getReportedAlignIdxs(objects)
393 
394 
396  if(True in [align not in range(len(sites[siteIx].alignments)) for align in reportedAlignIdxs]):
397  if(commands.verbose): tvaLib.printWarning('Serialised curvilinear trajectory data for sequence '+sites[siteIx].name+'/'+sites[siteIx][camIx].name+'/'+sites[siteIx][camIx][fileIx].name+' does not match existing alignment metadata. Serialised data will be automatically flushed and this sequence will be skipped.', local['gen_warning'])
398  os.unlink(sites[siteIx][camIx][fileIx].getFullSerialisedFilename())
399  continue
400 
401 
402  if(not objMinversions[-1]): objMinversions[-1] = s_version
403  elif(tvaLib.Parse.versionIsMoreRecent(s_version, objMinversions[-1])): objMinversions[-1] = s_version
404  if(not objMaxversions[-1]): objMaxversions[-1] = s_version
405  elif(tvaLib.Parse.versionIsMoreRecent(objMaxversions[-1], s_version)): objMaxversions[-1] = s_version
406 
407 
408  if(commands.depth >= 1):
409  meanSpeeds.increment(objects, speed_conv=sites[siteIx][camIx].camera.frameRate*config.mps_kmh)
410  inFlowPLPH.increment(objects, duration=sites[siteIx][camIx][fileIx].duration, location=0)
411  hourly_flows.increment([datum.curvilinearPositions.getLanes()[0] for datum in objects], startTimes=[datum.getFirstInstant() for datum in objects], startTime=sites[siteIx][camIx][fileIx].startTime, duration=sites[siteIx][camIx][fileIx].duration, framerate=sites[siteIx][camIx].camera.frameRate)
412  hourly_speed.increment(objects, startTime=sites[siteIx][camIx][fileIx].startTime, duration=sites[siteIx][camIx][fileIx].duration, framerate=sites[siteIx][camIx].camera.frameRate)
413  vehKmTraveled.increment(objects)
414  # Update internal metadata
415  sites[siteIx][camIx][fileIx].setCountsFromObjects(objects)
416 
417 
420  if(commands.depth >= 2):
421  for pmIx in range(len(predictionMethods)):
422  filename = predictionMethods[pmIx].label_short+'_'+sites[siteIx][camIx].name.replace('\\','').replace('/','')+'_'+sites[siteIx][camIx][fileIx].name+'.upairs'
423  if(not os.path.exists(os.path.join(site_analyses[saIx].getFullResultsFolder(), filename))): predictionMethods[pmIx].completionBySeq.append(False)
424  else:
425  with open(os.path.join(site_analyses[saIx].getFullResultsFolder(), filename), 'rb') as input_data:
426  try:
427  s_version = pickle.load(input_data)
428  if(commands.verbose >= 2): print (' '+sites[siteIx][camIx][fileIx].name+'->'+predictionMethods[pmIx].label+' ['+s_version+']')
429  if(tvaLib.Parse.versionIsMoreRecent(minimumVersion, s_version, minimumVersionDepth)): continue
430  _ = pickle.load(input_data)
431  seq_userPairs = pickle.load(input_data)
432  except AttributeError:
433  if(commands.verbose >= 2): tvaLib.printWarning('Interaction data for prediction method "'+predictionMethods[pmIx].label+'" corrupted or possibly out of date. Ignoring.', local['gen_warning'])
434  continue
435 
436 
437 
438  if(commands.verbose >= 2): print (' Repopulating road user object memory...')
439  try:
440  seq_userPairs.repopulateObjectData(objects)
441  seq_userPairs[0].roadUser1.getUserType()
442  except:
443  if(commands.verbose >= 2): tvaLib.printWarning('Failed to rebuild road user object memory. Trajectory or interaction data may be corrupted or out of date.', local['gen_warning'])
444 
445 
446  if(not predictionMethods[pmIx].minVersion[-1]): predictionMethods[pmIx].minVersion[-1] = s_version
447  elif(tvaLib.Parse.versionIsMoreRecent(s_version, predictionMethods[pmIx].minVersion[-1])): predictionMethods[pmIx].minVersion[-1] = s_version
448  if(not predictionMethods[pmIx].maxVersion[-1]): predictionMethods[pmIx].maxVersion[-1] = s_version
449  elif(tvaLib.Parse.versionIsMoreRecent(predictionMethods[pmIx].maxVersion[-1], s_version)): predictionMethods[pmIx].maxVersion[-1] = s_version
450 
451  predictionMethods[pmIx].completion[-1] += 1.0/float(site_analyses[saIx].getSequenceCount())
452  predictionMethods[pmIx].completionBySeq.append(True)
453 
454 
455  if(commands.verbose >= 2): print(' Working user pairs per hour')
456  predictionMethods[pmIx].userPairsPH.increment(len(seq_userPairs.data), duration=sites[siteIx][camIx][fileIx].duration)
457  predictionMethods[pmIx].userPairsWIndPH.increment(seq_userPairs.getIntWTTCCount(), duration=sites[siteIx][camIx][fileIx].duration)
458  predictionMethods[pmIx].userPairsBH.increment([1 for x in seq_userPairs.data], startTimes=[x.getFirstInstant() for x in seq_userPairs.data], startTime=sites[siteIx][camIx][fileIx].startTime, duration=sites[siteIx][camIx][fileIx].duration, framerate=sites[siteIx][camIx].camera.frameRate)
459  predictionMethods[pmIx].userPairsWIndBH.increment([1 for x in seq_userPairs.data if x.getPointList()], startTimes=[x.getFirstInstant() for x in seq_userPairs.data if x.getPointList()], startTime=sites[siteIx][camIx][fileIx].startTime, duration=sites[siteIx][camIx][fileIx].duration, framerate=sites[siteIx][camIx].camera.frameRate)
460  if(commands.verbose >= 3): print(' '+str(len(seq_userPairs.data)))
461  if(commands.verbose >= 2): print(' Working interactions per hour')
462  predictionMethods[pmIx].interactionsPH.increment(len(seq_userPairs.getPointList()), duration=sites[siteIx][camIx][fileIx].duration)
463  if(commands.verbose >= 3): print(' '+str(len(seq_userPairs.getPointList())))
464 
465 
466  sites[siteIx][camIx][fileIx].setCountExpPairs(len(seq_userPairs))
467  sites[siteIx][camIx][fileIx].setCountExpInstances(len(seq_userPairs.getPointList()))
468  sites[siteIx][camIx][fileIx].setCountExpPairsWInd(seq_userPairs.getIntWTTCCount())
469  sites[siteIx][camIx][fileIx].setCountExpInstancesWInd(seq_userPairs.getInstantWIndicatorCount())
470 
471 
472  for agregationMethod in agregationMethods:
473  for utIx1 in range(len(listOfExistingUserTypes)):
474  for utIx2 in range(len(listOfExistingUserTypes[listOfExistingUserTypes.index(userType1):])):
475  try:
476  result = seq_userPairs.getPointList(userType1=utIx1, userType2=utIx2, ptype='CP', method=agregationMethod.method, percentile=agregationMethod.percentile, minimumProbability=config.col_probability_threshold, format='columns')
477  except: import pdb; pdb.set_trace()
478  if(result):
479  if(commands.verbose >= 2): print(' Working mean TTCs ('+agregationMethod.label+')')
480  predictionMethods[pmIx].meanTTC[utIx1][utIx2][agregationMethod.label_short].increment([x/float(sites[siteIx][camIx].camera.frameRate) for x in result[0]], result[3])
481  if(commands.verbose >= 3):
482  print(' ', sum([x/float(sites[siteIx][camIx].camera.frameRate) for x in result[0]])/len(result[0]), len(result[0]))
483  print(' ', predictionMethods[pmIx].meanTTC[utIx1][utIx2][agregationMethod.label_short][-1].value, predictionMethods[pmIx].meanTTC[utIx1][utIx2][agregationMethod.label_short][-1].depth)
484  if(commands.verbose >= 2): print(' Working TTC frequency ('+agregationMethod.label+')')
485  predictionMethods[pmIx].freqTTC[utIx1][utIx2][agregationMethod.label_short].increment([x/float(sites[siteIx][camIx].camera.frameRate) for x in result[0]], result[3])
486  if(commands.verbose >= 2): print(' Working TTC thresholds ('+agregationMethod.label+')')
487  predictionMethods[pmIx].countTTCThresh[utIx1][utIx2][agregationMethod.label_short].increment([x/float(sites[siteIx][camIx].camera.frameRate) for x in result[0]], [x for x in result[3]])
488 
489 
490  for amIx in range(len(agregationMethods)):
491  if(commands.verbose >= 4): print(' Preparing kruskal-wallis data ('+agregationMethods[amIx].label+')')
492  result = seq_userPairs.getPointList(ptype='CP', method=agregationMethods[amIx].method, percentile=agregationMethods[amIx].percentile, minimumProbability=config.col_probability_threshold, format='points')
493  #If new site, start new list of samples
494  if(cluster[cIx] is not None):
495  if(fileIx == 0): cluster.results_sample_dump[-1][pmIx][amIx].append([])
496  for sample in range(per_site_sampling_size):
497  try: cluster.results_sample_dump[-1][pmIx][amIx][-1].append(result[sample][0]/float(sites[siteIx][camIx].camera.frameRate))
498  except IndexError: break
499 
500 
501  if(commands.export_csv):
502  if(commands.verbose >= 2): print(' Exporting TTCs...')
503 
505  for amIx in range(len(agregationMethods)):
506  writer = csv_writer(ttcExportFile, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
507  result = seq_userPairs.getPointList(ptype='CP', method=agregationMethods[amIx].method, percentile=agregationMethods[amIx].percentile, minimumProbability=config.col_probability_threshold, format='points')
508  for point in result:
509  if(commands.verbose >= 4 and point[3] > 1): tvaLib.printWarning('High TTC probability detected '+str(point[3]), local['gen_warning'])
510 
511  writeData = [sites[siteIx].description, #DBID
512  sites[siteIx].idx, #SITE_ID
513  sites[siteIx][camIx].idx, #CAM_ID
514  camIx+1, #CAM_IX
515  saIx+1, #ANALYSIS_IX
516  sites[siteIx][camIx][fileIx].getHour(), #HOUR
517  point[0]/float(sites[siteIx][camIx].camera.frameRate), #TTC_VAL
518  point[3], #TTC_PROB
519  hourly_speed[-1].getCurve(type='mean')[sites[siteIx][camIx][fileIx].getHour()], #MEAN_SPEED_PH
520  hourly_flows[-1].getCurve()[sites[siteIx][camIx][fileIx].getHour()], #INFLOW_PH
521  hourly_flows[-1].getCurve(divideByValueVariety=True)[sites[siteIx][camIx][fileIx].getHour()]] #INFLOW_PHPL
522  try: writeData += [point[7].getIncidenceAngle()] #INTERINST_ANGLE
523  except: writeData += ['']
524  try: writeData += [point[7].getClassification(intInsClassMethods_master).label_short] #INTERINST_CLASS
525  except: writeData += ['']
526  writeData += [predictionMethods[pmIx].label_short] #PRED_METHOD
527  writeData += [agregationMethods[amIx].label_short] #AGG_METHOD
528 
529  try:
530  obj1_index = tvaLib.Obj.num2ind(objects, point[5])
531  obj2_index = tvaLib.Obj.num2ind(objects, point[6])
532  obj1_t_index = point[4]-objects[obj1_index].getFirstInstant()
533  obj2_t_index = point[4]-objects[obj2_index].getFirstInstant()
534  writeData += [objects[obj1_index].num, #OBJ1_NUM
535  objects[obj2_index].num, #OBJ2_NUM
536  objects[obj1_index].getUserType(), #OBJ1_TYPE
537  objects[obj2_index].getUserType(), #OBJ2_TYPE
538  sum(objects[obj1_index].velocities.positions[2])/float(len(objects[obj1_index].velocities.positions[2]))*sites[siteIx][camIx].camera.frameRate*config.mps_kmh, #MEAN_SPEED_OBJ1
539  sum(objects[obj2_index].velocities.positions[2])/float(len(objects[obj2_index].velocities.positions[2]))*sites[siteIx][camIx].camera.frameRate*config.mps_kmh, #MEAN_SPEED_OBJ2
540  objects[obj1_index].velocities.positions[2][obj1_t_index]*sites[siteIx][camIx].camera.frameRate*config.mps_kmh, #INS_SPEED_OBJ1
541  objects[obj2_index].velocities.positions[2][obj2_t_index]*sites[siteIx][camIx].camera.frameRate*config.mps_kmh] #INS_SPEED_OBJ2
542  except:
543  writeData += ['','','','','','']
544  if(commands.verbose >= 2): tvaLib.printWarning('Trajectory data ommited during TTC exporting process for '+sites[siteIx].name+'/'+sites[siteIx][camIx].name+'/'+sites[siteIx][camIx][fileIx].name+' because conflict data is missing trajectory identifiers ('+predictionMethods[pmIx].label+' '+agregationMethods[amIx].label+').', local['gen_warning'], indent=4)
545 
546  try: writeData += [seq_userPairs.exposure['5 minutes'], #FIVE_MINUTE_EXPOSURE
547  seq_userPairs.exposure['2 minutes'], #TWO_MINUTE_EXPOSURE
548  seq_userPairs.exposure['1 minute'], #ONE_MINUTE_EXPOSURE
549  seq_userPairs.exposure['15 seconds']] #FIFTEEN_SECOND_EXPOSURE
550  except: writeData += ['','','','']
551 
552  writer.writerow(writeData)
553 
554 
556  if(commands.verbose >= 2): print(' Exporting user pairs')
557  writer = csv_writer(upsExportFile, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
558  for userPair in seq_userPairs:
559  if(userPair.getInstantWIndicatorCount()):
560 
561  writeData = [sites[siteIx].description, #SITE_DBID
562  sites[siteIx].idx, #SITE_ID
563  sites[siteIx][camIx].idx, #CAM_ID
564  camIx+1, #CAM_IX
565  saIx+1, #ANALYSIS_IX
566  sites[siteIx][camIx][fileIx].getHour(), #HOUR
567  hourly_speed[-1].getCurve(type='mean')[sites[siteIx][camIx][fileIx].getHour()], #MEAN_SPEED_PH
568  hourly_flows[-1].getCurve()[sites[siteIx][camIx][fileIx].getHour()], #INFLOW_PH
569  hourly_flows[-1].getCurve(divideByValueVariety=True)[sites[siteIx][camIx][fileIx].getHour()]] #INFLOW_PHPL
570  writeData += [predictionMethods[pmIx].label_short] #PRED_METHOD
571 
572  try:
573  writeData += [userPair.roadUser1.num, #OBJ1_NUM
574  userPair.roadUser2.num, #OBJ2_NUM
575  userPair.roadUser1.getUserType(), #OBJ1_TYPE
576  userPair.roadUser2.getUserType(), #OBJ2_TYPE
577  sum(userPair.roadUser1.velocities.positions[2])/float(len(userPair.roadUser1.velocities.positions[2]))*sites[siteIx][camIx].camera.frameRate*config.mps_kmh, #MEAN_SPEED_OBJ1
578  sum(userPair.roadUser2.velocities.positions[2])/float(len(userPair.roadUser2.velocities.positions[2]))*sites[siteIx][camIx].camera.frameRate*config.mps_kmh] #MEAN_SPEED_OBJ2
579  except:
580  writeData += ['','','','']
581  if(commands.verbose >= 2): tvaLib.printWarning('Trajectory data ommited during UPS exporting process for '+sites[siteIx].name+'/'+sites[siteIx][camIx].name+'/'+sites[siteIx][camIx][fileIx].name+' because conflict data is missing trajectory identifiers ('+predictionMethods[pmIx].label+').', local['gen_warning'], indent=4)
582 
583  try: writeData += [userPair.exposure['5 minutes'], #FIVE_MINUTE_EXPOSURE
584  userPair.exposure['2 minutes'], #TWO_MINUTE_EXPOSURE
585  userPair.exposure['1 minute'], #ONE_MINUTE_EXPOSURE
586  userPair.exposure['15 seconds']] #FIFTEEN_SECOND_EXPOSURE
587  except: writeData += ['','','','']
588 
589  writeData += [userPair.getAggregatedInterAngle(percentile=0.50)] #MEDIAN_INT_ANGLE
590  try: writeData += [userPair.getAggregatedPointList(method=1, percentile=0.50)[0], #TTC_VAL_MEDIAN
591  userPair.getAggregatedPointList(method=1, percentile=0.50)[3]] #TTC_PROB_MEDIAN
592  except: writeData += ['','']
593  for amIx in range(1,len(agregationMethods)):
594  try: writeData += [userPair.getAggregatedPointList(method=agregationMethods[amIx].method, percentile=agregationMethods[amIx].percentile)[0],
595  userPair.getAggregatedPointList(method=agregationMethods[amIx].method, percentile=agregationMethods[amIx].percentile)[3]]
596  except: writeData += ['','']
597 
598  writer.writerow(writeData)
599 
600 
601 
602  if(commands.fig_save_advanced):
603  if(commands.verbose >= 3): print(' Updating timeseries figures...')
604  runtime_figures['timeseries_'+str(predictionMethods[pmIx].label_short)].addTimeseries(seq_userPairs, sites[siteIx][camIx].camera.frameRate)
605  '''
606  ## Produce site-figures
607  if(commands.depth >= 2 and site_distros):
608  fig = tvaVis.Analysis.siteTTC([[x.freqTTC[agregationMethod.label_short] for x in predictionMethods[pmIx]] for key,agregationMethod in agregationMethods], labels=[[agregationMethod.label, [predictionMethod.label for predictionMethod in predictionMethods]] for agregationMethod in agregationMethods], timehorizon=config.disp_timehorizon, fig_name='TTC cdfs for '+sites[siteIx].name+'/'+sites[siteIx][camIx].name, verbose=commands.verbose)
609  tvaVis.Save(fig, os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'Site-figures'), fig_format=commands.fig_format, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
610  '''
611 
612 
615 
616 
619  if(commands.depth >= 2):
620  if(commands.export_csv):
621  ttcExportFile.close()
622  upsExportFile.close()
623 
624 
625 
628  tvaLib.printTimeStamp('Processing statistical tests...')
629 
630  if(commands.depth >= 2):
631 
632 
633  #kstests[predictionMethod][aggregationMethod][obs]
634  kstests = []
635  f = 0
636  for pmIx in range(len(predictionMethods)):
637  kstests_ = {}
638  for aggMethod_label, freqMeasure in predictionMethods[pmIx].freqTTC[0][0].iteritems():
639  ksVector = []
640  for obs1 in range(len(freqMeasure)):
641  #Generate average frequency for all other sites
642  ydata_ = None
643  for obs2 in range(len(freqMeasure)):
644  if(obs1 is obs2): continue
645  ydata = freqMeasure[obs2].getPDF()
646  if(not ydata): continue
647  if(not ydata_): ydata_ = ydata
648  else: ydata_ = [x+y for x, y in zip(ydata,ydata_)]
649  if(not ydata): continue
650 
651  if(not ydata_):
652  ksVector.append(None)
653  continue
654 
655  frequency = [x/float(sum(ydata_)) for x in ydata_]
656  cdf = []
657  for i in range(len(frequency)):
658  cdf.append(sum(frequency[0:i+1]))
659  ksVector.append(freqMeasure[obs1].ksTestWith(cdf))
660 
661  kstests_[aggMethod_label] = ksVector
662  kstests.append(kstests_)
663 
664 
665 
666 
669  print('==Metadata statistics======================')
670  print(' Alignments: '+tvaAnalysis.rateString(sum(alignments), len(alignments)))
671  print(' Mast height calibrations: '+tvaAnalysis.rateString(sum(mhcs), len(mhcs)))
672  print(' Masks: '+tvaAnalysis.rateString(sum(masks), len(masks)))
673  print(' Masks (Tracking): '+tvaAnalysis.rateString(sum(masks2), len(masks2)))
674  print(' Zones: '+tvaAnalysis.rateString(sum(zones), len(zones)))
675  print(' Homographies: '+tvaAnalysis.rateString(sum(homoCompletion), len(homoCompletion)))
676  print(' Virtual loops: '+tvaAnalysis.rateString(sum(loops), len(loops)))
677  print(' XY Bounds: '+tvaAnalysis.rateString(sum(bounds), len(bounds)))
678  print(' Video tracking progress: '+tvaAnalysis.rateString(sum(trackCompletion), len(trackCompletion)))
679  print(' Video annotation progress: '+tvaAnalysis.rateString(sum(annotCompletion), len(annotCompletion)))
680  print(' Video GT progress: '+tvaAnalysis.rateString(sum(gtCompletion), len(gtCompletion)))
681  print(' Video CL progress: '+tvaAnalysis.rateString(sum(clCompletion), len(clCompletion)))
682  print(' Object serialisation: '+tvaAnalysis.rateString(sum(serObjCompletion), len(serObjCompletion)))
683  print(' XY Bounds: '+tvaAnalysis.rateString(sum(bounds), len(bounds)))
684 
685 
686  if(commands.depth >= 1 and commands.verbose >= 2):
687  meanSpeeds.printResult(label='mean speed', round_=2)
688  inFlowPLPH.printResult(label='inflow per hour per lane', round_=2)
689  vehKmTraveled.printResult(label='vehicle-kilometers traveled', round_=2)
690 
691 
692  if(commands.depth >= 2):
693  for predictionMethod in predictionMethods:
694  if(not True in predictionMethod.completionBySeq):
695  tvaLib.printWarning('No indicators of type "'+predictionMethod.label+'" found', local['gen_warning'])
696  continue
697  print('=='+predictionMethod.label+'======================')
698  print(' Version compliancy rate: '+tvaAnalysis.rateString(sum(predictionMethod.completionBySeq), len(predictionMethod.completionBySeq)))
699  if(commands.verbose >= 2):
700  predictionMethod.userPairsPH.printResult(label='user pair counts per hour', round_=2)
701  predictionMethod.userPairsWIndPH.printResult(label='user pair counts with indicators per hour', round_=2)
702  predictionMethod.interactionsPH.printResult(label='interaction counts per hour ('+predictionMethod.label+')', round_=2)
703 
704 
705 
709  if(commands.fig_save):
710 
711 
714  if(commands.verbose): tvaLib.printTimeStamp('Generating figures...')
715  plotSettings = tvaVis.plotSettings(style=commands.fig_style, size=config.plot_text_size, family=config.font_family, verbose=commands.verbose)
716  if(commands.fig_save_lan_a): fig_lan_suffix = config.language
717  else: fig_lan_suffix = None
718  method = 1
719 
720 
721 
724  if(commands.verbose >= 2): print(' Data sampling...')
725 
726  figures = []
727  figures.append(tvaVis.Analysis.plotTODSamplingDistribution(site_analyses, saIxs, local=local, verbose=commands.verbose))
728 
729 
730  tvaVis.Save(figures, os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder), fig_format=commands.fig_format, fig_lan_suffix=fig_lan_suffix, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
731  tvaVis.close()
732 
733 
736  if(commands.depth >= 1):
737  if(commands.verbose >= 2): print(' Traffic data for individual sites...')
738 
739  figures = []
740  for hourly_flows_curves,hourly_speed_curves in zip(hourly_flows,hourly_speed):
741  figures.append(tvaVis.hourlyFlows([hourly_flows_curves.getCurve(bins=96, divideByValueVariety=True)], speed_data=[hourly_speed_curves.getCurve(bins=96, type='mean')], speed_data_stds=[hourly_speed_curves.getCurve(bins=96, type='std')], sm_scale=[0,config.speed_map_u_scale], local=local, fig_name='Hourly flows '+hourly_flows_curves.metadata.name))
742 
743 
744  tvaVis.Save(figures, os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, siteSummaryPrefix), fig_format=commands.fig_format, fig_lan_suffix=fig_lan_suffix, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
745  tvaVis.close()
746 
747 
750  if(commands.depth >= 2):
751  if(commands.verbose >= 2): print(' Conflict/SSM results...')
752 
753  figures = [x.get() for x in runtime_figures.values()]
754  figures_grid = []
755  labels = {}
756  freqTTCdatas = []
757  freqTTCklusters = []
758 
759 
760  for userType1 in range(len(listOfExistingUserTypes)):
761  for userType2 in range(len(listOfExistingUserTypes[range(len(listOfExistingUserTypes)).index(userType1):])):
762 
763 
764  if(userType1 == 0):
765  if(userType2 == 0): labels['userType'] = ''
766  else: labels['userType'] = local['userTypeNames'][listOfExistingUserTypes[userType2]]
767  else:
768  if(userType2 == 0): labels['userType'] = local['userTypeNames'][listOfExistingUserTypes[userType1]]
769  else: labels['userType'] = local['userTypeNames'][listOfExistingUserTypes[userType1]]+'-'+local['userTypeNames'][listOfExistingUserTypes[userType2]]
770 
771 
772  for predictionMethod in predictionMethods:
773  if(True not in predictionMethod.completionBySeq): continue
774  labels['predictionMethod'] = str(predictionMethod.label_short)
775 
776 
777  for distro_type in distributionTypes:
778  labels['distro_type'] = distro_type
779 
780 
781  for freqTTCByAggMethod, agregationMethod in zip([predictionMethod.freqTTC[userType1][userType2][agregationMethod.label_short] for agregationMethod in agregationMethods],[x for x in agregationMethods_master]):
782  if(sum(freqTTCByAggMethod.getDepths()) == 0): continue
783  labels['agregationMethod'] = str(agregationMethod.label_short)
784  if(commands.verbose >= 2): print(' Figure '+'TTC_'+'_'.join([labels[i] for i in labels if labels[i] != '']))
785  #if(distro_type=='cdf'): freqTTCByAggMethod = list(tvaLib.Math.running_sum(freqTTCByAggMethod)) #should be taken care of in figure
786  fig, x_range, mean = tvaVis.Analysis.plotTTCdistros(freqTTCByAggMethod, klusters=freqTTCByAggMethod.clusterByKStest(nklusters), method=method, dist_type=distro_type, timehorizon=config.disp_timehorizon, labelSampleSize=labelTTCSampleSize, local=local, fig_name='TTC_'+'_'.join([labels[i] for i in labels if labels[i] != '']), verbose=commands.verbose)
787  figures.append(fig)
788  #figures.append(tvaVis.Analysis.plotIndicatorCounts())
789  if(cluster[cIx] is not None and x_range and mean):
790  cluster.results_TTC_X = x_range
791  if(distro_type == 'cdf'):
792  cluster.results_TTC_site_means[distro_type][agregationMethod.label_short][predictionMethod.label_short].append(list(tvaLib.Math.running_sum(mean)))
793  if(cluster.plot_sites): cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short].append([datum.getCDF() for datum in freqTTCByAggMethod])
794  else:
795  cluster.results_TTC_site_means[distro_type][agregationMethod.label_short][predictionMethod.label_short].append(mean)
796  if(cluster.plot_sites): cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short].append([datum.getPDF() for datum in freqTTCByAggMethod])
797 
798 
799 
800  for agregationMethod_label_short,freqTTCByAggMethod in predictionMethod.freqTTC[userType1][userType2].iteritems():
801  freqTTCdatas.append(freqTTCByAggMethod)
802  freqTTCklusters.append(freqTTCByAggMethod.clusterByKStest(nklusters))
803  if(len(freqTTCdatas) == 12):
804  for distro_type in distributionTypes:
805  figures_grid.append(tvaVis.Analysis.plotTTCdistros_grid(freqTTCdatas, freqTTCklusters, method=method, dist_type=distro_type, timehorizon=config.disp_timehorizon, labelSampleSize=labelTTCSampleSize, local=local, fig_name='TTC_'+'_'.join([labels['userType'], distro_type, 'grid']), verbose=commands.verbose, fontsize=config.plot_text_size))
806  figures_grid.append(tvaVis.Analysis.plotTTCdistros_grid(freqTTCdatas, freqTTCklusters, method=method, dist_type=distro_type, timehorizon=config.disp_timehorizon, labelSampleSize=labelTTCSampleSize, shadeBySampleSize=False, local=local, fig_name='TTC_'+'_'.join([labels['userType'], distro_type, 'grid_b']), verbose=commands.verbose, fontsize=config.plot_text_size))
807 
808 
809  tvaVis.Save(figures, os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, indDistPrefix), fig_format=commands.fig_format, fig_lan_suffix=fig_lan_suffix, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
810  tvaVis.Save(figures_grid, os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, indDistPrefix, 'grid'), fig_format=commands.fig_format, fig_lan_suffix=fig_lan_suffix, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
811  tvaVis.close()
812 
813 
814 
817  if(commands.verbose): tvaLib.printTimeStamp('Exporting data to CSVs...')
818 
819 
820 
823  try:
824  with open(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'report.csv'), 'wb') as f:
825  writer = csv_writer(f, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
826 
827 
828  headers = ['ANALYSIS_NAME', 'SAIX', 'DBID', 'SEQUENCES_SCHEDULED', 'START_DATE', 'DURATION', 'HAS_ALIGNMENTS', 'HAS_MHC', 'HAS_MASK', 'HAS_MASK-TRACK', 'HAS_ZONE', 'HAS_HOMOGRAPHIES', 'HAS_LOOPS', 'HAS_BOUNDS', 'TRACKING_COMPLETION', 'TRACKING_AGE', 'ANNOTATION_COMPLETION', 'GT_COMPLETION', 'CL_COMPLETION', 'SERIALISATION_COMPLETION']
829  if(commands.depth >= 1): headers += ['MEAN_SPEED','MEAN_SPEED_RANK','INFLOW_PHPL','INFLOW_PHPL_RANK','TOTAL_VEHKM','TOTAL_VEHKM_RANK','AADT','AADC','AADP','AADB']
830  if(commands.depth >= 2):
831  for predictionMethod in predictionMethods:
832  if(True not in predictionMethod.completionBySeq): continue
833  headers += ['IND_COMPLETION-'+predictionMethod.label,'LOW-VERSION-'+predictionMethod.label,'HIGH-VERSION-'+predictionMethod.label]
834  if(predictionMethod.label_short == 'cmp'):
835  headers.append('DMP-VERSION-'+predictionMethod.label)
836  headers += ['AADPE','AADIE','AADPIE','AADIIE','USER_PAIR_PH','|']
837  for predictionMethod in predictionMethods:
838  if(True not in predictionMethod.completionBySeq): continue
839  headers += ['USER_PAIR_W_IND_PH-'+predictionMethod.label,'INTERACTIONS_PH-'+predictionMethod.label,'INTERACTIONS_PH_RANK-'+predictionMethod.label]
840  for agregationMethod in agregationMethods:
841  headers.append('MEAN_TTC_'+agregationMethod.label_short+'-'+predictionMethod.label)
842  headers.append('MEAN_TTC_'+agregationMethod.label_short+'_RANK-'+predictionMethod.label)
843  headers.append('KS_STAT_TTC_'+agregationMethod.label_short+'-'+predictionMethod.label)
844  headers.append('|')
845  writer.writerow(headers)
846 
847 
848 
849  datasetIx = 0
850  for saIx in saIxs:
851 
852  data = [site_analyses[saIx].name,
853  saIx+1,
854  site_analyses[saIx].site.description,
855  site_analyses[saIx].getSequenceCount(),
856  site_analyses[saIx].getStartTime(),
857  site_analyses[saIx].getDuration(),
858  int(alignments[datasetIx]),
859  int(mhcs[datasetIx]),
860  int(masks[datasetIx]),
861  int(masks2[datasetIx]),
862  int(zones[datasetIx]),
863  homoCompletion[datasetIx],
864  int(loops[datasetIx]),
865  int(bounds[datasetIx]),
866  round(trackCompletion[datasetIx], 2),
867  trackOldestAge[datasetIx].strftime('%Y-%m-%d %H:%M:%S') if trackOldestAge[datasetIx] is not None else '',
868  round(annotCompletion[datasetIx], 2),
869  round(gtCompletion[datasetIx], 2),
870  round(clCompletion[datasetIx], 2),
871  round(serObjCompletion[datasetIx], 2)]
872  if(commands.depth >= 1):
873  data += [round(meanSpeeds[datasetIx].value, 2),
874  meanSpeeds.getSiteRankBySaIx(site_analyses[saIx].idx),
875  round(inFlowPLPH[datasetIx].value, 2),
876  inFlowPLPH.getSiteRankBySaIx(site_analyses[saIx].idx),
877  round(vehKmTraveled[datasetIx].value, 2),
878  vehKmTraveled.getSiteRankBySaIx(site_analyses[siteIx].idx),
879  site_analyses[saIx].getAADT(exposure='traffic'),
880  site_analyses[saIx].getAADT(exposure=1),
881  site_analyses[saIx].getAADT(exposure=2),
882  site_analyses[saIx].getAADT(exposure=4)]
883  if(commands.depth >= 2):
884  for predictionMethod in predictionMethods:
885  if(True not in predictionMethod.completionBySeq): continue
886  data.append(predictionMethod.completion[datasetIx])
887  data.append(predictionMethod.minVersion[datasetIx])
888  data.append(predictionMethod.maxVersion[datasetIx])
889  if(predictionMethod.label_short == 'cmp'): data.append(site_analyses[saIx].findLowestMPversion())
890  data.append(site_analyses[saIx].getAADT(exposure='pair'))
891  data.append(site_analyses[saIx].getAADT(exposure='instance'))
892  data.append(site_analyses[saIx].getAADT(exposure='pairind'))
893  data.append(site_analyses[saIx].getAADT(exposure='instanceind'))
894  data.append(predictionMethods[0].userPairsPH[datasetIx].value)
895  data.append('|')
896  for predictionMethod in predictionMethods:
897  if(True not in predictionMethod.completionBySeq): continue
898  data.append(predictionMethod.userPairsWIndPH[datasetIx].value)
899  data.append(predictionMethod.interactionsPH[datasetIx].value)
900  data.append(predictionMethod.interactionsPH.getSiteRankBySaIx(site_analyses[saIx].idx))
901  for agregationMethod in agregationMethods:
902  data.append(predictionMethod.meanTTC[0][0][agregationMethod.label_short][datasetIx].value)
903  data.append(predictionMethod.meanTTC[0][0][agregationMethod.label_short].getSiteRankBySaIx(site_analyses[saIx].idx))
904  data.append(kstests[predictionMethods.index(predictionMethod)][agregationMethod.label_short][datasetIx])
905  data.append('|')
906  writer.writerow(data)
907  datasetIx += 1
908  except IOError: tvaLib.printWarning(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'report.csv')+' could not be opened for saving.', local['gen_warning'])
909 
910 
911 
914  try:
915  with open(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'report_hourly.csv'), 'wb') as f:
916  writer = csv_writer(f, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
917 
918 
919  headers = ['ANALYSIS_NAME', 'SAIX', 'DBID', 'SEQUENCES_SCHEDULED', 'HOUR', 'HAS_ALIGNMENTS', 'HAS_MHC', 'HAS_MASK', 'HAS_MASK-TRACK', 'HAS_ZONE', 'HAS_HOMOGRAPHIES', 'HAS_LOOPS', 'HAS_BOUNDS', 'TRACKING_COMPLETION', 'SERIALISATION_COMPLETION']
920  if(commands.depth >= 1):
921  headers.append('MEAN_SPEED')
922  headers.append('INFLOW_PH')
923  headers.append('INFLOW_PHPL')
924  headers.append('TOTAL_VEHKM')
925  headers.append('VEHKM_PH')
926  headers.append('AADT')
927  if(commands.depth >= 2):
928  for predictionMethod in predictionMethods:
929  if(True not in predictionMethod.completionBySeq): continue
930  headers.append('IND_COMPLETION-'+predictionMethod.label)
931  headers.append('LOW-VERSION-'+predictionMethod.label)
932  headers.append('HIGH-VERSION'+predictionMethod.label)
933  headers.append('AADPE')
934  headers.append('AADIE')
935  headers.append('AADPIE')
936  headers.append('AADIIE')
937  headers.append('USER_PAIR_BH')
938  for predictionMethod in predictionMethods:
939  if(True not in predictionMethod.completionBySeq): continue
940  headers.append('USER_PAIR_W_IND_BH-'+predictionMethod.label)
941  headers.append('INTERACTION_PH-'+predictionMethod.label)
942  for agregationMethod in agregationMethods:
943  headers.append('MEAN_TTC_'+agregationMethod.label_short+'-'+predictionMethod.label)
944  headers.append('KS_STAT_TTC_'+agregationMethod.label_short+'-'+predictionMethod.label)
945  writer.writerow(headers)
946 
947 
948  datasetIx = 0
949  for saIx in saIxs:
950  for hour in site_analyses[saIx].getCamSeqs_Hourly():
951 
952  data = [site_analyses[saIx].name,
953  saIx+1,
954  site_analyses[saIx].site.description,
955  site_analyses[saIx].getSequenceCount(),
956  hour,
957  int(alignments[datasetIx]),
958  int(mhcs[datasetIx]),
959  int(masks[datasetIx]),
960  int(masks2[datasetIx]),
961  int(zones[datasetIx]),
962  homoCompletion[datasetIx],
963  int(loops[datasetIx]),
964  int(bounds[datasetIx]),
965  trackCompletion[datasetIx],
966  serObjCompletion[datasetIx]]
967  if(commands.depth >= 1):
968  data.append(hourly_speed[datasetIx].getCurve(type='mean')[hour])
969  data.append(hourly_flows[datasetIx].getCurve()[hour])
970  data.append(hourly_flows[datasetIx].getCurve(divideByValueVariety=True)[hour])
971  data.append(vehKmTraveled[datasetIx].value)
972  data.append(vehKmTraveled[datasetIx].value/float(site_analyses[saIx].getSequenceCount()))
973  data.append(site_analyses[saIx].getAADT(exposure='traffic'))
974  if(commands.depth >= 2):
975  for predictionMethod in predictionMethods:
976  if(True not in predictionMethod.completionBySeq): continue
977  data.append(predictionMethod.completion[datasetIx])
978  data.append(predictionMethod.minVersion[datasetIx])
979  data.append(predictionMethod.maxVersion[datasetIx])
980  data.append(site_analyses[saIx].getAADT(exposure='pair'))
981  data.append(site_analyses[saIx].getAADT(exposure='instance'))
982  data.append(site_analyses[saIx].getAADT(exposure='pairind'))
983  data.append(site_analyses[saIx].getAADT(exposure='instanceind'))
984  data.append(predictionMethods[0].userPairsBH[datasetIx].getCurve()[hour])
985  for predictionMethod in predictionMethods:
986  if(True not in predictionMethod.completionBySeq): continue
987  data.append(predictionMethod.userPairsWIndBH[datasetIx].getCurve()[hour])
988  data.append(predictionMethod.interactionsPH[datasetIx].value)
989  for agregationMethod in agregationMethods:
990  data.append(predictionMethod.meanTTC[0][0][agregationMethod.label_short][datasetIx].value)
991  data.append(kstests[predictionMethods.index(predictionMethod)][agregationMethod.label_short][datasetIx])
992  writer.writerow(data)
993  datasetIx += 1
994  except IOError: tvaLib.printWarning(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'report_hourly.csv')+' could not be opened for saving.', local['gen_warning'])
995 
996 
999  try:
1000  with open(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'indicator_thresholds.csv'), 'wb') as f:
1001  writer = csv_writer(f, delimiter=',', quotechar='"', quoting=csv_QUOTE_MINIMAL)
1002 
1003 
1004  headers = ['ANALYSIS_NAME', 'SAIX', 'DBID']
1005  if(commands.depth >= 2):
1006  headers.append('USERPAIRS')
1007  for predictionMethod in predictionMethods:
1008  if(True not in predictionMethod.completionBySeq): continue
1009  for agregationMethod in agregationMethods:
1010  for thresh in range(len(thresholds)):
1011  headers.append('TTC_THRESH_'+str(thresholds[thresh])+'_COUNT_'+agregationMethod.label_short+'-'+predictionMethod.label)
1012  for predictionMethod in predictionMethods:
1013  if(True not in predictionMethod.completionBySeq): continue
1014  for agregationMethod in agregationMethods:
1015  for thresh in range(len(thresholds)):
1016  headers.append('TTC_THRESH_'+str(thresholds[thresh])+'_COUNT_'+agregationMethod.label_short+'-'+predictionMethod.label+'_WEIGHTED_TTC_PROB')
1017  writer.writerow(headers)
1018 
1019 
1020  datasetIx = 0
1021  for saIx in saIxs:
1022 
1023  data = [site_analyses[saIx].name,
1024  saIx+1,
1025  site_analyses[saIx].site.description]
1026  if(commands.depth >= 2):
1027  data.append(predictionMethods[0].userPairsPH[datasetIx].value*site_analyses[saIx].getSequenceCount())
1028  for predictionMethod in predictionMethods:
1029  if(True not in predictionMethod.completionBySeq): continue
1030  for agregationMethod in agregationMethods:
1031  for thresh in range(len(thresholds)):
1032  data.append(predictionMethod.countTTCThresh[0][0][agregationMethod.label_short][datasetIx].getValue(thresh))
1033  for predictionMethod in predictionMethods:
1034  if(True not in predictionMethod.completionBySeq): continue
1035  for agregationMethod in agregationMethods:
1036  for thresh in range(len(thresholds)):
1037  try: data.append(predictionMethod.countTTCThresh[0][0][agregationMethod.label_short][datasetIx].getWeightedValue(thresh))
1038  except ZeroDivisionError: data.append('-')
1039  writer.writerow(data)
1040  datasetIx += 1
1041  except IOError: tvaLib.printWarning(os.path.join(sites.getBaseDirectory(), config.output_folder, analysisFolder, 'indicator_thresholds.csv')+' could not be opened for saving.', local['gen_warning'])
1042 
1043 
1044 
1047 
1048  if(commands.hli or commands.hli_only):
1049  import importlib
1050  if(not commands.hli):
1051  commands.hli = 'all'
1052  tvaLib.printWarning('No HLI-module specified explicitly. Defaulting to "all" HLI modules.', local['gen_warning'])
1053  if(commands.hli == 'all'):
1054  for _, dirs, files in os.walk('hli'):
1055  for file_ in files:
1056  if(os.path.splitext(file_)[0] != '__init__'):
1057  try:
1058  tvaHLI = importlib.import_module('hli.'+os.path.splitext(file_)[0])
1059  tvaLib.printTimeStamp('Running "'+os.path.splitext(file_)[0]+'" HLI analysis...')
1060  tvaHLI.analysis(commands=commands, config=config, site_analyses=site_analyses, analyses=analyses, local=local)
1061  except ImportError: tvaLib.printWarning('There were issues trying to import HLI module "'+file_+'".', local['gen_warning'])
1062  else:
1063  try:
1064  tvaHLI = importlib.import_module('hli.'+commands.hli)
1065  tvaLib.printTimeStamp('Running "'+commands.hli+'" HLI analysis...')
1066  tvaHLI.analysis(commands=commands, config=config, site_analyses=site_analyses, analyses=analyses, local=local)
1067  except ImportError: tvaLib.printWarning('There were issues trying to import HLI module "'+commands.hli+'".', local['gen_warning'])
1068 
1069 
1070 
1071 
1072 
1075 
1076 
1077  if(cluster[cIx] is not None and commands.depth >= 2):
1078 
1081  tvaLib.printTimeStamp('Processing statistical cluster tests...')
1082 
1083  kruskal_results = []
1084  for pmIx in range(len(predictionMethods_master)):
1085  kruskal_results.append({})
1086  for amIx in range(len(agregationMethods_master)):
1087  try:
1088  kruskal_result = scipy_stats_kruskal(*[tvaLib.flatten_list(x[pmIx][amIx]) for x in cluster.results_sample_dump])
1089  kruskal_by_site = []
1090  for cluster_sample_dump_elem in cluster.results_sample_dump:
1091  try: kruskal_by_site.append(scipy_stats_kruskal(*[x for x in cluster_sample_dump_elem[pmIx][amIx]]))
1092  except: pass
1093  kruskal_results[-1][amIx] = [kruskal_result,kruskal_by_site]
1094  except: pass
1095 
1096  print('============================================')
1097  print('Kruskal-Wallis one-way analysis of variance (scipy.stats.kruskal) on cluster specification: '+cluster.name)
1098  for pmIx in range(len(predictionMethods_master)):
1099  for kruskal_result in kruskal_results[pmIx]:
1100  print(' Results for aggregation method \''+predictionMethods[pmIx].label+' '+str(kruskal_result)+'\'')
1101  print(' Sample sizes: '+str([len(tvaLib.flatten_list(x[pmIx][kruskal_result])) for x in cluster.results_sample_dump]))
1102  print(' H-statistic: '+str(kruskal_results[pmIx][kruskal_result][0][0]))
1103  print(' p-value: '+str(kruskal_results[pmIx][kruskal_result][0][1]))
1104  print(' p-value by site: '+str([x[1] for x in kruskal_results[pmIx][kruskal_result][1]]))
1105  print(' Panels: '+str([len(x[pmIx][kruskal_result]) for x in cluster.results_sample_dump]))
1106 
1107 
1108 
1111  tvaLib.printTimeStamp('Generating figures...')
1112  if(cluster.plot_sites):
1113  cluster_lineStyles = ['-' for x in range(len(cluster))]
1114  cluster_markers = ['' for x in range(len(cluster))]
1115  else:
1116  cluster_lineStyles = ['-', '-', '--', ':','-.','-']
1117  cluster_markers = ['', 'o', '', '','','x']
1118  figures = []
1119  for predictionMethod in predictionMethods:
1120  if(True not in predictionMethod.completionBySeq): continue
1121  for distro_type in distributionTypes:
1122  for agregationMethod in agregationMethods:
1123  site_distros = []
1124  for site_distros_cl in cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short]:
1125  site_distros += site_distros_cl
1126  if(cluster.colours): colours = cluster.colours.data+tvaLib.flatten_list([[cluster.colours[groupIx] for i in group] for groupIx,group in zip(range(len(cluster)),cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short])])
1127  else: colours = [['b','r','g','orange','k','m','y'][groupIx] for groupIx in range(len(cluster))]+tvaLib.flatten_list([[['b','r','g','orange','k','m','y'][groupIx] for i in group] for groupIx,group in zip(range(len(cluster)),cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short])])
1128  linestyles = [cluster_lineStyles[groupIx] for groupIx in range(len(cluster))]+tvaLib.flatten_list([[cluster_lineStyles[groupIx] for i in group] for groupIx,group in zip(range(len(cluster)),cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short])])
1129  markers = [cluster_markers[groupIx] for groupIx in range(len(cluster))]+tvaLib.flatten_list([[cluster_markers[groupIx] for i in group] for groupIx,group in zip(range(len(cluster)),cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short])])
1130  linewidths = [5 for groupIx in range(len(cluster))]+tvaLib.flatten_list([[1 for i in group] for group in cluster.results_TTC_site[distro_type][agregationMethod.label_short][predictionMethod.label_short]])
1131  xs = [cluster.results_TTC_X for x in range(len(cluster)+len(site_distros))]
1132  ys = [[0 for z in range(len(xs))] if x==False else x for x in cluster.results_TTC_site_means[distro_type][agregationMethod.label_short][predictionMethod.label_short]+site_distros]
1133  if(distro_type == 'cdf'): figures.append(tvaVis.plots(xs, ys, legend_labels=cluster.labels, colours=colours, linestyles=linestyles, linewidths=linewidths, markers=markers, x_bounds=[0,config.disp_timehorizon], y_bounds=[0,1], x_label=local['vis_cp_hist_x'], y_label=local['vis_hist_y_'+distro_type], local=None, fig_name='TTC_clusters_'+distro_type+'_'+agregationMethod.label_short+'_'+predictionMethod.label_short))
1134  else: figures.append(tvaVis.plots(xs, ys, legend_labels=cluster.labels, colours=colours, linestyles=linestyles, linewidths=linewidths, markers=markers, x_bounds=[0,config.disp_timehorizon], x_label=local['vis_cp_hist_x'], y_label=local['vis_hist_y_'+distro_type], local=None, fig_name='TTC_clusters_'+distro_type+'_'+agregationMethod.label_short+'_'+predictionMethod.label_short))
1135  if(commands.fig_save_lan_a): fig_lan_suffix = config.language
1136  else: fig_lan_suffix = None
1137  tvaVis.Save(figures, os.path.join(sites.getBaseDirectory(), config.output_folder, clusterFolder), fig_format=commands.fig_format, fig_lan_suffix=fig_lan_suffix, fig_bg_colour=config.fig_bg_colour, verbose=commands.verbose)
1138 
1139  raise Exception, [0003, 'Batch processing complete.']
1140 
1141 
1142 
1146  except KeyboardInterrupt:
1147  if(commands.verbose): print(Back.GREEN+'==User exited== [0000]'+Back.RESET+'\nRuntime: {0}'.format(round(time.clock())))
1148 
1149  except SystemExit, e:
1150  try:
1151  if(commands.verbose): print(Back.GREEN+'==Execution finished== [0001]'+Back.RESET+'\nRuntime: {0}'.format(round(time.clock())))
1152  except UnboundLocalError: pass
1153 
1154  except Exception, e:
1155  if('commands' in dir()): verbose = commands.verbose
1156  else: verbose = 1
1157  if('config' in dir()): debug = config.debug
1158  else: debug = True
1159  from include.runtime import debug as tvaRuntime_debug
1160  tvaRuntime_debug(e, time, logging=logging, verbose=verbose, force=debug)
1161 
1162 
# Script entry point: run the full tvaLib analysis pipeline (argument parsing,
# data export, statistics, and figure generation all live inside main()).
if __name__ == '__main__':
    main()
def join(obj1, obj2, postSmoothing=True)
Definition: tools_obj.py:816
Definition: vis.py:1
Definition: main.py:1
Definition: filt.py:1
def main()
main()
Definition: analysis.py:14