tvaLib
interface.py
1 #!/usr/bin/env python
2 # tvaLib Copyright (c) 2012-2016 Paul G. St-Aubin
3 # Ecole Polytechnique de Montreal, McGill University
4 # Python 2.7; (dt) Spyder Windows 10 64-bit; ipython Ubuntu 15.04 64-bit
5 
9 import os, sys, time
10 import math as m
11 import cPickle as pickle
12 from csv import writer as csv_writer
13 from datetime import datetime
14 from datetime import timedelta
15 from inspect import getfile as inspect_getfile
16 from inspect import currentframe as inspect_currentframe
17 from copy import deepcopy
18 
19 import Tkinter as tk
20 from ttk import Combobox as tk_Combobox
21 from ttk import Style as ttk_Style
22 from tkFileDialog import askopenfilename
23 from tkFileDialog import asksaveasfilename
24 from tkMessageBox import askyesno as tk_askyesno
25 try: import numpy as np
26 except ImportError: raise Exception, [101, 'Numpy is not installed.']
27 try:
28  import matplotlib as mpl
29  import matplotlib.pyplot as plt
30  from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
31 except ImportError: raise Exception, [106, 'Matplotlib could not be found/imported.']
32 try: import cv2
33 except ImportError: raise Exception, [108, 'opencv could not be found/imported.']
34 try:
35  from moving import intersection as TrafIntMoving_intersection
36  from moving import Point as TrafIntMoving_Point
37  from moving import MovingObject
38  from moving import TimeInterval
39  from moving import Trajectory
40 except ImportError: raise Exception, [103, 'Traffic-Intelligence is not installed.']
41 
42 if __name__ == '__main__':
43  print('Interface library loaded directly.')
44  sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(inspect_getfile(inspect_currentframe())))))
45 import lib.tools as tvaLib
46 
47 
52 
53 
54 
55 
56 
59 def cvPlot(img, obj, lastInstant, useProjectedPositions=False, plotFeature=None, **kwargs):
 60  ''' Plot positions on image using OpenCV.
 61  Note: OpenCV uses reversed channel order (BGR). '''
62  if(useProjectedPositions):
63  if(plotFeature):
64  for i in range(min(len(obj.features[plotFeature].projectedPositions.getXCoordinates())-1, lastInstant)):
65  cv2.line(img, (int(round(obj.features[plotFeature].projectedPositions.getXCoordinates()[i])), int(round(obj.features[plotFeature].projectedPositions.getYCoordinates()[i]))),
66  (int(round(obj.features[plotFeature].projectedPositions.getXCoordinates()[i+1])),int(round(obj.features[plotFeature].projectedPositions.getYCoordinates()[i+1]))), **kwargs)
67  else:
68  for i in range(min(len(obj.projectedPositions.getXCoordinates())-1, lastInstant)):
69  cv2.line(img, (int(round(obj.projectedPositions.getXCoordinates()[i])), int(round(obj.projectedPositions.getYCoordinates()[i]))),
70  (int(round(obj.projectedPositions.getXCoordinates()[i+1])),int(round(obj.projectedPositions.getYCoordinates()[i+1]))), **kwargs)
71  else:
72  if(plotFeature):
 73  for i in range(min(len(obj.features[plotFeature].getXCoordinates())-1, lastInstant)):
74  cv2.line(img, (int(round(obj.features[plotFeature].getXCoordinates()[i])), int(round(obj.features[plotFeature].getYCoordinates()[i]))),
75  (int(round(obj.features[plotFeature].getXCoordinates()[i+1])),int(round(obj.features[plotFeature].getYCoordinates()[i+1]))), **kwargs)
76  else:
 77  for i in range(min(len(obj.getXCoordinates())-1, lastInstant)):
78  cv2.line(img, (int(round(obj.getXCoordinates()[i])), int(round(obj.getYCoordinates()[i]))),
79  (int(round(obj.getXCoordinates()[i+1])),int(round(obj.getYCoordinates()[i+1]))), **kwargs)
80 
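# Usage sketch (hypothetical values): draw the first 50 projected positions of a
# Traffic-Intelligence MovingObject onto a BGR frame grabbed from the video; the
# object must already carry projectedPositions, and extra keyword arguments
# (e.g. color, thickness) are forwarded to cv2.line().
#   cvPlot(img, obj, lastInstant=50, useProjectedPositions=True, color=(0, 0, 255), thickness=2)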
81 
82 
85 class StatusBar(tk.Frame):
86  def __init__(self, master, style, local):
87  tk.Frame.__init__(self, master, bg=style.bgdark, bd=1)
88  self.after = master.after
89  self.local = local
90  self.style = style
91  self.label = tk.Label(self, relief=tk.SUNKEN, foreground=style.fg, bg=style.bgdark, anchor=tk.W)
92  self.label.grid(row=0, column=0, sticky=tk.W+tk.E)
93  self.coordinates = tk.Label(self, relief=tk.SUNKEN, foreground=style.fg, bg=style.bgdark)
94  self.coordinates.grid(row=0, column=1, sticky=tk.W+tk.E)
95  self.columnconfigure(0, weight=5)
 96  self.columnconfigure(1, weight=1)
97  self.clock_str = ''
98  self.offset_str = ''
99  self.frame_str = '0'
100  self.objs_str = '0'
101  self.class_str = '-'
102  self.setDefault()
103  self.setCoords()
104 
105  def set(self, msg, *args):
106  self.label.config(text=msg % args)
107  self.label.update_idletasks()
108 
109  def displayMsg(self, msg, seconds=5):
110  self.set(msg)
111  self.after(seconds*1000, self.setDefault)
112 
113  def displayWarning(self, msg, seconds=5):
114  self.label.config(foreground=self.style.error)
115  self.set(msg)
116  self.after(seconds*1000, self.setDefault)
117 
118  def clear(self):
119  self.label.config(text="")
120  self.label.update_idletasks()
121 
122  def setCoords(self, wx=0.0, wy=0.0, ix=0.0, iy=0.0):
123  self.coordinates.config(text='world: ('+str(wx)+'m,'+str(wy)+'m) img: ('+str(ix).rjust(4)+'px,'+str(iy).rjust(4)+'px)')
124  self.label.update_idletasks()
125 
126  def setDefault(self, clock_str=None, offset_str=None, frame_str=None, objs_str=None, class_str=None):
127  self.label.config(foreground=self.style.fg)
128  if(clock_str is not None): self.clock_str = clock_str
129  if(offset_str is not None): self.offset_str = offset_str
130  if(frame_str is not None): self.frame_str = frame_str
131  if(objs_str is not None): self.objs_str = objs_str
132  if(class_str is not None): self.class_str = class_str
133  self.set(self.local['gen_clock']+': '+self.clock_str+' '+self.local['gen_offset']+': +'+self.offset_str+' '+self.local['gen_frame']+': '+self.frame_str+' '+' objs: '+self.objs_str+' class: '+self.class_str)
134 
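# Usage sketch (assuming a tk root window, a configured Style instance and a
# localisation dict, as used by the interface classes below; the message text is
# hypothetical):
#   status = StatusBar(root, style, local)
#   status.pack(side=tk.BOTTOM, fill=tk.X)
#   status.displayMsg('Annotations saved.', seconds=3)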
135 
138 class Event():
139  def __init__(self, num=0, frame=0, type=1):
140  self.num = num
141  self.frame = frame
142  self.type = type
143 
144 
145 
 class TimeseriesInterface():
 151  def __init__(self, videoFilename, local, satFilename=None, satRes=0.12, homography=None, alignments=None, fps=15, title='Video Timeseries', dynamicWindowSize=True, windowSize=[1200,670], intrinsicCameraMatrix=None, distortionCoefficients=None, undistortedImageScalingFactor=1.0, startTime=None, frame_data_width=500, launch=True, config=None, verbose=0):
152  ''' local is a pre-formatted dict-like object containing keywords in
153  the appropriate language. See the tvaLib Specification for formatting
154  details and examples.
155 
156  IMPORTANT: Matplotlib (mpl) MUST be initialised with mpl.use('TkAgg') as soon as mpl is imported.
157  '''
158 
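 # A minimal sketch of the caller-side requirement noted above: the TkAgg
 # backend must be selected before pyplot is first imported anywhere in the
 # process.
 #   import matplotlib as mpl
 #   mpl.use('TkAgg')
 #   import matplotlib.pyplot as plt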
159  try:
160  from PIL import Image, ImageTk
161  self.Image = Image
162  self.ImageTk = ImageTk
163  except ImportError: raise Exception, [109, 'PIL could not be found/imported.']
164 
165 
166  plt.ioff()
167  if(homography is not None):
168  self.homography = homography.asNpArray()
169  self.invHomography = tvaLib.Obj.invHomography(homography.asNpArray())
170  else:
171  self.homography = None
172  self.invHomography = None
173  if(startTime is None): self.startTime = datetime(2000, 1, 1)
174  else: self.startTime = startTime
175  self.alignments = alignments
177  self.frame_fwd_delay = 0.04
178  self.frame_bck_delay = 0.2
179  self.satFilename = satFilename
180  self.satRes = satRes
181  self.speed_conv = fps * config.mps_kmh
182  self.fps = fps
184  self.raw_vid = None
188  self.cursor_range = range(10)
189  self.iIx = 0
190  self.projctedMap = None
191  self.local = local
192  self.unsaved_changes = False
193  self.config = config
194 
195 
196 
197  self.style = Style()
198  #ttk_Style()
199 
200 
201 
202  self.capture = cv2.VideoCapture(videoFilename)
203  self.vid_width = int(self.capture.get(3))
204  self.vid_height = int(self.capture.get(4))
205  while self.vid_width == 0:
206  videoFilename = askopenfilename(title=local['UI_dialogue_title_sel_vid'], filetypes=[('MP4', '.mp4'), ('AVI', '.avi')])
207  self.capture = cv2.VideoCapture(videoFilename)
208  self.vid_width = int(self.capture.get(3))
209  self.vid_height = int(self.capture.get(4))
210  self.go_window_width = int(round(float(windowSize[1])*float(self.vid_width)/float(self.vid_height)))
211  if(dynamicWindowSize): windowSize[0] = 150+self.go_window_width+frame_data_width
212  if(intrinsicCameraMatrix and distortionCoefficients):
213  self.vid_width = int(round(self.vid_width*undistortedImageScalingFactor))
214  self.vid_height = int(round(self.vid_height*undistortedImageScalingFactor))
215  new_matrix = deepcopy(np.array(intrinsicCameraMatrix))
216  new_matrix[0,2] = self.vid_width/2.0
217  new_matrix[1,2] = self.vid_height/2.0
218  [self.map1, self.map2] = cv2.initUndistortRectifyMap(np.array(intrinsicCameraMatrix), np.array(distortionCoefficients), np.identity(3), new_matrix, (self.vid_width, self.vid_height), cv2.CV_32FC1)
219  else: self.map1, self.map2 = None, None
220 
221 
222 
223  self.root = tk.Tk()
224  self.root.title(title)
225  self.root.geometry(str(windowSize[0])+'x'+str(windowSize[1]))
226  self.root.minsize(windowSize[0], windowSize[1])
227  # Favicon doesn't work on all platforms
228  try:
229  self.root.iconbitmap(os.path.join(os.path.split(os.path.realpath(__file__))[0], os.pardir, 'assets', 'favicon.ico'))
230  except: pass
231 
232 
233  self.menubar = tk.Menu(self.root, foreground=self.style.fg, bg=self.style.bglight)
234  self.filemenu = tk.Menu(self.menubar, tearoff=0, foreground=self.style.fg, bg=self.style.bglight, activebackground='#D9CB9E', activeforeground='#000000')
235  self.filemenu.add_command(label=local['UI_nav_menu_quit'], command=self.quit)
239  self.videomenu_groupFeatures=tk.IntVar()
240  self.videomenu_groupFeatures.set(1)
241  self.videomenu.add_checkbutton(label=local['UI_nav_menu_group_feat'], variable=self.videomenu_groupFeatures, command=self.callback_scrubStatic, accelerator="Ctrl+T")
242  self.menubar.add_cascade(label=local['UI_nav_menu_file'], menu=self.filemenu)
243  self.menubar.add_cascade(label=local['UI_nav_menu_playback'], menu=self.playback)
244  self.menubar.add_cascade(label=local['UI_nav_menu_video'], menu=self.videomenu)
245  self.menubar.add_cascade(label=local['UI_nav_menu_object'], menu=self.objectmenu)
246  self.root.config(menu=self.menubar, bg=self.style.bglight)
247 
248 
249  self.status = StatusBar(self.root, self.style, self.local)
250  self.status.pack(side=tk.BOTTOM, fill=tk.X)
251 
252 
253  self.frame_nav_list = tk.Frame(self.root, bg=self.style.bglight, width='150px')
254  self.frame_video = tk.Frame(self.root, bg=self.style.bgdark, width=str(self.go_window_width)+'px')
255  self.frame_data = tk.Frame(self.root, bg=self.style.bglight)
256  self.frame_nav_list.pack(side=tk.LEFT, fill=tk.Y)
257  self.frame_video.pack(side=tk.LEFT, fill=tk.Y)
258  self.frame_data.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1)
259 
260 
261  self.video_window = tk.Label(self.frame_video, highlightthickness=0, cursor='tcross')
262  self.video_window.pack(padx=10, pady=(10,5))
263  self.video_window.config(foreground=self.style.fg, bg=self.style.bgdark)
264 
265  self.playback_multiplier_HUD = tk.Label(self.frame_video, text='', borderwidth=0, relief="solid")
266  self.playback_multiplier_HUD.place(relx=1.0, rely=0.0, x=-20, y=20, anchor="ne")
267 
268 
269  self.video_window.bind('<Motion>', self.callback_getWorldCoords)
270  self.root.bind('<space>', self.callback_autoPlay)
271  self.root.bind('<Control-Right>', self.callback_scrubFwdx10)
272  self.root.bind('<Right>', self.callback_scrubFwd)
273  self.root.bind('<Control-Left>', self.callback_scrubBckx10)
274  self.root.bind('<Left>', self.callback_scrubBck)
275  self.root.bind('<Home>', self.callback_scrubHome)
276  self.root.bind('<End>', self.callback_scrubEnd)
277  self.root.bind('<Prior>', self.callback_prevItem)
278  self.root.bind('<Next>', self.callback_nextItem)
279  self.root.bind('<KP_Add>', self.callback_SpeedUp)
280  self.root.bind('<KP_Subtract>', self.callback_SpeedDown)
281  self.root.bind('=', self.callback_SpeedUp)
282  self.root.bind('-', self.callback_SpeedDown)
283  self.root.bind('+', self.callback_SpeedUp)
284  self.root.bind('<Configure>', self.drawTkinterCursor)
285  self.root.bind('<Control-t>', self.callback_shortcut_T)
286  self.root.bind('<Control-g>', self.callback_shortcut_G)
287  self.root.bind('<Control-a>', self.callback_shortcut_A)
288  self.root.bind('<Control-s>', self.callback_shortcut_S)
289  self.root.bind('<Control-d>', self.callback_shortcut_D)
290  self.root.bind('<Control-c>', self.callback_shortcut_C)
291  self.root.bind('<Control-b>', self.callback_shortcut_B)
292  self.root.protocol('WM_DELETE_WINDOW', self.quit)
293 
294 
295 
296  if(launch):
297  self.root.focus_force()
298  self.root.mainloop()
299 
300 
301 
304  def showDataWindow(self, content=''):
305  t = tk.Toplevel(bg=self.style.bglight)
306  t.wm_title('Data viewer')
307  l = tk.Text(t, foreground=self.style.fg, bg=self.style.bgdark)
308  l.insert('1.0', content)
309  l.pack(side='top', fill='both', expand=True, padx=5, pady=5)
310  b = tk.Button(t, text=self.local['UI_nav_copy_to_clipboard'], command=lambda: self.copyToClipboard(content), foreground=self.style.fg, bg=self.style.bglight)
311  b.pack(side='bottom', padx=5, pady=5)
312 
313  def copyToClipboard(self, content=''):
314  r = tk.Tk()
315  r.withdraw()
316  r.clipboard_clear()
317  r.clipboard_append(content)
318  r.update()
319  r.destroy()
320 
321 
322 
323 
326 
328  self.playback = tk.Menu(self.menubar, tearoff=0, foreground=self.style.fg, bg=self.style.bglight, activebackground='#D9CB9E', activeforeground='#000000')
329  self.playback_autoplay=tk.IntVar()
330  self.playback.add_checkbutton(label=self.local['UI_nav_menu_autoplay'], variable=self.playback_autoplay, command=self.callback_scrubFwd, accelerator="Space")
331  self.playback.add_command(label=self.local['UI_nav_menu_speed_up'], command=self.callback_SpeedUp, accelerator="+")
332  self.playback.add_command(label=self.local['UI_nav_menu_speed_down'], command=self.callback_SpeedDown, accelerator="-")
333  self.playback.add_command(label=self.local['UI_nav_menu_first_fr'], command=self.callback_scrubHome, accelerator="Home")
334  self.playback.add_command(label=self.local['UI_nav_menu_last_fr'], command=self.callback_scrubEnd, accelerator="End")
335 
337  self.videomenu = tk.Menu(self.menubar, tearoff=0, foreground=self.style.fg, bg=self.style.bglight, activebackground='#D9CB9E', activeforeground='#000000')
338  self.videomenu_show_grid=tk.IntVar()
339  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_show_grid'], variable=self.videomenu_show_grid, command=self.callback_scrubStatic, accelerator="Ctrl+G")
340  self.videomenu_show_align=tk.IntVar()
341  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_show_align'], variable=self.videomenu_show_align, command=self.callback_scrubStatic, accelerator="Ctrl+A", state=(tk.DISABLED if self.alignments is None else tk.NORMAL))
342  self.videomenu_hide_vid_state=tk.IntVar()
343  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_hide_vid'], variable=self.videomenu_hide_vid_state, command=self.callback_scrubStatic, accelerator="Ctrl+S")
344  self.videomenu_proj_homo_state=tk.IntVar()
345  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_proj_homo'], variable=self.videomenu_proj_homo_state, command=self.callback_refreshHomography, accelerator="Ctrl+D")
346  self.videomenu_show_contours=tk.IntVar()
347  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_show_contour'], variable=self.videomenu_show_contours, command=self.callback_scrubStatic, accelerator="Ctrl+C")
349  self.videomenu.add_checkbutton(label=self.local['UI_nav_menu_show_boundin'], variable=self.videomenu_show_bounding_boxes, command=self.callback_scrubStatic, accelerator="Ctrl+B")
350 
352  self.objectmenu = tk.Menu(self.menubar, tearoff=0, foreground=self.style.fg, bg=self.style.bglight, activebackground='#D9CB9E', activeforeground='#000000')
353  self.objectmenu.add_command(label=self.local['UI_nav_menu_show_imageBox'], command=self.active_object_show_imageBox)
354  self.objectmenu.add_command(label=self.local['UI_nav_menu_show_hog'], command=self.active_object_show_hog)
355  self.objectmenu.add_command(label=self.local['UI_nav_menu_show_types'], command=self.active_object_show_userTypes)
356 
357 
358 
361  def callback_scrubTo(self, frame):
362  if(frame < 0 or frame > len(self.cursor_range)-1): frame = self.cursor_range[-1]
363  if(frame == self.cursor_location): return False
364  self.cursor_location = frame
366  self.capture.set(1, self.cursor_abs_location)
367  return self.refresh()
368 
369 
370  def callback_scrubTarget(self, event):
371  lowerlimit = self.frame_data.winfo_width()*0.126
372  upperlimit = self.frame_data.winfo_width()*0.902
373  if(event.x > lowerlimit and event.x < upperlimit):
374  new_cursor = self.cursor_range[int(round((len(self.cursor_range)-1)*(event.x-round(lowerlimit))/(round(upperlimit)-round(lowerlimit))))]
375  return self.callback_scrubTo(new_cursor)
376  else: return False
377 
378  def callback_scrubHome(self, event=None):
379  return self.callback_scrubTo(0)
380 
381  def callback_scrubEnd(self, event=None):
382  return self.callback_scrubTo(-1)
383 
384  def callback_scrubBckx10(self, event=None):
385  return self.callback_scrubBck(amount=10)
386  def callback_scrubFwdx10(self, event=None):
387  return self.callback_scrubFwd(amount=10)
388 
389  def callback_scrubFwd(self, event=None, amount=1):
390 
391  if((time.time() - self.frameRefreshTime) > self.frame_fwd_delay):
392  self.frameRefreshTime = time.time()
393  if(self.cursor_location < max(self.cursor_range)):
394  if(self.playback_autoplay.get()):
395  self.cursor_location += int(1*self.playback_multiplier)
396  self.cursor_abs_location += int(1*self.playback_multiplier)
397  else:
398  self.cursor_location += amount
399  self.cursor_abs_location += amount
400  result = self.refresh()
401  if(self.playback_autoplay.get()): self.root.after(10, self.callback_scrubFwd)
402  return result
403  if(self.playback_autoplay.get()): self.root.after(10, self.callback_scrubFwd)
404  return False
405 
406  def callback_scrubBck(self, event=None, amount=1):
407 
408  if(self.playback_autoplay.get()): self.playback_autoplay.set(0) # Pause Auto Play-back
409  if((time.time() - self.frameRefreshTime) < self.frame_bck_delay): return False
410  if(self.cursor_location > min(self.cursor_range)):
411  self.cursor_location -= amount
412  self.cursor_abs_location -= amount
413  self.capture.set(1, self.cursor_abs_location)
414  return self.refresh()
415  else: return False
416 
417  def callback_scrubStatic(self, event=None):
418  return self.refresh(scrub=False)
419 
420 
421  def callback_getWorldCoords(self, event):
422 
423  loc = [event.x,event.y]
424  if(self.homography is not None): loc = tvaLib.Obj.homographyProject([loc], self.homography)[0]
425  if(loc[0] > 1000): x = 'inf'
426  elif(loc[0] < -1000): x = '-inf'
427  else: x = str(round(loc[0],1))
428  if(loc[1] > 1000): y = 'inf'
429  elif(loc[1] < -1000): y = '-inf'
430  else: y = str(round(loc[1],1))
431  self.status.setCoords(x.rjust(7), y.rjust(7), event.x, event.y)
432 
433 
434  def callback_refreshHomography(self, event=None):
 435  ''' Toggle projection of the satellite image texture (via homography) and refresh the view. '''
436  if(self.videomenu_proj_homo_state.get()):
437  if(self.satFilename and type(self.satFilename) is str and os.path.exists(self.satFilename)):
438  self.satImg, self.satMask = self.projectTexture(cv2.imread(self.satFilename), (int(self.capture.get(3)),int(self.capture.get(4))))
439  return self.refresh(scrub=False)
440  else:
441  self.videomenu_proj_homo_state.set(0)
442  return False
443  else:
444  self.satImg, self.satMask = None, None
445  return self.refresh(scrub=False)
446 
447 
448  def callback_SpeedUp(self, event=None):
449  if(self.playback_multiplier < 16.0): self.playback_multiplier = self.playback_multiplier * 2.0
450  self.playback_multiplier_HUD['text'] = 'x'+str(self.playback_multiplier)
451  return True
452 
453  def callback_SpeedDown(self, event=None):
454  if(self.playback_multiplier > 1.0): self.playback_multiplier = self.playback_multiplier / 2.0
455  if(self.playback_multiplier == 1.0): self.playback_multiplier_HUD['text'] = ''
456  else: self.playback_multiplier_HUD['text'] = 'x'+str(self.playback_multiplier)
457  return True
458 
459  def callback_autoPlay(self, event=None):
460  if(self.playback_autoplay.get() == 0): self.playback_autoplay.set(1)
461  else: self.playback_autoplay.set(0)
462  return self.callback_scrubFwd()
463 
464  def callback_shortcut_T(self, event=None):
465  if(self.videomenu_groupFeatures.get() == 0): self.videomenu_groupFeatures.set(1)
466  else: self.videomenu_groupFeatures.set(0)
467  return self.callback_scrubStatic()
468 
469  def callback_shortcut_G(self, event=None):
470  if(self.videomenu_show_grid.get() == 0): self.videomenu_show_grid.set(1)
471  else: self.videomenu_show_grid.set(0)
472  return self.callback_scrubStatic()
473 
474  def callback_shortcut_A(self, event=None):
475  if(self.alignments is None): return False
476  if(self.videomenu_show_align.get() == 0): self.videomenu_show_align.set(1)
477  else: self.videomenu_show_align.set(0)
478  return self.callback_scrubStatic()
479 
480  def callback_shortcut_S(self, event=None):
481  if(self.videomenu_hide_vid_state.get() == 0): self.videomenu_hide_vid_state.set(1)
482  else: self.videomenu_hide_vid_state.set(0)
483  return self.callback_scrubStatic()
484 
485  def callback_shortcut_D(self, event=None):
486  if(self.videomenu_proj_homo_state.get() == 0): self.videomenu_proj_homo_state.set(1)
487  else: self.videomenu_proj_homo_state.set(0)
488  return self.callback_refreshHomography()
489 
490  def callback_shortcut_C(self, event=None):
491  if(self.alignments is None): return False
492  if(self.videomenu_show_contours.get() == 0): self.videomenu_show_contours.set(1)
493  else: self.videomenu_show_contours.set(0)
494  return self.callback_scrubStatic()
495  def callback_shortcut_B(self, event=None):
496  if(self.alignments is None): return False
497  if(self.videomenu_show_bounding_boxes.get() == 0): self.videomenu_show_bounding_boxes.set(1)
498  else: self.videomenu_show_bounding_boxes.set(0)
499  return self.callback_scrubStatic()
500 
501 
504  def refreshTimeStamps(self):
505  self.status.setDefault(clock_str=(self.startTime+timedelta(seconds=int(self.cursor_abs_location/self.fps))).strftime("%H:%M:%S"), offset_str=(datetime(2000, 1, 1)+timedelta(seconds=int(self.cursor_abs_location/self.fps))).strftime("%H:%M:%S"), frame_str=str(self.cursor_abs_location))
506 
507 
509  return int(x*self.vid_width/self.go_window_width),int(y*self.vid_height/600.0)
510 
511 
512  def getProjectedMap(self, flush=False, scaling_factor=None):
 513  ''' Compute (and cache) the projected coordinate map using the homography. '''
514  if(self.projctedMap is not None and flush==False): return self.projctedMap
515  if(scaling_factor is None): scaling_factor = 1/self.satRes
516  self.projctedMap = np.zeros((self.vid_height,self.vid_width,2), int)
517  for rIx in range(self.vid_height):
518  self.projctedMap[rIx] = np.transpose(np.multiply(np.array(tvaLib.Obj.homographyProject(np.array([range(self.vid_width),[rIx for x in range(self.vid_width)]]), self.invHomography)), scaling_factor).astype('int'))
519  return self.projctedMap
520 
521  def projectObjects(self, objects):
522  ''' Function only needs to be called once and modifies the objects
523  permanently in memory. '''
524  for i in range(len(objects)):
525  if(not hasattr(objects[i], 'projectedPositions')): tvaLib.Obj.trajectoryProject(objects[i], self.invHomography)
526  return objects
527 
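# Usage sketch: project trajectories once, before any drawing that relies on
# obj.projectedPositions (the annotation interface __init__ below does this):
#   self.objects = self.projectObjects(self.objects)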
528 
529  def projectTexture(self, src, dstShape):
530  ''' Not optimised. '''
531  projectedMap = self.getProjectedMap()
532  dst = np.zeros((dstShape[1],dstShape[0],3), np.uint8)
533  mask = np.zeros((dstShape[1],dstShape[0]), np.uint8)
534  for rIx in range(dst.shape[0]):
535  for cIx in range(dst.shape[1]):
536  if(projectedMap[rIx][cIx][0] >= 0 and projectedMap[rIx][cIx][0] < src.shape[0] and projectedMap[rIx][cIx][1] >= 0 and projectedMap[rIx][cIx][1] < src.shape[1]):
537  dst[rIx][cIx] = src[projectedMap[rIx][cIx][1]][projectedMap[rIx][cIx][0]]
538  mask[rIx][cIx] = 255
539  return dst, mask
540 
541  def grabVideoFrame(self, scrub=True, sourceImageOnly=False):
 542  ''' This function loads a video frame from file. General-purpose,
 543  static objects will be drawn onto the frame here as well.
 544 
 545  Input:
 546  ======
 547  scrub: read the next frame instead of seeking to a specific frame. This
 548  is necessary to play certain videos correctly and smoothly,
 549  and is better for performance in general.
 550  sourceImageOnly: return the original video frame only
551  '''
552 
553  if(self.videomenu_hide_vid_state.get()):
554  img = np.full((self.vid_height,self.vid_width,3), 255, np.uint8)
555  else:
556  if(not scrub): self.capture.set(1, self.cursor_abs_location)
557  if(self.playback_autoplay.get()):
558  for i in range(int(self.playback_multiplier)):
559  imgstate, img = self.capture.read()
560  else:
561  imgstate, img = self.capture.read()
562  if(not imgstate):
563  self.status.displayMsg('There was an error loading the video frame.')
564  return None
565 
566  if(self.map1 is not None and self.map2 is not None): img = cv2.remap(img, self.map1, self.map2, interpolation=cv2.INTER_LINEAR)
567  if(not sourceImageOnly):
568 
569  if(self.videomenu_proj_homo_state.get()):
570  img = cv2.bitwise_and(img,img,mask = cv2.bitwise_not(self.satMask))
571  img = cv2.add(img, self.satImg)
572 
573  if(self.videomenu_show_grid.get() or self.videomenu_hide_vid_state.get()): img = self.drawProjectedGrid(img)
574 
575  if(self.videomenu_show_align.get()): img = self.drawProjectedAlignments(img)
576  return img
577 
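# Usage sketch: fetch the raw (undecorated) frame at the current cursor
# position, e.g. for cropping an image box as in active_object_show_imageBox():
#   img = self.grabVideoFrame(scrub=False, sourceImageOnly=True)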
578  def getProjectedGridX(self, grid_range, spacing):
 579  ''' Return projected grid X coordinates if they exist; otherwise
 580  calculate these coordinates and store them for future reference.
581  '''
582  try: return self.gridX
583  except:
584  edges = [[0,0],[self.vid_width,0],[self.vid_width,self.vid_height],[0,self.vid_height]]
585  if(self.invHomography is not None): edges = tvaLib.Obj.homographyProject(edges, self.invHomography)
586  x_range=[x/10.0 for x in range(int(grid_range[0]*10.0),int((grid_range[1]+1)*10.0),int(spacing*10.0))]
587  y_range=[y/10.0 for y in range(int(grid_range[0]*10.0),int((grid_range[1]+1)*10.0),int(spacing*10.0))]
588  self.gridX = []
589  for x in x_range:
590  pt1 = TrafIntMoving_Point(x,y_range[0])
591  pt2 = TrafIntMoving_Point(x,y_range[-1])
592  pt1_in = tvaLib.Geo.pip(pt1.x, pt1.y, edges)
593  pt2_in = tvaLib.Geo.pip(pt2.x, pt2.y, edges)
594  if(not pt1_in or not pt2_in):
595  sides = [TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[0][0],edges[0][1]), TrafIntMoving_Point(edges[1][0],edges[1][1])),
596  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[1][0],edges[1][1]), TrafIntMoving_Point(edges[2][0],edges[2][1])),
597  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[2][0],edges[2][1]), TrafIntMoving_Point(edges[3][0],edges[3][1])),
598  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[3][0],edges[3][1]), TrafIntMoving_Point(edges[0][0],edges[0][1]))]
599  for sideIx in range(4):
600  coords = [sides[sideIx].x,sides[sideIx].y]
601  if(self.invHomography is not None): coords = tvaLib.Obj.homographyProject([coords], self.invHomography)[0]
602  coords = tvaLib.list2int(coords)
603  if(coords[0] < 0 or coords[0] > self.vid_width or coords[1] < 0 or coords[1] > self.vid_height):
604  sides[sideIx] = None
605  sides = filter(None, sides)
606  shortest_distance_squared = sys.float_info.max
607  if(not pt1_in and not pt2_in):
608  try:
609  dst = round(m.sqrt((pt1.x-pt2.x)**2+(pt1.y-pt2.y)**2),1)
610  if(round(m.sqrt((pt1.x-sides[0].x)**2+(pt1.y-sides[0].y)**2)+m.sqrt((pt2.x-sides[0].x)**2+(pt2.y-sides[0].y)**2),1) != dst or round(m.sqrt((pt1.x-sides[1].x)**2+(pt1.y-sides[1].y)**2)+m.sqrt((pt2.x-sides[1].x)**2+(pt2.y-sides[1].y)**2),1) != dst): continue
611  pt1 = sides[0]
612  pt2 = sides[1]
613  except: continue
614  elif(not pt1_in):
615  for side in sides:
616  dst_squared = (pt1.x-side.x)**2+(pt1.y-side.y)**2
617  if(dst_squared < shortest_distance_squared):
618  shortest_distance_squared = dst_squared
619  sideToUse = side
620  pt1 = sideToUse
621  elif(not pt2_in):
622  for side in sides:
623  dst_squared = (pt1.x-side.x)**2+(pt1.y-side.y)**2
624  if(dst_squared < shortest_distance_squared):
625  shortest_distance_squared = dst_squared
626  sideToUse = side
627  pt2 = sideToUse
628  coords = [[pt1.x,pt1.y],[pt2.x,pt2.y]]
629  if(self.invHomography is not None): coords = tvaLib.Obj.homographyProject(coords, self.invHomography)
630  coords = tvaLib.list2int(coords)
631  self.gridX.append([TrafIntMoving_Point(coords[0][0],coords[0][1]).asint(), TrafIntMoving_Point(coords[1][0],coords[1][1]).asint()])
632  return self.gridX
633 
634 
635  def getProjectedGridY(self, grid_range, spacing):
 636  ''' Return projected grid Y coordinates if they exist; otherwise
 637  calculate these coordinates and store them for future reference.
638  '''
639  try: return self.gridy
640  except:
641  edges = [[0,0],[self.vid_width,0],[self.vid_width,self.vid_height],[0,self.vid_height]]
642  if(self.invHomography is not None): edges = tvaLib.Obj.homographyProject(edges, self.invHomography)
643  x_range=[x/10.0 for x in range(int(grid_range[0]*10.0),int((grid_range[1]+1)*10.0),int(spacing*10.0))]
644  y_range=[y/10.0 for y in range(int(grid_range[0]*10.0),int((grid_range[1]+1)*10.0),int(spacing*10.0))]
645  self.gridy = []
646  for y in y_range:
647  pt1 = TrafIntMoving_Point(x_range[0],y)
648  pt2 = TrafIntMoving_Point(x_range[-1],y)
649  pt1_in = tvaLib.Geo.pip(pt1.x, pt1.y, edges)
650  pt2_in = tvaLib.Geo.pip(pt2.x, pt2.y, edges)
651  if(not pt1_in or not pt2_in):
652  sides = [TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[0][0],edges[0][1]), TrafIntMoving_Point(edges[1][0],edges[1][1])),
653  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[1][0],edges[1][1]), TrafIntMoving_Point(edges[2][0],edges[2][1])),
654  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[2][0],edges[2][1]), TrafIntMoving_Point(edges[3][0],edges[3][1])),
655  TrafIntMoving_intersection(pt1, pt2, TrafIntMoving_Point(edges[3][0],edges[3][1]), TrafIntMoving_Point(edges[0][0],edges[0][1]))]
656  for sideIx in range(4):
657  coords = [sides[sideIx].x,sides[sideIx].y]
658  if(self.invHomography is not None): coords = tvaLib.Obj.homographyProject([coords], self.invHomography)[0]
659  coords = tvaLib.list2int(coords)
660  if(coords[0] < 0 or coords[0] > self.vid_width or coords[1] < 0 or coords[1] > self.vid_height):
661  sides[sideIx] = None
662  sides = filter(None, sides)
663  shortest_distance_squared = sys.float_info.max
664  if(not pt1_in and not pt2_in):
665  try:
666  dst = round(m.sqrt((pt1.x-pt2.x)**2+(pt1.y-pt2.y)**2),1)
667  if(round(m.sqrt((pt1.x-sides[0].x)**2+(pt1.y-sides[0].y)**2)+m.sqrt((pt2.x-sides[0].x)**2+(pt2.y-sides[0].y)**2),1) != dst or round(m.sqrt((pt1.x-sides[1].x)**2+(pt1.y-sides[1].y)**2)+m.sqrt((pt2.x-sides[1].x)**2+(pt2.y-sides[1].y)**2),1) != dst): continue
668  pt1 = sides[0]
669  pt2 = sides[1]
670  except: continue
671  elif(not pt1_in):
672  for side in sides:
673  dst_squared = (pt1.x-side.x)**2+(pt1.y-side.y)**2
674  if(dst_squared < shortest_distance_squared):
675  shortest_distance_squared = dst_squared
676  sideToUse = side
677  pt1 = sideToUse
678  elif(not pt2_in):
679  for side in sides:
680  dst_squared = (pt1.x-side.x)**2+(pt1.y-side.y)**2
681  if(dst_squared < shortest_distance_squared):
682  shortest_distance_squared = dst_squared
683  sideToUse = side
684  pt2 = sideToUse
685  coords = [[pt1.x,pt1.y],[pt2.x,pt2.y]]
686  if(self.invHomography is not None): coords = tvaLib.Obj.homographyProject(coords, self.invHomography)
687  coords = tvaLib.list2int(coords)
 688  self.gridy.append([TrafIntMoving_Point(coords[0][0],coords[0][1]).asint(), TrafIntMoving_Point(coords[1][0],coords[1][1]).asint()])
689  return self.gridy
690 
 691  def getProjectedAlignments(self):
 692  ''' Return projected alignment coordinates if they exist; otherwise
 693  calculate these coordinates and store them for future reference.
694  '''
695  try: return self.projAlignments
696  except:
697 
698  candidateProjAlignments = []
699  for lane in range(len(self.alignments)):
700  candidateProjAlignments.append([])
701  for sIx in range(len(self.alignments[lane])):
702  coords = tvaLib.list2int(tvaLib.Obj.homographyProject([self.alignments[lane][sIx].x,self.alignments[lane][sIx].y], self.invHomography))
703  candidateProjAlignments[lane].append([coords[0],coords[1]])
704 
705 
706  self.projAlignments = deepcopy(candidateProjAlignments)
707  for lane in range(len(candidateProjAlignments)):
708  for sIx in range(len(candidateProjAlignments[lane])):
709  if(0 > candidateProjAlignments[lane][sIx][0] or candidateProjAlignments[lane][sIx][0] > self.vid_width or 0 > candidateProjAlignments[lane][sIx][1] or candidateProjAlignments[lane][sIx][1] > self.vid_height):
710 
711  if(sIx > 0 and 0 < candidateProjAlignments[lane][sIx-1][0] and candidateProjAlignments[lane][sIx-1][0] < self.vid_width and 0 < candidateProjAlignments[lane][sIx-1][1] and candidateProjAlignments[lane][sIx-1][1] < self.vid_height):
712  # OK, previous point is within bounds, find intercept
713  coords = tvaLib.Geo.polyInter([float(candidateProjAlignments[lane][sIx-1][0]), float(candidateProjAlignments[lane][sIx-1][1])], [float(candidateProjAlignments[lane][sIx][0]), float(candidateProjAlignments[lane][sIx][1])], [[0.0,0.0],[float(self.vid_width),0.0],[float(self.vid_width),float(self.vid_height)],[0.0,float(self.vid_height)]])[0]
714  self.projAlignments[lane][sIx] = [int(coords[0]),int(coords[1])]
715  elif(sIx < len(candidateProjAlignments[lane])-1 and 0 < candidateProjAlignments[lane][sIx+1][0] and candidateProjAlignments[lane][sIx+1][0] < self.vid_width and 0 < candidateProjAlignments[lane][sIx+1][1] and candidateProjAlignments[lane][sIx+1][1] < self.vid_height):
716  # OK, next point is within bounds, find intercept
717  coords = tvaLib.Geo.polyInter([float(candidateProjAlignments[lane][sIx][0]), float(candidateProjAlignments[lane][sIx][1])], [float(candidateProjAlignments[lane][sIx+1][0]), float(candidateProjAlignments[lane][sIx+1][1])], [[0.0,0.0],[float(self.vid_width),0.0],[float(self.vid_width),float(self.vid_height)],[0.0,float(self.vid_height)]])[0]
718  self.projAlignments[lane][sIx] = [int(coords[0]),int(coords[1])]
719  else:
720  # Point is isolated. Ignore.
721  self.projAlignments[lane][sIx] = [0,0]
722 
723 
724 
725  if(not len(self.projAlignments[lane])-self.projAlignments[lane].count([0,0])):
726  for sIx in range(len(candidateProjAlignments[lane])-1):
727  intercept = tvaLib.Geo.polyInter([float(candidateProjAlignments[lane][sIx][0]), float(candidateProjAlignments[lane][sIx][1])], [float(candidateProjAlignments[lane][sIx+1][0]), float(candidateProjAlignments[lane][sIx+1][1])], [[0.0,0.0],[float(self.vid_width),0.0],[float(self.vid_width),float(self.vid_height)],[0.0,float(self.vid_height)]])
728  if(len(intercept)==2):
729  self.projAlignments[lane] = [[int(x) for x in inter] for inter in intercept]
730  break
731 
732  return self.projAlignments
733 
734 
735  def drawProjectedGrid(self, img, grid_range=[0,120], spacing=1):
736  ''' Draw projected grid onto image. '''
737  for gridx in self.getProjectedGridX(grid_range=grid_range, spacing=spacing):
738  cv2.line(img, tuple(gridx[0]), tuple(gridx[1]), (120,120,120))
739  for gridy in self.getProjectedGridY(grid_range=grid_range, spacing=spacing):
740  cv2.line(img, tuple(gridy[0]), tuple(gridy[1]), (120,120,120))
741  return img
742 
743  def drawProjectedAlignments(self, img):
744  ''' Draw projected alignments onto image. '''
745  for lane in range(len(self.alignments)):
746  if(lane in self.alignments.sidewalks): colour=(32,114,55)
747  elif(lane in self.alignments.bikepaths): colour=(146,15,112)
748  else: colour=(255,0,255)
749  for sIx in range(len(self.alignments[lane])-1):
750  cv2.line(img, tuple(self.getProjectedAlignments()[lane][sIx]), tuple(self.getProjectedAlignments()[lane][sIx+1]), color=colour, thickness=2)
751 
752  return img
753 
754  def drawPoly(self, img, points, colour=(255, 0, 0)):
755  ''' Draw polygon onto image. '''
756  for pIx in range(len(points)-1):
757  cv2.line(img, (int(points[pIx][0]), int(points[pIx][1])), (int(points[pIx+1][0]), int(points[pIx+1][1])), color=colour, thickness=2)
758  cv2.line(img, (int(points[-1][0]), int(points[-1][1])), (int(points[0][0]), int(points[0][1])), color=colour, thickness=2)
759  return img
760 
761  def drawProjectedContour(self, img, obj, frame):
762  ''' Draw projected contour of this object onto image. '''
 763  contour = [tvaLib.Obj.homographyProject(point, self.invHomography) for point in tvaLib.Obj.contourThis(obj, frame)] # Regular objects
764  return self.drawPoly(img, [[n[0] for n in x] for x in contour])
765 
766  def drawBoundingBox(self, img, obj, frame):
767  bounds = tvaLib.Obj.boundThis(obj, frame, self.invHomography, self.vid_width, self.vid_height)
768  return self.drawPoly(img, bounds, colour=(0, 255, 0))
769 
770  def drawFig(self, fig, master=None, location=None):
771  if(location is None): location = tk.TOP
772  if(master is None): master = self.root
773  dataPlot = FigureCanvasTkAgg(fig, master=master)
774  dataPlot.get_tk_widget().configure(background=self.style.bglight, highlightcolor=self.style.bglight, highlightbackground=self.style.bglight)
775  dataPlot.show()
776  dataPlot.get_tk_widget().pack(side=location, fill=tk.BOTH, expand=1)
777  return dataPlot.get_tk_widget()
778 
779  def drawPlots(self, x, datas, fig_name, y_label='', xy_bounds=None):
 780  ''' datas is a list of y values, passed as a list of lists or a single list;
 781  x is a single list.
782  '''
783 
784  if(type(datas[0]) is not list): datas = [datas]
785 
786  fig = plt.figure(fig_name, figsize=(5,2), dpi=100)
787  ax = fig.add_subplot(111)
788  for data in datas:
789  ax.plot(x,data)
790 
791  if(xy_bounds is not None):
792  ax.set_xlim(xy_bounds[0])
793  ax.set_ylim(xy_bounds[1])
794  if(y_label): ax.set_ylabel(y_label)
795  return fig
796 
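# Usage sketch (hypothetical data): build a small speed plot and embed it in a
# Tkinter frame via drawFig().
#   fig = self.drawPlots(range(4), [[0.0, 1.2, 2.1, 2.9], [0.0, 0.8, 1.7, 2.6]],
#                        'speeds', y_label='Speed (km/h)', xy_bounds=[[0, 3], [0, 4]])
#   self.drawFig(fig, master=self.frame_data, location=tk.TOP)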
797  def drawTkinterCursor(self, event=None):
798  lowerlimit = self.frame_data.winfo_width()*0.1259
799  upperlimit = self.frame_data.winfo_width()*0.902
800  if(len(self.cursor_range) <= 1): loc = lowerlimit
801  else: loc = self.cursor_location/float(len(self.cursor_range)-1)*(upperlimit-lowerlimit)+lowerlimit
802  try:
803  for cursorIx in range(len(self.data_fig_canvas_tags)):
804  self.data_fig_canvas.coords(self.data_fig_canvas_tags[cursorIx], (loc,self.frame_data.winfo_height()*self.data_fig_canvas_y_stops_p[cursorIx*2],loc,self.frame_data.winfo_height()*self.data_fig_canvas_y_stops_p[cursorIx*2+1]))
805  self.data_fig_canvas.tag_raise(self.data_fig_canvas_tags[cursorIx])
806  except: pass
807 
808 
809  def drawGroupedObj(self, img, i, annotate_velocity=False, color=(0, 0, 0), thickness=4, **kwargs):
810  ''' Draw object onto image '''
811  cur_loc = [x for x in self.objects[i].getTimeInterval()].index(self.cursor_abs_location)
812  cvPlot(img, self.objects[i], lastInstant=cur_loc, useProjectedPositions=True, color=color, thickness=thickness, **kwargs)
813  coords1 = (int(round(self.objects[i].projectedPositions[cur_loc][0])),int(round(self.objects[i].projectedPositions[cur_loc][1])))
814  if(img.shape[1] > self.go_window_width): scaleFactor = img.shape[1]/float(self.go_window_width)
815  else: scaleFactor = 1.0
816  cv2.putText(img, '#'+str(self.objects[i].num), coords1, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
817  cv2.putText(img, '#'+str(self.objects[i].num), coords1, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), color)
818  if(self.iIx == i):
819  if(annotate_velocity):
820  cv2.putText(img, str(round(self.vel1[self.cursor_location],1)), (coords1[0],coords1[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
821  cv2.putText(img, str(round(self.vel1[self.cursor_location],1)), (coords1[0],coords1[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), color)
822  cv2.putText(img, self.local['userTypeNames'][self.objects[i].getUserType()], (coords1[0],coords1[1]+int(30*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(0.5*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
823  cv2.putText(img, self.local['userTypeNames'][self.objects[i].getUserType()], (coords1[0],coords1[1]+int(30*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(0.5*scaleFactor)), color)
824 
825 
826  def drawFeatures(self, img, i, **kwargs):
827  ''' Draw object features onto image '''
828  for f in range(len(self.objects[i].features)):
829  try: cur_loc = [x for x in self.objects[i].features[f].getTimeInterval()].index(self.cursor_abs_location)
830  except ValueError: continue
831  try: cvPlot(img, self.objects[i], lastInstant=cur_loc, useProjectedPositions=True, plotFeature=f, **kwargs)
832  except AttributeError:
833  self.projectObjects([self.objects[i].features[f]])
834  cvPlot(img, self.objects[i], lastInstant=cur_loc, useProjectedPositions=True, plotFeature=f, **kwargs)
835 
836 
837 
838 
841  def itemsReorderString(self, obj):
842  return str(obj.num)+' at f='+str(obj.getFirstInstant())
843  def itemsReorderConfig(self, obj, i="end"): pass
844 
845  def itemsReorder(self, event=None, initOnly=False, setActiveIdx=0):
846  self.item_list_active_idx = setActiveIdx
849  self.item_list.delete(0, tk.END)
850  pair_parse = self.item_list_filter_value.get().split(' '+self.local['gen_with']+' ')
851 
852  if(self.item_list_filter_value.get() in self.local['userTypeNames']):
853  ut_1 = self.local['userTypeNames'].index(self.item_list_filter_value.get())
854  for i in range(len(self.objects)):
855  if(ut_1==self.objects[i].getUserType()):
856  self.item_list_contents.append({'iIx':i, 'string': self.itemsReorderString(self.objects[i])})
857  self.item_list.insert(tk.END, self.item_list_contents[-1]['string'])
858  self.itemsReorderConfig(self.objects[i])
859 
860  elif(len(pair_parse) == 2 and pair_parse[0] in self.local['userTypeNames'] and pair_parse[1] in self.local['userTypeNames']):
861  ut_1 = self.local['userTypeNames'].index(pair_parse[0])
862  ut_2 = self.local['userTypeNames'].index(pair_parse[1])
863  for i in range(len(self.objects)):
864  if(ut_1==self.objects[i].getUserType()):
 865  ''' Find any coexisting objects. If any of these matches
 866  the second user type, then add the first object to the
 867  list and move on to the next object (i). '''
868  #Annotation-compatible verification
869  if(hasattr(self.objects[i], 'drawed_positions')):
870  i_firstInstant = min(self.objects[i].drawed_positions[0])
871  i_lastInstant = max(self.objects[i].drawed_positions[0])
872  #Trajectory-compatible verification
873  else:
874  i_firstInstant = self.objects[i].getFirstInstant()
875  i_lastInstant = self.objects[i].getLastInstant()
876  # The second pass loop
877  for j in range(len(self.objects)):
878  # Annotation-compatible verification
879  if(hasattr(self.objects[j], 'drawed_positions')):
880  j_firstInstant = min(self.objects[j].drawed_positions[0])
881  j_lastInstant = max(self.objects[j].drawed_positions[0])
882  #Trajectory-compatible verification
883  else:
884  j_firstInstant = self.objects[j].getFirstInstant()
885  j_lastInstant = self.objects[j].getLastInstant()
886  if(ut_2==self.objects[j].getUserType() and j_firstInstant < i_lastInstant and j_lastInstant > i_firstInstant):
887  self.item_list_contents.append({'iIx':i, 'string': self.itemsReorderString(self.objects[i])})
888  self.item_list.insert(tk.END, self.item_list_contents[-1]['string'])
889  self.itemsReorderConfig(self.objects[i])
890  break
891 
892  else:
893  for i in range(len(self.objects)):
894  self.item_list_contents.append({'iIx':i, 'string': self.itemsReorderString(self.objects[i])})
895  self.item_list.insert(tk.END, self.item_list_contents[-1]['string'])
896  self.itemsReorderConfig(self.objects[i])
897 
898  try: self.item_list.itemconfig(self.item_list_active_idx, bg=self.style.selection)
899  except: pass
900  if(initOnly): return True
901  self.frame_video.focus()
902  return self.callback_switchItem()
903 
904  def callback_prevItem(self, event=None):
905  if(self.item_list_active_idx==0): return False
908  return self.callback_switchItem()
909 
910  def callback_nextItem(self, event=None):
911  if(self.item_list_active_idx == len(self.item_list_contents)): return False
914  return self.callback_switchItem()
915 
916  def callback_mouseItem(self, event=None):
917  ''' Switch active item by clicking on it with the mouse. '''
918  selection = self.item_list.curselection()
919  if(selection):
920  if(self.item_list_active_idx == int(selection[0])): return False
922  self.item_list_active_idx = int(selection[0])
923  return self.callback_switchItem()
924  return False
925 
926  def callback_switchItemByoIx(self, oIx):
 927  ''' Switch the active item by referring to its object index. This method is
 928  slower than simply changing item_list_active_idx. '''
929  try:
931  self.item_list_active_idx = self.item_list_contents.index([x for x in self.item_list_contents if x['iIx']==oIx][0])
932  return self.callback_switchItem()
933  except:
934  self.status.displayWarning('Selected trajectory is not part of the filtered list of items.')
935  return False
936 
937 
938  def callback_switchItem(self, event=None):
939  try:
940  self.item_list.itemconfig(self.item_list_last_idx, bg=self.style.bgdark)
941  self.item_list.itemconfig(self.item_list_active_idx, bg=self.style.selection)
942  self.iIx = self.item_list_contents[self.item_list_active_idx]['iIx']
943  self.cursor_location = self.objects[self.iIx].getFirstInstant()
944  self.cursor_abs_location = self.objects[self.iIx].getFirstInstant()
945  self.capture.set(1, self.cursor_abs_location)
946  self.frame_video.focus()
947  return self.refresh()
948  except: pass
949 
950 
953 
 954  def active_object_show_imageBox(self):
 955  ''' Show object's imagebox. '''
956  img = self.grabVideoFrame(scrub=False, sourceImageOnly=True)
957  if(self.objects[self.iIx].hasFeatures() and len(self.objects[self.iIx].features) >= self.config.class_min_n_features):
958  imgBox = tvaLib.Obj.imageBoxTI(img, self.objects[self.iIx], frameNum=self.cursor_abs_location, width=self.vid_width, height=self.vid_height, homography=self.invHomography)
959  else:
960  imgBox,_,_,_,_ = tvaLib.Obj.imageBox(img, self.objects[self.iIx], frameNum=self.cursor_abs_location, homography=self.invHomography)
961  cv2.imshow('image',imgBox)
962  cv2.waitKey(0)
963  return True
964 
965 
 966  def active_object_show_hog(self):
 967  ''' Show HOG digest for the active item. '''
968  from cvutils import HOG as TrafIntCVUtils_HOG
969  img = self.grabVideoFrame(scrub=False, sourceImageOnly=True)
970  if(self.objects[self.iIx].hasFeatures() and len(self.objects[self.iIx].features) >= self.config.class_min_n_features):
971  imgBox = tvaLib.Obj.imageBoxTI(img, self.objects[self.iIx], frameNum=self.cursor_abs_location, width=self.vid_width, height=self.vid_height, homography=self.invHomography)
972  else:
973  imgBox,_,_,_,_ = tvaLib.Obj.imageBox(img, self.objects[self.iIx], frameNum=self.cursor_abs_location, homography=self.invHomography)
974  hog = TrafIntCVUtils_HOG(imgBox, rescaleSize=self.config.class_hogRescaleSize, orientations=self.config.class_hogNOrientations, pixelsPerCell=self.config.class_hogNPixelsPerCell, cellsPerBlock=self.config.class_hogNCellsPerBlock, blockNorm=self.config.class_hogBlockNorm)
975  self.showDataWindow(content=str(hog.tolist()))
976  return
977 
 978  def active_object_show_userTypes(self):
 979  ''' Show usertype classifications. '''
980  self.showDataWindow(content=str(self.objects[self.iIx].userTypes.values()))
981  return
982 
983 
984 
985 
988 
989  def refresh(self):
 990  ''' This is a placeholder method to be overridden by child
 991  classes. Refresh calls all the appropriate methods to load
992  and display a new frame and associated data after scrubbing or
993  changing data. '''
994  return True
995 
996 
997  def quit(self):
998  ''' Quit the program. '''
999  if(self.unsaved_changes==True):
1000  if(not tk_askyesno(self.local['UI_unsaved_title'], self.local['UI_unsaved_msg'])): return False
1001  self.root.destroy()
1002  self.root.quit()
1003 
1004 
1005 
1006 
1012  def __init__(self, videoFilename, local, sequence, eventLabels=[], **kwargs):
1013  ''' local is a pre-formatted dict-like object containing keywords in
1014  the appropriate language. See the tvaLib Specification for formatting
1015  details and examples.
1016 
1017  homography can be passed directly or as a filename.
1018 
1019  IMPORTANT: Matplotlib (mpl) MUST be initialised with mpl.use('TkAgg') as soon as mpl is imported.
1020  '''
1021 
1022 
1023  TimeseriesInterface.__init__(self, videoFilename, local, frame_data_width=200, launch=False, **kwargs)
1024  self.objects = []
1025  self.events = []
1026  self.iIx = 0
1027  self.sequence = sequence
1028 
1029 
1030 
1031  self.menubar = tk.Menu(self.root, foreground=self.style.fg, bg=self.style.bglight)
1032  self.filemenu = tk.Menu(self.menubar, tearoff=0, foreground=self.style.fg, bg=self.style.bglight, activebackground='#D9CB9E', activeforeground='#000000')
1033  self.filemenu.add_command(label=local['UI_annotate_menu_clear_a'], command=self.clearAnnotations)
1034  self.filemenu.add_command(label=local['UI_annotate_menu_load_tr'], command=self.loadTracking)
1035  self.filemenu.add_command(label=local['UI_annotate_menu_load_an'], command=self.loadAnnotations)
1036  self.filemenu.add_command(label=local['UI_annotate_menu_load_as'], command=self.loadTrackingAs)
1037  self.filemenu.add_command(label=local['UI_annotate_menu_save_an'], command=self.saveAnnotations, accelerator="Ctrl+S")
1038  self.filemenu.add_command(label=local['UI_annotate_menu_save_gt'], command=self.exportAnnotations)
1039  self.filemenu.add_command(label=local['UI_annotate_menu_save_as'], command=self.exportAnnotationsAs)
1040  self.filemenu.add_command(label=local['UI_nav_menu_quit'], command=self.quit)
1042  self.playback.add_command(label=local['UI_nav_menu_prev_keyf'], command=self.callback_scrubPrevKeyFrame, accelerator="4")
1043  self.playback.add_command(label=local['UI_nav_menu_next_keyf'], command=self.callback_scrubNextKeyFrame, accelerator="6")
1044  self.playback.add_command(label=local['UI_nav_menu_prev_event'], command=self.callback_scrubPrevEvent, accelerator="Ctrl+4")
1045  self.playback.add_command(label=local['UI_nav_menu_next_event'], command=self.callback_scrubNextEvent, accelerator="Ctrl+6")
1048  self.menubar.add_cascade(label=local['UI_nav_menu_file'], menu=self.filemenu)
1049  self.menubar.add_cascade(label=local['UI_nav_menu_playback'], menu=self.playback)
1050  self.menubar.add_cascade(label=local['UI_nav_menu_video'], menu=self.videomenu)
1051  self.root.config(menu=self.menubar, bg=self.style.bglight)
1052 
1053 
1054 
1055  self.item_list_filter_value = tk.StringVar()
1056  self.item_list_filter = tk_Combobox(self.frame_nav_list, foreground=self.style.fg, textvariable=self.item_list_filter_value, state='readonly')
1057  self.item_list_filter['values'] = ['<'+local['UI_item_list_filt_none']+'>']+['-----']+self.local['userTypeNames']+['-----']+tvaLib.flatten_list([[utypestr+' '+self.local['gen_with']+' '+utypestr2 for utypestr2 in self.local['userTypeNames']] for utypestr in self.local['userTypeNames']])
1058  self.item_list_filter.current(0)
1059  self.item_list_filter.pack(fill=tk.BOTH, expand=0, padx=(10,10), pady=(10,5))
1060  self.item_list_scrollbar = tk.Scrollbar(self.frame_nav_list)
1061  self.item_list_scrollbar.pack(side=tk.RIGHT, fill=tk.Y, padx=(0,10), pady=(5,10))
1062  self.item_list = tk.Listbox(self.frame_nav_list, foreground=self.style.fg, bg=self.style.bgdark, highlightbackground=self.style.bglight, yscrollcommand=self.item_list_scrollbar.set)
1063  self.item_list.pack(fill=tk.BOTH, expand=1, padx=(10,0), pady=(5,10))
1064  self.item_list_scrollbar.config(command=self.item_list.yview)
1065  self.itemsReorder(initOnly=True)
1066  # Items menu
1067  self.annotation_obj_menu = tk.Menu(self.root, tearoff=0)
1068  self.annotation_obj_menu.add_command(label=local['gen_delete'], command=self.annotation_RC_delete_object)
1069  self.annotation_obj_menu.add_command(label=local['UI_annotate_rc_join_with'], command=self.annotation_RC_join_object)
1070 
1071 
1072 
1073  self.data_set_label_obj = tk.Label(self.frame_data, text=self.local['UI_annotate_set_obj_data'], foreground=self.style.fg, bg=self.style.bglight)
1074  self.data_set_label_obj.pack(padx=(10,10), pady=(10,5))
1075  self.data_set_user_container = tk.Frame(self.frame_data, bg=self.style.bglight)
1076  self.data_set_user_container.pack(padx=(10,10), pady=(10,5))
1077  self.data_set_user_type_label = tk.Label(self.data_set_user_container, text=self.local['UI_annotate_set_type'], foreground=self.style.fg, bg=self.style.bglight)
1078  self.data_set_user_type_label.grid(row=1, column=1)
1079  self.data_set_user_type_value = tk.StringVar()
1080  self.data_set_user_type = tk_Combobox(self.data_set_user_container, foreground=self.style.fg, textvariable=self.data_set_user_type_value, state='readonly')
1081  self.data_set_user_type['values'] = self.local['userTypeNames']
1082  self.data_set_user_type.current(0)
1083  self.data_set_user_type.grid(row=1, column=2)
1084  self.data_set_user_age_label = tk.Label(self.data_set_user_container, text=self.local['UI_annotate_set_age'], foreground=self.style.fg, bg=self.style.bglight)
1085  self.data_set_user_age_label.grid(row=2, column=1)
1086  self.data_set_user_age_value = tk.StringVar()
1087  self.data_set_user_age = tk_Combobox(self.data_set_user_container, foreground=self.style.fg, textvariable=self.data_set_user_age_value, state='readonly')
1088  self.data_set_user_age['values'] = self.local['userAges']
1089  self.data_set_user_age.current(0)
1090  self.data_set_user_age.grid(row=2, column=2)
1091  self.data_set_user_gender_label = tk.Label(self.data_set_user_container, text=self.local['UI_annotate_set_gender'], foreground=self.style.fg, bg=self.style.bglight)
1092  self.data_set_user_gender_label.grid(row=3, column=1)
1093  self.data_set_user_gender_value = tk.StringVar()
1094  self.data_set_user_gender = tk_Combobox(self.data_set_user_container, foreground=self.style.fg, textvariable=self.data_set_user_gender_value, state='readonly')
1095  self.data_set_user_gender['values'] = self.local['userGenders']
1096  self.data_set_user_gender.current(0)
1097  self.data_set_user_gender.grid(row=3, column=2)
1098  self.data_set_label_event = tk.Label(self.frame_data, text=self.local['UI_annotate_set_event_lb'], foreground=self.style.fg, bg=self.style.bglight)
1099  self.data_set_label_event.pack(padx=(10,10), pady=(5,5))
1100  self.data_set_event_btn_container = tk.Frame(self.frame_data, bg=self.style.bglight)
1101  self.data_set_event_btn_container.pack(padx=(10,10), pady=(5,5))
1102  self.data_set_event = []
1103  charachtersPerRow=30
1104  activeRow=1
1105  activeColumn = 1
1106  rowCharachters=0
1107  for i in range(len(eventLabels)):
1108  self.data_set_event.append(tk.Button(self.data_set_event_btn_container, text=eventLabels[i], command=lambda name=i+1: self.events_mark(type=name), foreground=self.style.fg, bg=self.style.bglight))
1109  self.data_set_event[-1].grid(row=activeRow,column=activeColumn)
1110  rowCharachters += len(eventLabels[i])+4
1111  activeColumn += 1
1112  if(rowCharachters > charachtersPerRow):
1113  activeRow += 1
1114  activeColumn = 1
1115  rowCharachters = 0
1116 
1117 
1118 
1119  self.projectObjects(self.objects)
1120  self.loadData()
1121 
1122 
1123 
1124  self.item_list_filter.bind('<<ComboboxSelected>>', self.itemsReorder)
1125  self.item_list.bind('<<ListboxSelect>>', self.callback_mouseItem)
1126  self.item_list.bind('<Button-3>', lambda e: self.annotation_rightClick(e))
1127  self.root.bind('<Control-s>', self.saveAnnotations)
1128  self.root.bind('<Control-n>', self.annotation_new_object)
1129  self.root.bind('<Control-Delete>', self.annotation_delete_active_object)
1130  self.video_window.bind('<Button-1>', self.annotation_addPosition)
1131  self.video_window.bind('<Control-Button-1>', self.annotation_select_nearest_trajectory)
1132  self.root.bind('<Delete>', self.annotation_deletePosition)
1133  self.root.bind('4', self.callback_scrubPrevKeyFrame)
1134  self.root.bind('6', self.callback_scrubNextKeyFrame)
1135  self.root.bind('<Control-4>', self.callback_scrubPrevEvent)
1136  self.root.bind('<Control-6>', self.callback_scrubNextEvent)
1137  self.data_set_user_type.bind('<<ComboboxSelected>>', self.annotation_set_userType)
1138  self.data_set_user_age.bind('<<ComboboxSelected>>', self.annotation_set_age)
1139  self.data_set_user_gender.bind('<<ComboboxSelected>>', self.annotation_set_gender)
1140 
1141 
1142 
1143  self.root.focus_force()
1144  self.root.mainloop()
1145 
1146 
1149 
1150  def callback_scrubPrevKeyFrame(self, event=None):
1151  keyframes = sorted(list(self.objects[self.iIx].drawed_positions[0]))
1152  if(self.cursor_location <= min(keyframes)): return False
1153  return self.callback_scrubTo(max([k for k in keyframes if k < self.cursor_location]))
1154 
1155  def callback_scrubNextKeyFrame(self, event=None):
1156  keyframes = sorted(list(self.objects[self.iIx].drawed_positions[0]))
1157  if(self.cursor_location >= max(keyframes)): return False
1158  return self.callback_scrubTo(min([k for k in keyframes if k > self.cursor_location]))
1159 
1160  def callback_scrubPrevEvent(self, event=None):
1161  self.events.sort(key=lambda x: x.frame, reverse=False)
1162  eventFrames = [x.frame for x in self.events]
1163  if(self.cursor_location <= eventFrames[0]): return False
1164  return self.callback_scrubTo(max([k for k in eventFrames if k < self.cursor_location]))
1165 
1166  def callback_scrubNextEvent(self, event=None):
1167  self.events.sort(key=lambda x: x.frame, reverse=False)
1168  eventFrames = [x.frame for x in self.events]
1169  if(self.cursor_location >= eventFrames[-1]): return False
1170  return self.callback_scrubTo(min([k for k in eventFrames if k > self.cursor_location]))
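# A minimal sketch of the scrubbing rule used by the four callbacks above,
# with illustrative values only: given keyframes (or event frames) [10, 25, 40]
# and cursor_location 30,
#   max([k for k in keyframes if k < 30])  # -> 25 (previous keyframe)
#   min([k for k in keyframes if k > 30])  # -> 40 (next keyframe)
# i.e. the cursor jumps to the nearest frame strictly before or after it.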
1171 
1172  def callback_switchItem(self, event=None):
1173  try:
1174  self.item_list.itemconfig(self.item_list_last_idx, bg=self.style.bgdark)
1175  self.item_list.itemconfig(self.item_list_active_idx, bg=self.style.selection)
1176  self.iIx = self.item_list_contents[self.item_list_active_idx]['iIx']
1177  self.cursor_location = self.objects[self.iIx].getFirstInstant()
1178  self.cursor_abs_location = self.objects[self.iIx].getFirstInstant()
1179  self.capture.set(1, self.cursor_abs_location)
1180  self.frame_video.focus()
1181  if(len(self.objects) > 0):
1182  self.data_set_user_type.current(self.objects[self.iIx].getUserType())
1183  self.data_set_user_age.current(self.objects[self.iIx].age)
1184  self.data_set_user_gender.current(self.objects[self.iIx].gender)
1185  else:
1186  self.data_set_user_type.current(0)
1187  self.data_set_user_age.current(0)
1188  self.data_set_user_gender.current(0)
1189  return self.refresh()
1190  except: pass
1191 
1192 
1193  def itemsReorderString(self, obj):
1194  return str(obj.num)+' at f='+str(obj.getFirstInstant())+' ['+self.local['userTypeNames'][obj.getUserType()][0]+']'
1195  def itemsReorderConfig(self, obj, i='end'):
1196  if(obj.getUserType()==0):
1197  self.item_list.itemconfig(i, bg=self.style.error)
1198 
1199 
1200 
1203  def drawBoundingBoxAnnotationObj(self, img, obj, frame, keyframes):
1204  ''' Draw projected bounding box of this object onto image. '''
1205  x,y = tvaLib.Obj.homographyProject(self.annotation_interpolatePosition(obj, frame, keyframes), self.homography)
1206  x2,y2 = tvaLib.Obj.homographyProject(self.annotation_interpolatePosition(obj, frame+1, keyframes), self.homography)
1207  contour = [tvaLib.Obj.homographyProject(point, self.invHomography) for point in tvaLib.Obj.contourClassified(obj, x, y, x2-x, y2-y)] # Annotations
1208  xmin = min([point[0][0] for point in contour])
1209  xmax = max([point[0][0] for point in contour])
1210  ymin = min([point[1][0] for point in contour])
1211  ymax = max([point[1][0] for point in contour])
1212  bounds = tvaLib.Obj.boundFromExtrema(self.vid_width, self.vid_height, xmin, xmax, ymin, ymax)
1213  return self.drawPoly(img, bounds, colour=(0, 255, 0))
1214 
1215  def drawProjectedContourAnnotationObj(self, img, obj, frame, keyframes):
1216  ''' Draw projected contour of this object onto image. '''
1217  if(frame > max(keyframes)): return None
1218  x,y = tvaLib.Obj.homographyProject(self.annotation_interpolatePosition(obj, frame, keyframes), self.homography)
1219  if(frame==max(keyframes)): x2,y2 = tvaLib.Obj.homographyProject(self.annotation_interpolatePosition(obj, frame-1, keyframes), self.homography)
1220  else: x2,y2 = tvaLib.Obj.homographyProject(self.annotation_interpolatePosition(obj, frame+1, keyframes), self.homography)
1221  contour = [tvaLib.Obj.homographyProject(point, self.invHomography) for point in tvaLib.Obj.contourClassified(obj, x, y, x2-x, y2-y)] # Annotations
1222  return self.drawPoly(img, contour)
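# Both drawing helpers above rely on planar homography projection; as a hedged
# sketch (the behaviour of tvaLib.Obj.homographyProject is inferred from its use
# here), mapping a point p = (x, y) with a 3x3 homography H amounts to
#   [u, v, w] = H . [x, y, 1]
#   x' = u / w,  y' = v / w
# with self.homography taking image pixels to world coordinates and
# self.invHomography taking world coordinates back to image pixels.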
1223 
1224 
1225 
1228 
1229  def annotation_rightClick(self, event):
1230  ''' Context menu for item box '''
1231  index = event.widget.nearest(event.y)
1232  _, yoffset, _, height = event.widget.bbox(index)
1233  if(event.y > height + yoffset + 5): return False
1235  self.annotation_obj_menu.post(event.x_root, event.y_root)
1236 
1237  def annotation_RC_join_object(self, event=None):
1239 
1240  def annotation_RC_delete_object(self, event=None):
1242 
1243  def annotation_delete_active_object(self, event=None):
1244  return self.annotation_delete_object(byId=self.item_list_active_idx)
1245 
1246 
1247 
1250  def annotation_new_object(self, event=None):
1251  self.iIx = len(self.objects)
1252  if(len(self.objects) == 0): new_num = 0
1253  else: new_num = self.objects[-1].num+1
1254  self.objects.append(MovingObject(num=new_num, timeInterval=TimeInterval(self.cursor_abs_location,self.cursor_abs_location), positions=Trajectory(), velocities=Trajectory()))
1255  self.projectObjects([self.objects[self.iIx]])
1256  self.objects[self.iIx].drawed_positions = [{},{}]
1257  self.itemsReorder(setActiveIdx=self.iIx)
1258  self.unsaved_changes = True
1259 
1260  self.objects[-1].age = 0
1261  self.objects[-1].gender = 0
1262  return True
1263 
1264  def annotation_join_object(self, event=None, byId=0):
1265  if(byId==self.iIx): return False
1266  self.objects[self.iIx].drawed_positions[0] = tvaLib.mergeDicts(self.objects[self.iIx].drawed_positions[0], self.objects[byId].drawed_positions[0])
1267  self.objects[self.iIx].drawed_positions[1] = tvaLib.mergeDicts(self.objects[self.iIx].drawed_positions[1], self.objects[byId].drawed_positions[1])
1268  oldNum = self.objects[byId].num
1269  self.objects[byId] = None
1270  self.objects = filter(None, self.objects)
1271  self.itemsReorder()
1272  self.unsaved_changes = True
1273  self.status.displayMsg('Added keyframes of object '+str(oldNum)+' to obj '+str(self.objects[self.iIx].num))
1274  return True
1275 
1276  def annotation_delete_object(self, event=None, byId=0):
1277  self.objects[byId] = None
1278  self.objects = filter(None, self.objects)
1279  self.itemsReorder()
1280  self.unsaved_changes = True
1281  return True
1282 
1283  def annotation_set_userType(self, event=None, force=None):
1284  if(type(force) is int): ut = force
1285  else: ut = self.data_set_user_type.current()
1286  if(self.objects[self.iIx].getUserType()==ut): return False
1287  self.objects[self.iIx].setUserType(ut)
1288  self.itemsReorder(setActiveIdx=self.item_list_active_idx)
1289  self.unsaved_changes = True
1290  return True
1291 
1292  def annotation_set_age(self, event=None, force=None):
1293  if(type(force) is int): a = force
1294  else: a = self.data_set_user_age.current()
1295  if(self.objects[self.iIx].age==a): return False
1296  self.objects[self.iIx].age = a
1297  self.itemsReorder(setActiveIdx=self.item_list_active_idx)
1298  self.unsaved_changes = True
1299  return True
1300 
1301  def annotation_set_gender(self, event=None, force=None):
1302  if(type(force) is int): g = force
1303  else: g = self.data_set_user_gender.current()
1304  if(self.objects[self.iIx].gender==g): return False
1305  self.objects[self.iIx].gender = g
1306  self.itemsReorder(setActiveIdx=self.item_list_active_idx)
1307  self.unsaved_changes = True
1308  return True
1309 
1310  def annotation_addPosition(self, event):
1311  if(not self.objects): return False
1312  x,y = self.convertMeasuredToRealPixels(event.x, event.y)
1313  self.objects[self.iIx].drawed_positions[0][self.cursor_abs_location] = x
1314  self.objects[self.iIx].drawed_positions[1][self.cursor_abs_location] = y
1315  self.refresh(scrub=False)
1316  self.unsaved_changes = True
1317  return True
1318 
1319  def annotation_deletePosition(self, event):
1320  if(not self.objects): return False
1321  del self.objects[self.iIx].drawed_positions[0][self.cursor_abs_location]
1322  del self.objects[self.iIx].drawed_positions[1][self.cursor_abs_location]
1323  self.refresh(scrub=False)
1324  self.unsaved_changes = True
1325  return True
1326 
1327  def annotation_projectPositions(self, obj):
1328  keyframes = sorted(list(obj.drawed_positions[0]))
1329 
1330  for f in keyframes:
1331  coords = tvaLib.Obj.homographyProject([obj.drawed_positions[0][f],obj.drawed_positions[1][f]], self.homography)
1332  obj.drawed_positions[0][f] = coords[0]
1333  obj.drawed_positions[1][f] = coords[1]
1334 
1335  return obj
1336 
1337  def annotation_invProjectPositions(self, obj):
1338  keyframes = sorted(list(obj.drawed_positions[0]))
1339 
1340  for f in keyframes:
1341  coords = tvaLib.Obj.homographyProject([obj.drawed_positions[0][f],obj.drawed_positions[1][f]], self.invHomography)
1342  obj.drawed_positions[0][f] = int(coords[0])
1343  obj.drawed_positions[1][f] = int(coords[1])
1344 
1345  return obj
1346 
1347  def annotation_createFeature(self, obj):
1348  ''' Add trajectory data as a single feature. '''
1349  obj.features = [deepcopy(obj)]
1350  return obj
1351 
1352 
1353  def annotation_interpolatePosition(self, obj, frame, keyframes):
1354  ''' Interpolate position at frame from list of keyframes. '''
1355  if(frame in keyframes):
1356  x = obj.drawed_positions[0][frame]
1357  y = obj.drawed_positions[1][frame]
1358  else:
1359  # Find nearest keyframes
1360  f_prev = max([k for k in keyframes if k <= frame])
1361  try: f_next = min([k for k in keyframes if k > frame])
1362  except ValueError: f_next = keyframes[-1]
1363  # Linear interpolation between two nearest keyframes
1364  ratio = (frame-f_prev)/float(f_next-f_prev)
1365  x = obj.drawed_positions[0][f_prev] + (obj.drawed_positions[0][f_next]-obj.drawed_positions[0][f_prev])*ratio
1366  y = obj.drawed_positions[1][f_prev] + (obj.drawed_positions[1][f_next]-obj.drawed_positions[1][f_prev])*ratio
1367 
1368  return x,y
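# Worked example of the linear interpolation above, using illustrative values:
# with keyframes [10, 20], drawed_positions[0] = {10: 100.0, 20: 140.0} and
# frame = 15,
#   ratio = (15 - 10) / float(20 - 10)   # -> 0.5
#   x = 100.0 + (140.0 - 100.0) * 0.5    # -> 120.0
# and the y channel is interpolated the same way from drawed_positions[1].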
1369 
1370  def annotation_interpolatePositions(self, obj):
1371  ''' Interpolate keyframes to positions. '''
1372 
1373  keyframes = sorted(list(obj.drawed_positions[0]))
1374  frames = range(keyframes[-1]-keyframes[0])
1375 
1376  obj.timeInterval.first = keyframes[0] # the first keyframe might not coincide with the object's original first frame
1377  obj.timeInterval.last = obj.timeInterval.first+len(frames)-1
1378  obj.positions.positions[0] = []
1379  obj.positions.positions[1] = []
1380  obj.velocities.positions[0] = []
1381  obj.velocities.positions[1] = []
1382 
1383  for f in frames:
1384  x,y = self.annotation_interpolatePosition(obj, f+keyframes[0], keyframes)
1385  obj.positions.positions[0].append(x)
1386  obj.positions.positions[1].append(y)
1387  for f in range(len(frames)-1):
1388  obj.velocities.positions[0].append(obj.positions.positions[0][f+1]-obj.positions.positions[0][f])
1389  obj.velocities.positions[1].append(obj.positions.positions[1][f+1]-obj.positions.positions[1][f])
1390  return obj
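# Note on the velocity reconstruction above: velocities are first differences of
# consecutive interpolated positions, e.g. (illustrative values)
#   positions x = [0.0, 0.5, 1.5]  ->  velocities x = [0.5, 1.0]
# so each velocity trajectory is one sample shorter than the position trajectory.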
1391 
1392  def annotation_select_nearest_trajectory(self, event):
1393  x,y = self.convertMeasuredToRealPixels(event.x, event.y)
1394  candidate_spline_oixs = []
1395  candidate_splines = []
1396  for i in range(len(self.objects))[:self.iIx]+range(len(self.objects))[self.iIx+1:]:
1397  keyframes = sorted(list(self.objects[i].drawed_positions[0]))
1398  if((True in [self.cursor_location >= k for k in keyframes] and True in [self.cursor_location <= k for k in keyframes])):
1399  candidate_spline_oixs.append(i)
1400  candidate_splines.append([[self.objects[i].drawed_positions[0][f],self.objects[i].drawed_positions[1][f]] for f in keyframes])
1401  if(not candidate_splines): return False
1402 
1403  [spline,_,_,_] = tvaLib.Geo.matchSplineNearest(x, y, candidate_splines)
1404 
1405  return self.callback_switchItemByoIx(oIx=candidate_spline_oixs[spline])
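# Sketch of the selection rule above, with illustrative values: if
# candidate_splines holds the polylines [[0, 0], [10, 0]] and [[0, 5], [10, 5]]
# and the click maps to (4, 1), tvaLib.Geo.matchSplineNearest (whose first
# return value is taken here to be the index of the nearest spline) would be
# expected to return spline 0, so candidate_spline_oixs[0] becomes the active item.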
1406 
1407 
1410  def events_mark(self, event=None, type=1):
1411  try: num = self.objects[self.iIx].num
1412  except IndexError: num = None
1413  event_matches = [event_.num==num and event_.frame==self.cursor_abs_location and event_.type==type for event_ in self.events]
1414  if(True in event_matches):
1415  x = event_matches.index(True)
1416  self.data_set_event[self.events[x].type-1].configure(bg=self.style.bglight)
1417  self.events[x] = None
1418  self.events = filter(None, self.events)
1419  else:
1420  self.events.append(Event(num=num, frame=self.cursor_abs_location, type=type))
1421  self.data_set_event[type-1].configure(bg=self.style.accent2)
1422  return True
1423 
1424  def events_save(self, event=None):
1425  if(self.events):
1426  with open(self.sequence.getFullEventsFilename(), 'wb') as output:
1427  pickle.dump(self.events, output, protocol=2)
1428  self.events_export()
1429  return True
1430 
1431  def events_export(self, event=None):
1432  if(self.events):
1433  with open(self.sequence.getFullEventsCSVFilename(), 'wb') as f:
1434  writer = csv_writer(f, delimiter=',')
1435  writer.writerow(['FRAME','NUM','TYPE'])
1436  for event in self.events:
1437  writer.writerow([str(event.frame),str(event.num),str(event.type)])
1438  return True
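# For reference, the CSV written above contains one header row followed by one
# row per marked event, e.g. (illustrative values only):
#   FRAME,NUM,TYPE
#   1204,17,2
#   1250,None,1
# where NUM is the associated object number ('None' if no object was active)
# and TYPE is the 1-based index of the event button that was pressed.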
1439 
1440  def events_load(self, event=None):
1441  if(not os.path.exists(self.sequence.getFullEventsFilename())):
1442  return False
1443  with open(self.sequence.getFullEventsFilename(), 'rb') as input_data:
1444  self.events = pickle.load(input_data)
1445  return True
1446 
1447 
1450  def clearAnnotations(self, event=None, verbose=1):
1451  self.objects = []
1452  self.itemsReorder()
1453  self.refresh()
1454  self.unsaved_changes = False
1455  if(verbose): self.status.displayMsg('Flushed annotations')
1456  return True
1457 
1458  def loadTracking(self, event=None, filename='', interframeLoadCount=10, minimumTrajectoryLength=20):
1459  if(self.unsaved_changes==True):
1460  if(not tk_askyesno(self.local['UI_unsaved_title'], self.local['UI_unsaved_msg'])): return False
1461  self.clearAnnotations(verbose=0)
1462  if(filename):
1463  self.status.displayMsg('Loading tracking from "'+filename+'"...')
1464  self.objects = tvaLib.Obj.loadObjects(filename, suppress_features=True)
1465  else:
1466  self.status.displayMsg('Loading tracking from (default) file...')
1467  self.objects = self.sequence.loadObjects(suppress_features=True)
1468 
1469  for i in range(len(self.objects)):
1470  if(len(self.objects[i].getXCoordinates()) < minimumTrajectoryLength):
1471  self.objects[i] = None
1472  continue
1473  self.objects[i].drawed_positions = [{},{}]
1474  for point in range(0, len(self.objects[i].getXCoordinates()), interframeLoadCount):
1475  self.objects[i].drawed_positions[0][self.objects[i].getFirstInstant()+point] = self.objects[i].getXCoordinates()[point]
1476  self.objects[i].drawed_positions[1][self.objects[i].getFirstInstant()+point] = self.objects[i].getYCoordinates()[point]
1477  self.objects[i] = self.annotation_invProjectPositions(self.objects[i])
1478  self.objects = filter(None, self.objects)
1479 
1480  self.itemsReorder()
1481  self.refresh()
1482  self.status.displayMsg('Loaded tracking from file')
1483  return True
1484 
1485  def loadTrackingAs(self, event=None, **kwargs):
1486  filename = askopenfilename(parent=self.root, title=self.local['UI_annotate_menu_load_as'], filetypes=[('SQLITE', '.sqlite')], initialdir=os.path.split(self.sequence.getFullGroundTruthFilename())[0], initialfile=os.path.split(self.sequence.getFullGroundTruthFilename())[1])
1487  return self.loadTracking(filename=filename, **kwargs)
1488 
1489 
1490  def loadAnnotations(self, event=None):
1491  if(not os.path.exists(self.sequence.getFullAnnotationFilename())):
1492  self.status.displayMsg('Annotation file not found')
1493  return False
1494  with open(self.sequence.getFullAnnotationFilename(), 'rb') as input_data:
1495  self.objects = pickle.load(input_data)
1496  self.events_load()
1497 
1498  for obj in self.objects:
1499  try: obj.age
1500  except: obj.age = 0
1501  try: obj.gender
1502  except: obj.gender = 0
1503  self.itemsReorder()
1504  self.refresh()
1505  self.status.displayMsg('Loaded annotations from file')
1506  return True
1507 
1508 
1509  def saveAnnotations(self, event=None):
1510 
1511  with open(self.sequence.getFullAnnotationFilename(), 'wb') as output:
1512  pickle.dump(self.objects, output, protocol=2)
1513  self.events_save()
1514  self.unsaved_changes = False
1515  self.status.displayMsg('Saved annotations to file')
1516  return True
1517 
1518  def exportAnnotations(self, event=None, filename=None):
1519  import storage as TrafIntStorage
1520 
1521  objects = deepcopy(self.objects)
1522 
1523  for obj in objects:
1524  obj = self.annotation_projectPositions(obj)
1525  obj = self.annotation_interpolatePositions(obj)
1526  obj = self.annotation_createFeature(obj)
1527 
1528  try:
1529  if(filename is None): filename = self.sequence.getFullGroundTruthFilename()
1530 
1531  if(os.path.isfile(filename)): os.unlink(filename)
1532  TrafIntStorage.saveTrajectoriesToSqlite(filename, objects, trajectoryType='object')
1533  self.status.displayMsg('Exported annotations as trajectories to file '+os.path.split(filename)[1])
1534  return True
1535  except IOError:
1536  self.status.displayMsg('There was a problem writing to disk')
1537  return False
1538 
1539  def exportAnnotationsAs(self, event=None):
1540  filename = asksaveasfilename(parent=self.root, title=self.local['UI_annotate_menu_save_as'], defaultextension='.sqlite', filetypes=[('SQLITE', '.sqlite')], initialdir=os.path.split(self.sequence.getFullGroundTruthFilename())[0], initialfile=os.path.split(self.sequence.getFullGroundTruthFilename())[1])
1541  return self.exportAnnotations(filename=filename)
1542 
1543 
1544 
1547  def loadData(self):
1548  ''' Load and set up video data '''
1549  self.cursor_range = range(int(self.capture.get(7)))
1550  self.cursor_location = 0
1551  self.cursor_abs_location = 0
1553 
1554 
1555  self.capture.set(1, self.cursor_abs_location)
1556 
1557  return self.refresh()
1558 
1559 
1560  def refresh(self, colour=(92, 39, 227), thickness=4, scrub=True):
1561  ''' Refresh dynamic/visual content when playback occurs.
1562 
1563  Note:
1564  =====
1565  Opencv has reversed channels (BGR)
1566  '''
1567 
1568 
1569  img = self.grabVideoFrame(scrub=scrub)
1570  if(img is None): return False
1571 
1572  objCount = 0
1573  objTypes = []
1574  for i in range(len(self.objects))[:self.iIx]+range(len(self.objects))[self.iIx+1:]+[self.iIx]:
1575  if(len(self.objects)>0):
1576  keyframes = sorted(list(self.objects[i].drawed_positions[0]))
1577  if(self.iIx == i or (True in [self.cursor_location >= k for k in keyframes] and True in [self.cursor_location <= k for k in keyframes])):
1578 
1579  if(self.iIx == i):
1580 
1581  for p1,p2 in zip(keyframes[:-1],keyframes[1:]):
1582  cv2.line(img, (self.objects[i].drawed_positions[0][p1],self.objects[i].drawed_positions[1][p1]), (self.objects[i].drawed_positions[0][p2],self.objects[i].drawed_positions[1][p2]), colour, thickness=thickness)
1583  for p in keyframes:
1584  if(p==self.cursor_abs_location):
1585  cv2.circle(img, (self.objects[i].drawed_positions[0][p],self.objects[i].drawed_positions[1][p]), 11, (0, 0, 0), thickness=2)
1586  cv2.circle(img, (self.objects[i].drawed_positions[0][p],self.objects[i].drawed_positions[1][p]), 10, (139, 26, 85), thickness=-1)
1587  else:
1588  cv2.circle(img, (self.objects[i].drawed_positions[0][p],self.objects[i].drawed_positions[1][p]), 7, colour, thickness=-1)
1589 
1590  if(self.videomenu_show_contours.get()):
1591  if(self.objects[i].getXCoordinates()): self.drawProjectedContour(img, self.objects[i], self.cursor_location)
1592  else: self.drawProjectedContourAnnotationObj(img, self.objects[i], self.cursor_location, keyframes)
1593 
1594  if(self.videomenu_show_bounding_boxes.get()):
1595  if(self.objects[i].hasFeatures()): self.drawBoundingBox(img, self.objects[i], self.cursor_location)
1596  else: self.drawBoundingBoxAnnotationObj(img, self.objects[i], self.cursor_location, keyframes)
1597 
1598  else:
1599 
1600  pos_estimate = self.annotation_interpolatePosition(self.objects[i], self.cursor_location, keyframes)
1601 
1602  if(self.videomenu_show_align.get()):
1603  coords = tvaLib.Obj.homographyProject([float(pos_estimate[0]),float(pos_estimate[1])], self.homography)
1604  [_,_,x_snap,y_snap] = tvaLib.Geo.matchSplineNearest(coords[0], coords[1], self.alignments)
1605  coords = tvaLib.Obj.homographyProject([x_snap,y_snap], self.invHomography)
1606  cv2.line(img, (int(pos_estimate[0]), int(pos_estimate[1])), (int(coords[0]),int(coords[1])), (0,0,255), thickness=2)
1607  cv2.circle(img, (int(coords[0]), int(coords[1])), 7, (0,0,255), thickness=-1)
1608 
1609 
1610  for p1,p2 in zip(keyframes[:-1],keyframes[1:]):
1611  cv2.line(img, (self.objects[i].drawed_positions[0][p1],self.objects[i].drawed_positions[1][p1]), (self.objects[i].drawed_positions[0][p2],self.objects[i].drawed_positions[1][p2]), (255,255,255), thickness=2)
1612  cv2.circle(img, (int(pos_estimate[0]), int(pos_estimate[1])), 11, (0, 0, 0), thickness=2)
1613  cv2.circle(img, (int(pos_estimate[0]), int(pos_estimate[1])), 10, (0, 128, 0), thickness=-1)
1614 
1615  objCount += 1
1616  objTypes.append(self.objects[i].getUserType())
1617 
1618 
1619  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
1620  if(img.shape[1] > self.go_window_width): img = cv2.resize(img, (self.go_window_width,600))
1621  img = self.Image.fromarray(img)
1622  imgtk = self.ImageTk.PhotoImage(image=img)
1623  self.video_window.imgtk = imgtk
1624  self.video_window.configure(image=imgtk)
1625 
1626 
1627  self.refreshTimeStamps()
1628  try: self.status.setDefault(objs_str=str(objCount), class_str='('+','.join([self.local['userTypeNames'][x] for x in objTypes])+')')
1629  except: self.status.setDefault(objs_str=str(objCount), class_str='-')
1630 
1631 
1632 
1633  for btn in self.data_set_event:
1634  btn.configure(bg=self.style.bglight)
1635  for event in self.events:
1636  if(event.frame==self.cursor_abs_location):
1637  self.data_set_event[event.type-1].configure(bg=self.style.accent2)
1638  return True
1639 
1640 
1641 
1647  def __init__(self, objects, videoFilename, local, **kwargs):
1648  ''' local is a pre-formatted dict-like object containing keywords in
1649  the appropriate language. See the tvaLib Specification for formatting
1650  details and examples.
1651 
1652  homography can be passed directly or as a filename.
1653 
1654  IMPORTANT: Matplotlib (mpl) MUST be initialised with mpl.use('TkAgg') as soon as mpl is imported.
1655  '''
1656 
1657 
1658  TimeseriesInterface.__init__(self, videoFilename, local, launch=False, **kwargs)
1659  self.objects = objects
1660  self.iIx = 0
1661 
1662 
1663  self.item_list_filter_value = tk.StringVar()
1664  self.item_list_filter = tk_Combobox(self.frame_nav_list, foreground=self.style.fg, textvariable=self.item_list_filter_value, state='readonly')
1665  self.item_list_filter['values'] = ['<'+local['UI_item_list_filt_none']+'>']+self.local['userTypeNames']
1666  self.item_list_filter.current(0)
1667  self.item_list_filter.pack(fill=tk.BOTH, expand=0, padx=(10,10), pady=(10,5))
1668  self.item_list_scrollbar = tk.Scrollbar(self.frame_nav_list)
1669  self.item_list_scrollbar.pack(side=tk.RIGHT, fill=tk.Y, padx=(0,10), pady=(5,10))
1670  self.item_list = tk.Listbox(self.frame_nav_list, foreground=self.style.fg, bg=self.style.bgdark, highlightbackground=self.style.bglight, yscrollcommand=self.item_list_scrollbar.set)
1671  self.item_list.pack(fill=tk.BOTH, expand=1, padx=(10,0), pady=(5,10))
1672  self.item_list_scrollbar.config(command=self.item_list.yview)
1673  self.itemsReorder(initOnly=True)
1674 
1675 
1676  self.data_fig, [self.nobj_fig,self.avgs_fig] = plt.subplots(2, figsize=(5,4), dpi=100, sharex=True)
1677  self.data_fig.patch.set_alpha(0.0)
1678 
1679  self.nobj_fig.set_ylim([0,60])
1680  self.nobj_fig.set_ylabel(local['UI_fig_num_objects'], fontsize=10)
1681  self.nobj_fig.patch.set_facecolor(self.style.bgdark)
1682  plt.setp([self.nobj_fig.get_xticklines(), self.nobj_fig.get_yticklines()], color=self.style.bgborder)
1683 
1684  self.avgs_fig.set_ylim([0,10])
1685  self.avgs_fig.set_ylabel(local['gen_speed'], fontsize=10)
1686  self.avgs_fig.patch.set_facecolor(self.style.bgdark)
1687  plt.setp([self.avgs_fig.get_xticklines(), self.avgs_fig.get_yticklines()], color=self.style.bgborder)
1688 
1689  self.nobj_fig.set_xlim([0,10])
1690  self.data_fig_canvas = self.drawFig(self.data_fig, master=self.frame_data)
1691  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='speed')
1692  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='nfeat')
1693  self.data_fig_canvas_tags = ['speed', 'nfeat']
1694  self.data_fig_canvas_y_stops_p = [0.100, 0.465, 0.535, 0.900]
1695 
1696 
1697 
1698  self.projectObjects(self.objects)
1699  self.loadData()
1700 
1701 
1702 
1703  self.item_list_filter.bind('<<ComboboxSelected>>', self.itemsReorder)
1704  self.item_list.bind('<<ListboxSelect>>', self.callback_mouseItem)
1705  self.data_fig_canvas.bind('<Button 1>', self.callback_scrubTarget)
1706 
1707 
1708 
1709  self.root.focus_force()
1710  self.root.mainloop()
1711 
1712 
1713 
1716  def loadData(self):
1717  ''' Load and set up video data '''
1718  last_frame = 0
1719  for obj in self.objects:
1720  if(last_frame < obj.getLastInstant()): last_frame = obj.getLastInstant()
1721  self.cursor_range = range(last_frame)
1725 
1726  nobj_x = range(0,len(self.cursor_range),int(60*self.fps))
1727  nobj = [0 for x in nobj_x]
1728  avgs = [[] for x in nobj_x]
1729  for obj in self.objects:
1730  for i in range(len(nobj_x)):
1731  if(i < len(nobj_x)-1): condition = obj.getFirstInstant() >= nobj_x[i] and obj.getFirstInstant() < nobj_x[i+1]
1732  else: condition = obj.getFirstInstant() >= nobj_x[i]
1733  if(condition):
1734  nobj[i] += 1
1735  avgs[i].append(sum([m.sqrt(x**2+y**2) for x,y in zip(obj.velocities.getXCoordinates(),obj.velocities.getYCoordinates())])/float(len(obj.velocities.getXCoordinates())))
1736  break
1737  avgs = [sum(avg)/float(len(avg))*self.speed_conv if avg else 0 for avg in avgs]
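# Sketch of the binning above, assuming an illustrative fps of 30: bins start
# every 60*30 = 1800 frames (nobj_x = [0, 1800, 3600, ...]); each object is
# counted once, in the bin containing its first instant, and its mean speed
# sqrt(vx**2 + vy**2) is collected there, so avgs holds the per-bin average
# speed (scaled by self.speed_conv) plotted alongside the object counts.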
1738 
1739 
1740  self.capture.set(1, self.cursor_abs_location)
1741 
1742 
1743  majorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*15)
1744  minorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*1)
1745 
1746  try:
1747  self.nobj_fig_data1.pop(0).remove()
1748  except: pass
1749  self.nobj_fig_data1 = self.nobj_fig.plot(nobj_x, nobj, color=self.style.accent1, linewidth=2)
1750  self.nobj_fig.set_ylim([0,int(round(max(nobj)*1.1,0))])
1751  self.nobj_fig.xaxis.set_major_locator(majorLocator)
1752  self.nobj_fig.xaxis.set_minor_locator(minorLocator)
1753  self.nobj_fig.xaxis.set_tick_params(length=80, which='major')
1754  self.nobj_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
1755  self.nobj_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
1756  self.nobj_fig.yaxis.grid(color=self.style.bgborder)
1757 
1758  try:
1759  self.avgs_fig_data1.pop(0).remove()
1760  except: pass
1761  self.avgs_fig_data1 = self.avgs_fig.plot(nobj_x, avgs, color=self.style.accent2, linewidth=2)
1762  self.avgs_fig.set_ylim([0,int(round(max(avgs)*1.1,0))])
1763  self.avgs_fig.xaxis.set_major_locator(majorLocator)
1764  self.avgs_fig.xaxis.set_minor_locator(minorLocator)
1765  self.avgs_fig.xaxis.set_tick_params(length=80, which='major')
1766  self.avgs_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
1767  self.avgs_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
1768  self.avgs_fig.yaxis.grid(color=self.style.bgborder)
1769 
1770  self.nobj_fig.set_xlim([0,max(self.cursor_range)])
1771  self.data_fig.canvas.draw()
1772 
1773  return self.refresh()
1774 
1775  def refresh(self, colour=(92, 39, 227), thickness=4, scrub=True):
1776  ''' Refresh dynamic/visual content when playback occurs.
1777 
1778  Note:
1779  =====
1780  Opencv has reversed channels (BGR) for color
1781  '''
1782 
1783 
1784  img = self.grabVideoFrame(scrub=scrub)
1785  if(img is None): return False
1786 
1787  objCount = 0
1788  objTypes = []
1789  for i in range(len(self.objects))[:self.iIx]+range(len(self.objects))[self.iIx+1:]+[self.iIx]:
1790  if(len(self.objects)>0 and self.objects[i].existsAtInstant(self.cursor_abs_location)):
1791  # Draw grouped tracks...
1792  if(self.videomenu_groupFeatures.get()): self.drawGroupedObj(img, i, color=colour, thickness=thickness)
1793  # ...or draw features.
1794  else: self.drawFeatures(img, i, color=colour, thickness=thickness-1)
1795 
1796 
1797  if(self.videomenu_show_contours.get()):
1798  #TODO: FIX THIS
1799  self.drawProjectedContour(img, self.objects[i], self.cursor_location)
1800 
1801  if(self.videomenu_show_bounding_boxes.get()): self.drawBoundingBox(img, self.objects[i], self.cursor_location)
1802 
1803  objCount += 1
1804  objTypes.append(self.objects[i].getUserType())
1805 
1806  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
1807  if(img.shape[1] > self.go_window_width): img = cv2.resize(img, (self.go_window_width,600))
1808  img = self.Image.fromarray(img)
1809  imgtk = self.ImageTk.PhotoImage(image=img)
1810  self.video_window.imgtk = imgtk
1811  self.video_window.configure(image=imgtk)
1812 
1813 
1814  self.refreshTimeStamps()
1815  try: self.status.setDefault(objs_str=str(objCount), class_str='('+','.join([self.local['userTypeNames'][x] for x in objTypes])+')')
1816  except: self.status.setDefault(objs_str=str(objCount), class_str='-')
1817 
1818 
1819  self.drawTkinterCursor()
1820  return True
1821 
1822 
1823 
1824 
1830  def __init__(self, objects, videoFilename, local, companion_objects=None, **kwargs):
1831  ''' local is a pre-formatted dict-like object containing keywords in
1832  the appropriate language. See the tvaLib Specification for formatting
1833  details and examples.
1834 
1835  homography can be passed directly or as a filename.
1836 
1837  IMPORTANT: Matplotlib (mpl) MUST be initialised with mpl.use('TkAgg') as soon as mpl is imported.
1838  '''
1839 
1840 
1841  TimeseriesInterface.__init__(self, videoFilename, local, launch=False, **kwargs)
1842  self.objects = objects
1843  self.companion_objects = companion_objects
1844  self.iIx = 0
1845 
1846 
1847 
1848  self.item_list_filter_value = tk.StringVar()
1849  self.item_list_filter = tk_Combobox(self.frame_nav_list, foreground=self.style.fg, textvariable=self.item_list_filter_value, state='readonly')
1850  self.item_list_filter['values'] = ['<'+local['UI_item_list_filt_none']+'>']+self.local['userTypeNames']
1851  self.item_list_filter.current(0)
1852  self.item_list_filter.pack(fill=tk.BOTH, expand=0, padx=(10,10), pady=(10,5))
1853  self.item_list_scrollbar = tk.Scrollbar(self.frame_nav_list)
1854  self.item_list_scrollbar.pack(side=tk.RIGHT, fill=tk.Y, padx=(0,10), pady=(5,10))
1855  self.item_list = tk.Listbox(self.frame_nav_list, foreground=self.style.fg, bg=self.style.bgdark, highlightbackground=self.style.bglight, yscrollcommand=self.item_list_scrollbar.set)
1856  self.item_list.pack(fill=tk.BOTH, expand=1, padx=(10,0), pady=(5,10))
1857  self.item_list_scrollbar.config(command=self.item_list.yview)
1858  self.itemsReorder(initOnly=True)
1859 
1860 
1861 
1862  self.data_fig, [self.speed_fig,self.nfeat_fig,self.curv_d_fig,self.curv_o_fig] = plt.subplots(4, figsize=(5,4), dpi=100, sharex=True)
1863  self.data_fig.patch.set_alpha(0.0)
1864 
1865  self.speed_fig.set_ylim([0,60])
1866  self.speed_fig.set_ylabel(local['gen_speed'], fontsize=10)
1867  self.speed_fig.patch.set_facecolor(self.style.bgdark)
1868  plt.setp([self.speed_fig.get_xticklines(), self.speed_fig.get_yticklines()], color=self.style.bgborder)
1869 
1870  self.nfeat_fig.set_ylim([0,10])
1871  self.nfeat_fig.set_ylabel(local['UI_fig_num_features'], fontsize=10)
1872  self.nfeat_fig.patch.set_facecolor(self.style.bgdark)
1873  plt.setp([self.nfeat_fig.get_xticklines(), self.nfeat_fig.get_yticklines()], color=self.style.bgborder)
1874 
1875  self.curv_d_fig.set_ylim([0,10])
1876  self.curv_d_fig.set_ylabel(local['UI_fig_curv_dist'], fontsize=10)
1877  self.curv_d_fig.patch.set_facecolor(self.style.bgdark)
1878  plt.setp([self.curv_d_fig.get_xticklines(), self.curv_d_fig.get_yticklines()], color=self.style.bgborder)
1879 
1880  self.curv_o_fig.set_ylim([0,10])
1881  self.curv_o_fig.set_ylabel(local['UI_fig_curv_offset'], fontsize=10)
1882  self.curv_o_fig.patch.set_facecolor(self.style.bgdark)
1883  self.curv_o_fig.plot([0,999999], [0,0], color=self.style.bgborder, linewidth=1)
1884  plt.setp([self.curv_o_fig.get_xticklines(), self.curv_o_fig.get_yticklines()], color=self.style.bgborder)
1885 
1886  self.speed_fig.set_xlim([0,10])
1887  self.data_fig_canvas = self.drawFig(self.data_fig, master=self.frame_data)
1888  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='speed')
1889  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='nfeat')
1890  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='curv_d')
1891  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='curv_o')
1892  self.data_fig_canvas_tags = ['speed', 'nfeat', 'curv_d', 'curv_o']
1893  self.data_fig_canvas_y_stops_p = [0.100, 0.274, 0.310, 0.483, 0.518, 0.692, 0.730, 0.900]
1894 
1895 
1896 
1897  self.projectObjects(self.objects)
1899  self.loadObject()
1900 
1901 
1902 
1903  self.item_list_filter.bind('<<ComboboxSelected>>', self.itemsReorder)
1904  self.item_list.bind('<<ListboxSelect>>', self.callback_mouseItem)
1905  self.data_fig_canvas.bind('<Button 1>', self.callback_scrubTarget)
1906 
1907 
1908 
1909  self.root.focus_force()
1910  self.root.mainloop()
1911 
1912 
1913 
1916  def callback_switchItem(self, event=None):
1917  try:
1918  self.item_list.itemconfig(self.item_list_last_idx, bg=self.style.bgdark)
1919  self.item_list.itemconfig(self.item_list_active_idx, bg=self.style.selection)
1920  self.iIx = self.item_list_contents[self.item_list_active_idx]['iIx']
1921  self.cursor_location = self.objects[self.iIx].getFirstInstant()
1922  self.cursor_abs_location = self.objects[self.iIx].getFirstInstant()
1923  self.capture.set(1, self.cursor_abs_location)
1924  self.frame_video.focus()
1925  self.loadObject()
1926  return self.refresh()
1927  except: pass
1928 
1929 
1930 
1933  def loadObject(self):
1934  ''' Load and set up individual object data '''
1935  if(not self.objects): return self.refresh()
1936 
1937 
1938  objLength = self.objects[self.iIx].getLastInstant()-self.objects[self.iIx].getFirstInstant()+1
1939  self.vel1 = [m.sqrt(x**2 + y**2)*self.speed_conv for x,y in zip(self.objects[self.iIx].velocities.getXCoordinates(),self.objects[self.iIx].velocities.getYCoordinates())]
1940  try:
1941  nfeat_dict = self.objects[self.iIx].getFeatureNumbers()
1942  nfeat = [nfeat_dict[x] for x in self.objects[self.iIx].timeInterval]
1943  except: nfeat = [0 for x in self.objects[self.iIx].timeInterval]
1944  if(hasattr(self.objects[self.iIx], 'curvilinearPositions')):
1945  curv_d = self.objects[self.iIx].curvilinearPositions.getXCoordinates()
1946  curv_o = self.objects[self.iIx].curvilinearPositions.getYCoordinates()
1947  lanes = self.objects[self.iIx].curvilinearPositions.getLanes()
1948  else:
1949  curv_d = [0 for x in range(objLength)]
1950  curv_o = [0 for x in range(objLength)]
1951  lanes = [0 for x in range(objLength)]
1952  splitIndices = [0]+[lane for lane in range(1,len(lanes)) if lanes[lane] != lanes[lane-1]]+[len(lanes)]
1953  self.cursor_range = range(objLength)
1954  self.cursor_location = 0
1955  self.cursor_abs_location = self.objects[self.iIx].getFirstInstant()
1957 
1958 
1959  self.capture.set(1, self.cursor_abs_location)
1960 
1961 
1962  majorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*5)
1963  minorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*1)
1964 
1965 
1966  try: self.speed_fig_data1.pop(0).remove()
1967  except: pass
1968  self.speed_fig_data1 = self.speed_fig.plot(self.cursor_range, self.vel1, color=self.style.accent1, linewidth=2)
1969  self.speed_fig.set_ylim([0,int(round(max(self.vel1)*1.1,0))])
1970  self.speed_fig.xaxis.set_major_locator(majorLocator)
1971  self.speed_fig.xaxis.set_minor_locator(minorLocator)
1972  self.speed_fig.xaxis.set_tick_params(length=self.style.plotStyle.majorTickLength, which='major')
1973  self.speed_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
1974  self.speed_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
1975  self.speed_fig.yaxis.grid(color=self.style.bgborder)
1976 
1977 
1978  try: self.nfeat_fig_data.pop(0).remove()
1979  except: pass
1980  self.nfeat_fig_data = self.nfeat_fig.step(self.cursor_range, nfeat, color=self.style.accent2, linewidth=2)
1981  self.nfeat_fig.set_ylim([0,int(round(max(nfeat)*1.1,0))])
1982  self.nfeat_fig.xaxis.set_major_locator(majorLocator)
1983  self.nfeat_fig.xaxis.set_minor_locator(minorLocator)
1984  self.nfeat_fig.xaxis.set_tick_params(length=self.style.plotStyle.majorTickLength, which='major')
1985  self.nfeat_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
1986  self.nfeat_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
1987  self.nfeat_fig.yaxis.grid(color=self.style.bgborder)
1988 
1989 
1990  try:
1991  for data in self.curv_d_fig_data:
1992  data.pop(0).remove()
1993  except: pass
1994  self.curv_d_fig_data = []
1995  for splitIx in range(len(splitIndices)-1):
1996  self.curv_d_fig_data.append(self.curv_d_fig.plot(self.cursor_range[splitIndices[splitIx]:splitIndices[splitIx+1]], curv_d[splitIndices[splitIx]:splitIndices[splitIx+1]], color=self.style.accent3, linewidth=2))
1997  self.curv_d_fig.set_ylim([0,max(round(max(curv_d)*1.1),1)])
1998  self.curv_d_fig.xaxis.set_major_locator(majorLocator)
1999  self.curv_d_fig.xaxis.set_minor_locator(minorLocator)
2000  self.curv_d_fig.xaxis.set_tick_params(length=self.style.plotStyle.majorTickLength, which='major')
2001  self.curv_d_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2002  self.curv_d_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2003  self.curv_d_fig.yaxis.grid(color=self.style.bgborder)
2004 
2005 
2006  try:
2007  for data in self.curv_o_fig_data:
2008  data.pop(0).remove()
2009  except: pass
2010  self.curv_o_fig_data = []
2011  for splitIx in range(len(splitIndices)-1):
2012  self.curv_o_fig_data.append(self.curv_o_fig.plot(self.cursor_range[splitIndices[splitIx]:splitIndices[splitIx+1]], curv_o[splitIndices[splitIx]:splitIndices[splitIx+1]], color=self.style.accent4, linewidth=2))
2013  absRangeMax = max(round(max(abs(min(curv_o)*1.1),abs(max(curv_o)*1.1)),0),1)
2014  self.curv_o_fig.set_ylim([-absRangeMax,absRangeMax])
2015  self.curv_o_fig.xaxis.set_major_locator(majorLocator)
2016  self.curv_o_fig.xaxis.set_minor_locator(minorLocator)
2017  self.curv_o_fig.xaxis.set_tick_params(length=self.style.plotStyle.majorTickLength, which='major')
2018  self.curv_o_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2019  self.curv_o_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2020  self.curv_o_fig.yaxis.set_major_locator(mpl.ticker.MultipleLocator(round(absRangeMax/2.0)))
2021  self.curv_o_fig.yaxis.grid(color=self.style.bgborder)
2022 
2023 
2024  self.speed_fig.set_xlim([0,max(self.cursor_range)])
2025  self.data_fig.canvas.draw()
2026 
2027 
2028  return self.refresh()
2029 
2030  def refresh(self, scrub=True, highlight_colour_BGR=(92, 39, 227), highlight_thickness=4):
2031  ''' Refresh dynamic/visual content when playback occurs.
2032 
2033  Input:
2034  ======
2035  highlight_colour_BGR -> BGR colour values (as used in openCV)
2036  for trajectory highlighting
2037  '''
2038 
2039  img = self.grabVideoFrame(scrub=scrub)
2040  if(img is None): return False
2041 
2042 
2043  if(self.companion_objects):
2044  if(self.videomenu_hide_vid_state.get()): colour = (3, 130, 105)
2045  else: colour = (5, 237, 191)
2046  for i in range(len(self.companion_objects)):
2047  if(self.companion_objects[i].existsAtInstant(self.cursor_abs_location)):
2048  cur_loc = [x for x in self.companion_objects[i].getTimeInterval()].index(self.cursor_abs_location)
2049  cvPlot(img, self.companion_objects[i], cur_loc, useProjectedPositions=True, color=colour, thickness=2)
2050 
2051 
2052  objNums = []
2053  objTypes = []
2054  for i in range(len(self.objects))[:self.iIx]+range(len(self.objects))[self.iIx+1:]+[self.iIx]:
2055  # Check that the object is real and exists at cursor instant
2056  if(len(self.objects)>0 and self.objects[i].existsAtInstant(self.cursor_abs_location)):
2057  if(self.iIx == i):
2058  colour = highlight_colour_BGR
2059  thickness = highlight_thickness
2060  else:
2061  if(self.videomenu_hide_vid_state.get()): colour = (110, 110, 110)
2062  else: colour = (220, 220, 220)
2063  thickness = 2
2064  # Draw grouped tracks...
2065  if(self.videomenu_groupFeatures.get()): self.drawGroupedObj(img, i, annotate_velocity=True, color=colour, thickness=thickness)
2066  # ...or draw features.
2067  else: self.drawFeatures(img, i, color=colour, thickness=thickness-1)
2068 
2069  objNums.append(self.objects[i].num)
2070  objTypes.append(self.objects[i].getUserType())
2071 
2072 
2073  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
2074  if(img.shape[1] > self.go_window_width): img = cv2.resize(img, (self.go_window_width,600))
2075  img = self.Image.fromarray(img)
2076  imgtk = self.ImageTk.PhotoImage(image=img)
2077  self.video_window.imgtk = imgtk
2078  self.video_window.configure(image=imgtk)
2079 
2080 
2081  self.refreshTimeStamps()
2082  try: self.status.setDefault(objs_str='('+','.join([str(x) for x in objNums])+')', class_str='('+','.join([self.local['userTypeNames'][x] for x in objTypes])+')')
2083  except: self.status.setDefault(objs_str='('+','.join([str(x) for x in objNums])+')', class_str='-')
2084 
2085 
2086  self.drawTkinterCursor()
2087  return True
2088 
2089 
2090 
2091 
2097  def __init__(self, userPairs, videoFilename, local, **kwargs):
2098  ''' local is a pre-formatted dict-like object containing keywords in
2099  the appropriate language. See the tvaLib Specification for formatting
2100  details and examples.
2101 
2102  homography can be passed directly or as a filename.
2103 
2104  IMPORTANT: Matplotlib (mpl) MUST be initialised with mpl.use('TkAgg') as soon as mpl is imported.
2105  '''
2106 
2107 
2108  TimeseriesInterface.__init__(self, videoFilename, local, launch=False, **kwargs)
2109  self.userPairs = userPairs
2110  self.ttc_critical_val = 3.0
2111 
2112 
2113 
2114  self.videomenu.add_separator()
2115  self.videomenu_cp_disp_state=tk.IntVar()
2116  self.videomenu.add_radiobutton(label=local['UI_nav_menu_cp_disp_prob'], variable=self.videomenu_cp_disp_state, value=False, command=self.callback_scrubStatic)
2117  self.videomenu.add_radiobutton(label=local['UI_nav_menu_cp_disp_inte'], variable=self.videomenu_cp_disp_state, value=True, command=self.callback_scrubStatic)
2118 
2119 
2120 
2121  self.item_list_nonempty_state = tk.IntVar()
2122  self.item_list_nonempty = tk.Checkbutton(self.frame_nav_list, text=local['UI_item_list_filt_noempty'], variable=self.item_list_nonempty_state, command=self.itemsReorder, foreground=self.style.fg, bg=self.style.bglight, activeforeground=self.style.fg, activebackground=self.style.bglight)
2123  self.item_list_nonempty.pack(side=tk.TOP, padx=(10,10), pady=(10,0))
2124  self.item_list_scrollbar = tk.Scrollbar(self.frame_nav_list)
2125  self.item_list_scrollbar.pack(side=tk.RIGHT, fill=tk.Y, padx=(0,10), pady=(5,10))
2126  self.item_list = tk.Listbox(self.frame_nav_list, foreground=self.style.fg, bg=self.style.bgdark, highlightbackground=self.style.bglight, yscrollcommand=self.item_list_scrollbar.set)
2127  self.item_list.pack(fill=tk.BOTH, expand=1, padx=(10,0), pady=(5,10))
2128  self.item_list_scrollbar.config(command=self.item_list.yview)
2129  self.itemsReorder(initOnly=True)
2130  # Map indexes of user pairs that have collision-point indicators
2131  self.withIndicatorToUserPairsMap = [Ix for Ix in range(len(userPairs)) if userPairs[Ix].collisionPoints]
2132 
2133 
2134 
2135  self.label_cp_nums = tk.Label(self.frame_video, text=local['UI_label_cops']+' 0', foreground=self.style.fg, bg=self.style.bgdark)
2136  self.label_cp_nums.pack(side=tk.LEFT, padx=(5,5), pady=(5,10))
2137 
2138 
2139 
2140  self.data_fig, [self.speed_fig,self.rdist_fig,self.int_fig,self.intp_fig] = plt.subplots(4, figsize=(5,4), dpi=100, sharex=True)
2141  self.data_fig.patch.set_alpha(0.0)
2142 
2143  self.speed_fig.set_ylim([0,60])
2144  self.speed_fig.set_ylabel(local['gen_speed'])
2145  self.speed_fig.patch.set_facecolor(self.style.bgdark)
2146  plt.setp([self.speed_fig.get_xticklines(), self.speed_fig.get_yticklines()], color=self.style.bgborder)
2147 
2148  self.rdist_fig.set_ylim([0,10])
2149  self.rdist_fig.set_ylabel(local['gen_rel_dist_short'])
2150  self.rdist_fig.patch.set_facecolor(self.style.bgdark)
2151  plt.setp([self.rdist_fig.get_xticklines(), self.rdist_fig.get_yticklines()], color=self.style.bgborder)
2152 
2153  self.int_fig.set_ylim([0,10])
2154  self.int_fig.set_ylabel(local['vis_cp_hist_x'])
2155  self.int_fig.patch.set_facecolor(self.style.bgdark)
2156  plt.setp([self.int_fig.get_xticklines(), self.int_fig.get_yticklines()], color=self.style.bgborder)
2157 
2158  self.intp_fig.set_ylim([0,1])
2159  self.intp_fig.set_ylabel(local['vis_cp_col_prob'])
2160  self.intp_fig.patch.set_facecolor(self.style.bgdark)
2161  plt.setp([self.intp_fig.get_xticklines(), self.intp_fig.get_yticklines()], color=self.style.bgborder)
2162 
2163  self.speed_fig.set_xlim([0,10])
2164  self.data_fig_canvas = self.drawFig(self.data_fig, master=self.frame_data)
2165  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='speed')
2166  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='rdist')
2167  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='int')
2168  self.data_fig_canvas.create_line(0, 0, 0, 0, fill=self.style.fg, width=self.style.plotStyle.cursorWidth, tags='intp')
2169  self.data_fig_canvas_tags = ['speed', 'rdist', 'int', 'intp']
2170  self.data_fig_canvas_y_stops_p = [0.100, 0.274, 0.310, 0.483, 0.518, 0.692, 0.730, 0.900]
2171 
2172 
2173 
2174  self.loadInteraction()
2175 
2176 
2177 
2178  self.item_list.bind('<<ListboxSelect>>', self.callback_switchItem)
2179  self.data_fig_canvas.bind('<Button 1>', self.callback_scrubTarget)
2180  self.videomenu.invoke(self.videomenu.index(local['UI_nav_menu_cp_disp_inte']))
2181 
2182 
2183 
2184  self.root.focus_force()
2185  self.root.mainloop()
2186 
2187 
2188 
2189  def callback_prevItem(self, event=None):
2190  if(self.item_list_nonempty_state.get()==1):
2191  if(self.iIx != self.withIndicatorToUserPairsMap[0]):
2192  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.bgdark)
2193  self.iIx = self.withIndicatorToUserPairsMap[self.withIndicatorToUserPairsMap.index(self.iIx)-1]
2194  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.selection)
2195  self.loadInteraction()
2196  else: return False
2197  else:
2198  if(self.iIx > 0):
2199  self.item_list.itemconfig(self.iIx, bg=self.style.bgdark)
2200  self.iIx -= 1
2201  self.item_list.itemconfig(self.iIx, bg=self.style.selection)
2202  self.loadInteraction()
2203  else: return False
2204 
2205  def callback_nextItem(self, event=None):
2206  if(self.item_list_nonempty_state.get()==1):
2207  if(self.iIx != self.withIndicatorToUserPairsMap[-1]):
2208  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.bgdark)
2209  self.iIx = self.withIndicatorToUserPairsMap[self.withIndicatorToUserPairsMap.index(self.iIx)+1]
2210  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.selection)
2211  self.loadInteraction()
2212  else: return False
2213  else:
2214  if(self.iIx < len(self.userPairs)):
2215  self.item_list.itemconfig(self.iIx, bg=self.style.bgdark)
2216  self.iIx += 1
2217  self.item_list.itemconfig(self.iIx, bg=self.style.selection)
2218  self.loadInteraction()
2219  else: return False
2220 
2221  def callback_switchItem(self, event=None):
2222  selection = self.item_list.curselection()
2223  if(selection):
2224  self.item_list.selection_clear(selection)
2225  self.item_list.itemconfig(selection[0], bg=self.style.selection)
2226  if(self.item_list_nonempty_state.get()==1):
2227  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.bgdark)
2228  self.iIx = self.withIndicatorToUserPairsMap[int(selection[0])]
2229  else:
2230  self.item_list.itemconfig(self.iIx, bg=self.style.bgdark)
2231  self.iIx = int(selection[0])
2232  self.filemenu.focus()
2233  return self.loadInteraction()
2234 
2235 
2236 
2237  def loadInteraction(self):
2238  ''' Load and set up individual interaction data '''
2239  if(not self.userPairs): return self.refresh()
2240  xticklength=80
2241 
2242 
2243  intLength = self.userPairs[self.iIx].getLastInstant()-self.userPairs[self.iIx].getFirstInstant()+1
2244  self.ruLowOffset1 = max(self.userPairs[self.iIx].getFirstInstant()-self.userPairs[self.iIx].roadUser1.getFirstInstant(),0)
2245  self.ruLowOffset2 = max(self.userPairs[self.iIx].getFirstInstant()-self.userPairs[self.iIx].roadUser2.getFirstInstant(),0)
2246  self.ruUpOffset1 = self.ruLowOffset1 + intLength
2247  self.ruUpOffset2 = self.ruLowOffset2 + intLength
2248  self.vel1 = [m.sqrt(x**2 + y**2)*self.speed_conv for x,y in zip(self.userPairs[self.iIx].roadUser1.velocities.getXCoordinates()[self.ruLowOffset1:self.ruUpOffset1],self.userPairs[self.iIx].roadUser1.velocities.getYCoordinates()[self.ruLowOffset1:self.ruUpOffset1])]
2249  self.vel2 = [m.sqrt(x**2 + y**2)*self.speed_conv for x,y in zip(self.userPairs[self.iIx].roadUser2.velocities.getXCoordinates()[self.ruLowOffset2:self.ruUpOffset2],self.userPairs[self.iIx].roadUser2.velocities.getYCoordinates()[self.ruLowOffset2:self.ruUpOffset2])]
2250  rdist = [m.sqrt(x**2 + y**2) for x,y in zip([x2-x1 for x1,x2 in zip(self.userPairs[self.iIx].roadUser1.positions.getXCoordinates()[self.ruLowOffset1:self.ruUpOffset1],self.userPairs[self.iIx].roadUser2.positions.getXCoordinates()[self.ruLowOffset2:self.ruUpOffset2])],[y2-y1 for y1,y2 in zip(self.userPairs[self.iIx].roadUser1.positions.getYCoordinates()[self.ruLowOffset1:self.ruUpOffset1],self.userPairs[self.iIx].roadUser2.positions.getYCoordinates()[self.ruLowOffset2:self.ruUpOffset2])])]
2251  self.cursor_range = range(intLength)
2252  self.cursor_location = 0
2253  self.cursor_abs_location = self.userPairs[self.iIx].getFirstInstant()
2255 
2256 
2257  try: self.status.setDefault(objs_str='('+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser1.getUserType()]+','+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser2.getUserType()]+')', class_str='('+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser1.getUserType()]+','+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser2.getUserType()]+')')
2258  except: self.status.setDefault(objs_str='('+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser1.getUserType()]+','+self.local['userTypeNames'][self.userPairs[self.iIx].roadUser2.getUserType()]+')', class_str='-')
2259 
2260 
2261  self.capture.set(1, self.cursor_abs_location)
2262  self.projectObjects([self.userPairs[self.iIx].roadUser1, self.userPairs[self.iIx].roadUser2])
2263 
2264 
2265  majorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*5)
2266  minorLocator = mpl.ticker.MultipleLocator(((len(self.cursor_range)/80)+1)*1)
2267 
2268  try:
2269  self.speed_fig_data1.pop(0).remove()
2270  self.speed_fig_data2.pop(0).remove()
2271  except: pass
2272  self.speed_fig_data1 = self.speed_fig.plot(self.cursor_range, self.vel1, color=self.style.accent1, linewidth=2)
2273  self.speed_fig_data2 = self.speed_fig.plot(self.cursor_range, self.vel2, color=self.style.accent2, linewidth=2)
2274  self.speed_fig.set_ylim([0,int(round(max(self.vel1+self.vel2)*1.1,0))])
2275  self.speed_fig.xaxis.set_major_locator(majorLocator)
2276  self.speed_fig.xaxis.set_minor_locator(minorLocator)
2277  self.speed_fig.xaxis.set_tick_params(length=xticklength, which='major')
2278  self.speed_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2279  self.speed_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2280  self.speed_fig.yaxis.grid(color=self.style.bgborder)
2281 
2282  try: self.rdist_fig_data.pop(0).remove()
2283  except: pass
2284  self.rdist_fig_data = self.rdist_fig.plot(self.cursor_range, rdist, color=self.style.accent3, linewidth=2)
2285  self.rdist_fig.set_ylim([0,int(round(max(rdist)*1.1,0))])
2286  self.rdist_fig.xaxis.set_major_locator(majorLocator)
2287  self.rdist_fig.xaxis.set_minor_locator(minorLocator)
2288  self.rdist_fig.xaxis.set_tick_params(length=xticklength, which='major')
2289  self.rdist_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2290  self.rdist_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2291  self.rdist_fig.yaxis.grid(color=self.style.bgborder)
2292 
2293  try: self.intd_fig_data1.pop(0).remove()
2294  except: pass
2295  try: self.intd_fig_data2.remove()
2296  except: pass
2297  try: self.intd_fig_data3.remove()
2298  except: pass
2299  try: self.intpd_fig_data1.pop(0).remove()
2300  except: pass
2301  try: self.intpd_fig_data2.remove()
2302  except: pass
2303  indicators = sorted(self.userPairs[self.iIx].getPointList(aggregateInstants=True), key=lambda x: x[4])
2304  indicators_plt = [[x[4]-self.userPairs[self.iIx].getFirstInstant() for x in indicators],[y[0]/float(self.fps) for y in indicators]]
2305  indicators_plt_lt = [[x[4]-self.userPairs[self.iIx].getFirstInstant() for x in indicators if x[0]/float(self.fps) < self.ttc_critical_val],[y[0]/float(self.fps) for y in indicators if y[0]/float(self.fps) < self.ttc_critical_val]]
2306  indicatorsP_plt = [[x[4]-self.userPairs[self.iIx].getFirstInstant() for x in indicators],[y[3] for y in indicators]]
2307  self.intd_fig_data1 = self.int_fig.plot(indicators_plt[0], indicators_plt[1], color=self.style.accent4, linewidth=1.5, zorder=5)
2308  self.intd_fig_data2 = self.int_fig.scatter(indicators_plt[0], indicators_plt[1], color=self.style.accent4, linewidth=2, zorder=10)
2309  self.intd_fig_data3 = self.int_fig.scatter(indicators_plt_lt[0], indicators_plt_lt[1], color='red', linewidth=2, zorder=10)
2310  if(not(indicators_plt[1])): indicators_plt[1] = [10]
2311  self.int_fig.set_ylim([0,int(round(max(indicators_plt[1])*1.1,0))])
2312  self.int_fig.xaxis.set_major_locator(majorLocator)
2313  self.int_fig.xaxis.set_minor_locator(minorLocator)
2314  self.int_fig.xaxis.set_tick_params(length=xticklength, which='major')
2315  self.int_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2316  self.int_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2317  self.int_fig.yaxis.grid(color=self.style.bgborder)
2318 
2319  self.intpd_fig_data1 = self.intp_fig.plot(indicatorsP_plt[0], indicatorsP_plt[1], color=self.style.accent4, linewidth=1.5, zorder=5)
2320  self.intpd_fig_data2 = self.intp_fig.scatter(indicatorsP_plt[0], indicatorsP_plt[1], color=self.style.accent4, linewidth=2, zorder=10)
2321  self.intp_fig.xaxis.set_major_locator(majorLocator)
2322  self.intp_fig.xaxis.set_minor_locator(minorLocator)
2323  self.intp_fig.xaxis.set_tick_params(length=xticklength, which='major')
2324  self.intp_fig.xaxis.set_tick_params(length=self.style.plotStyle.minorTickLength, which='minor', color=self.style.bgborder)
2325  self.intp_fig.xaxis.grid(color=self.style.bgborder, linestyle=self.style.plotStyle.majorTickGridLineStyle)
2326  self.intp_fig.yaxis.grid(color=self.style.bgborder)
2327 
2328  self.speed_fig.set_xlim([0,max(self.cursor_range)])
2329  self.data_fig.canvas.draw()
2330 
2331  return self.refresh()
2332 
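# Illustrative sketch of the axis-styling pattern repeated above for speed_fig,
# rdist_fig, int_fig and intp_fig, factored into a helper. The MultipleLocator steps,
# tick lengths and colours below are placeholder assumptions; the real code reuses
# majorLocator, minorLocator and the Style/PlotStyle values built elsewhere.
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator

def style_time_axis(ax, major_step=50, minor_step=10, grid_colour='#424242'):
    ax.xaxis.set_major_locator(MultipleLocator(major_step))
    ax.xaxis.set_minor_locator(MultipleLocator(minor_step))
    ax.xaxis.set_tick_params(length=8, which='major')
    ax.xaxis.set_tick_params(length=4, which='minor', color=grid_colour)
    ax.xaxis.grid(color=grid_colour, linestyle='-')
    ax.yaxis.grid(color=grid_colour)

fig, ax = plt.subplots()
ax.plot(range(200), [x**0.5 for x in range(200)])
style_time_axis(ax)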
2333  def refresh(self, scrub=True):
2334  ''' Refresh dynamic/visual content when playback occurs. '''
2335 
2337  img = self.grabVideoFrame(scrub=scrub)
2338  if(img is None): return False
2339  #Settings
2340  if(img.shape[1] > self.go_window_width): scaleFactor = img.shape[1]/float(self.go_window_width)
2341  else: scaleFactor = 1.0
2342  #Draw collision Points
2343  cps = []
2344  if(hasattr(self.userPairs[self.iIx], 'getPointList') and self.userPairs[self.iIx].getIndicator('Time to Collision').values):
2345  cps = self.userPairs[self.iIx].getPointList(atInstant=self.cursor_abs_location, aggregateInstants=False)
2346  if(self.videomenu_cp_disp_state.get()==1):
2347  colourRule = lambda x: (int(255*(1-min(1, 1/m.sqrt(1+x*8)))),
2348  0,
2349  int(255*(min(1, 1/m.sqrt(1+x*2.5)))))
2350  #Legend
2351  cv2.rectangle(img, (img.shape[1],img.shape[0]), (int(img.shape[1]*0.96),int(img.shape[0]*0.97)), colourRule(0.0), thickness=-1)
2352  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.97)), (int(img.shape[1]*0.96),int(img.shape[0]*0.94)), colourRule(0.5), thickness=-1)
2353  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.94)), (int(img.shape[1]*0.96),int(img.shape[0]*0.91)), colourRule(1.0), thickness=-1)
2354  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.91)), (int(img.shape[1]*0.96),int(img.shape[0]*0.88)), colourRule(1.5), thickness=-1)
2355  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.88)), (int(img.shape[1]*0.96),int(img.shape[0]*0.85)), colourRule(2.0), thickness=-1)
2356  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.85)), (int(img.shape[1]*0.96),int(img.shape[0]*0.82)), colourRule(2.5), thickness=-1)
2357  cv2.putText(img, '0.0s', (int(img.shape[1]*0.96),img.shape[0]), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (255, 255, 255))
2358  cv2.putText(img, '2.5s', (int(img.shape[1]*0.96),int(img.shape[0]*0.85)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (255, 255, 255))
2359  else:
2360  colourRule = lambda x: (int((1-min(x,1))*255),
2361  int(min(x,1)*0),
2362  int(min(x,1)*255))
2363  #Legend
2364  cv2.rectangle(img, (img.shape[1],img.shape[0]), (int(img.shape[1]*0.96),int(img.shape[0]*0.97)), colourRule(0.0), thickness=-1)
2365  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.97)), (int(img.shape[1]*0.96),int(img.shape[0]*0.94)), colourRule(0.2), thickness=-1)
2366  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.94)), (int(img.shape[1]*0.96),int(img.shape[0]*0.91)), colourRule(0.4), thickness=-1)
2367  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.91)), (int(img.shape[1]*0.96),int(img.shape[0]*0.88)), colourRule(0.5), thickness=-1)
2368  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.88)), (int(img.shape[1]*0.96),int(img.shape[0]*0.85)), colourRule(0.6), thickness=-1)
2369  cv2.rectangle(img, (img.shape[1],int(img.shape[0]*0.85)), (int(img.shape[1]*0.96),int(img.shape[0]*0.82)), colourRule(1.0), thickness=-1)
2370  cv2.putText(img, '0.0', (int(img.shape[1]*0.96),img.shape[0]), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (255, 255, 255))
2371  cv2.putText(img, '1.0', (int(img.shape[1]*0.96),int(img.shape[0]*0.85)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (255, 255, 255))
2372  for cp in cps:
2373  coords = [cp[1],cp[2]]
2374  if(self.invHomography): coords = tvaLib.Obj.homographyProject(coords, self.invHomography)
2375  if(self.videomenu_cp_disp_state.get()==1): colour = colourRule(cp[0]/self.fps)
2376  else: colour = colourRule(cp[3])
2377  cv2.circle(img, (int(coords[0]),int(coords[1])), 1, colour, thickness=2)
2378 
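# Colour tuples above are in OpenCV's BGR channel order. In the TTC display mode
# (videomenu_cp_disp_state == 1), a point with TTC near 0 s is drawn red and fades
# towards blue as the TTC grows, matching the 0.0 s - 2.5 s legend; in the probability
# mode the mapping runs from blue at 0.0 to red at 1.0.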
2379  #Draw trajectories
2380  cvPlot(img, self.userPairs[self.iIx].roadUser1, self.ruLowOffset1+self.cursor_location, useProjectedPositions=True, color=(92, 39, 227), thickness=4)
2381  cvPlot(img, self.userPairs[self.iIx].roadUser2, self.ruLowOffset2+self.cursor_location, useProjectedPositions=True, color=(227, 139, 39), thickness=4)
2383  coords1 = self.userPairs[self.iIx].roadUser1.projectedPositions[self.ruLowOffset1+self.cursor_location].asint().astuple()
2384  coords2 = self.userPairs[self.iIx].roadUser2.projectedPositions[self.ruLowOffset2+self.cursor_location].asint().astuple()
2385  cv2.putText(img, '#'+str(self.userPairs[self.iIx].roadUser1.num), coords1, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
2386  cv2.putText(img, '#'+str(self.userPairs[self.iIx].roadUser1.num), coords1, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (92, 39, 227))
2387  cv2.putText(img, str(round(self.vel1[self.cursor_location],1)), (coords1[0],coords1[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
2388  cv2.putText(img, str(round(self.vel1[self.cursor_location],1)), (coords1[0],coords1[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (92, 39, 227))
2389  cv2.putText(img, '#'+str(self.userPairs[self.iIx].roadUser2.num), coords2, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
2390  cv2.putText(img, '#'+str(self.userPairs[self.iIx].roadUser2.num), coords2, cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (227, 139, 39))
2391  cv2.putText(img, str(round(self.vel2[self.cursor_location],1)), (coords2[0],coords2[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (0, 0, 0), thickness=int(m.ceil(3*scaleFactor)))
2392  cv2.putText(img, str(round(self.vel2[self.cursor_location],1)), (coords2[0],coords2[1]+int(20*scaleFactor)), cv2.FONT_HERSHEY_PLAIN, int(m.ceil(1*scaleFactor)), (227, 139, 39))
2393  #Prepare, resize and draw to canvas
2394  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)
2395  if(img.shape[1] > self.go_window_width): img = cv2.resize(img, (self.go_window_width,600))
2396  img = self.Image.fromarray(img)
2397  imgtk = self.ImageTk.PhotoImage(image=img)
2398  self.video_window.imgtk = imgtk
2399  self.video_window.configure(image=imgtk)
2400 
2401 
2402 
2403  self.refreshTimeStamps()
2404  self.label_cp_nums['text'] = self.local['UI_label_cops']+' '+str(len(cps))
2405 
2406 
2407 
2408  self.drawTkinterCursor()
2409  return True
2410 
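# Illustrative sketch of the frame-to-widget pipeline used at the end of refresh():
# an OpenCV BGR frame is converted to RGBA, optionally downscaled, wrapped as a PIL
# image and pushed into a Tkinter Label. The file name, max_width and the direct PIL
# import are assumptions for the example; keeping imgtk as an attribute of the widget
# prevents the image from being garbage-collected.
import cv2
import Tkinter as tk
from PIL import Image, ImageTk

def show_frame(label, frame_bgr, max_width=1000):
    rgba = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGBA)            # Tk expects RGB(A), not BGR
    if rgba.shape[1] > max_width:                                 # downscale overly wide frames
        scale = max_width / float(rgba.shape[1])
        rgba = cv2.resize(rgba, (max_width, int(rgba.shape[0]*scale)))
    imgtk = ImageTk.PhotoImage(image=Image.fromarray(rgba))
    label.imgtk = imgtk                                           # hold a reference
    label.configure(image=imgtk)

root = tk.Tk()
video_label = tk.Label(root)
video_label.pack()
show_frame(video_label, cv2.imread('frame.png'))                  # hypothetical sample frame
root.mainloop()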
2411  def itemsReorder(self, event=None, initOnly=False):
2412  self.item_list.delete(0, tk.END)
2413  if(self.item_list_nonempty_state.get()==1):
2414  for userPairIx in self.withIndicatorToUserPairsMap:
2415  self.item_list.insert(tk.END, str(self.userPairs[userPairIx].roadUser1.num)+'-'+str(self.userPairs[userPairIx].roadUser2.num)+' at f='+str(self.userPairs[userPairIx].getFirstInstant()))
2416  if(self.iIx not in self.withIndicatorToUserPairsMap):
2417  self.iIx = self.withIndicatorToUserPairsMap[0]
2418  self.loadInteraction()
2419  if(self.withIndicatorToUserPairsMap):
2420  self.item_list.itemconfig(self.withIndicatorToUserPairsMap.index(self.iIx), bg=self.style.selection)
2421  else:
2422  for userPair in self.userPairs:
2423  self.item_list.insert(tk.END, str(userPair.roadUser1.num)+'-'+str(userPair.roadUser2.num)+' at f='+str(userPair.getFirstInstant()))
2424  if(self.userPairs):
2425  self.item_list.itemconfig(self.iIx, bg=self.style.selection)
2426  return True
2427 
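# Illustrative sketch of the Listbox rebuild used by itemsReorder(): clear all rows,
# re-insert the labels, then highlight the active row with itemconfig. The labels and
# selection colour below are placeholder assumptions.
import Tkinter as tk

def rebuild_item_list(listbox, labels, active_index, selection_colour='#676767'):
    listbox.delete(0, tk.END)
    for label in labels:
        listbox.insert(tk.END, label)
    if labels:
        listbox.itemconfig(active_index, bg=selection_colour)

root = tk.Tk()
item_list = tk.Listbox(root)
item_list.pack()
rebuild_item_list(item_list, ['12-47 at f=120', '31-18 at f=388'], active_index=1)
root.mainloop()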
2428 
2429 
2432 class Style():
2433  def __init__(self, fg='#b6b6b6', bglight='#535353', bgdark='#262626', selection='#676767', bgborder='#424242', accent1='#E3275C', accent2='#278BE3', accent3='#82E327', accent4='#FFA500', error='#FF0000', plotStyle=None):
2434  self.fg = fg
2435  self.bglight = bglight
2436  self.bgdark = bgdark
2437  self.selection = selection
2438  self.bgborder = bgborder
2439  self.accent1 = accent1
2440  self.accent2 = accent2
2441  self.accent3 = accent3
2442  self.accent4 = accent4
2443  self.error = error
2444  if(plotStyle): self.plotStyle = plotStyle
2445  else: self.plotStyle = PlotStyle()
2446 
2447 class PlotStyle():
2448  def __init__(self, majorTickLength=8, minorTickLength=4, cursorWidth=2, majorTickGridLineStyle='-'):
2449  self.majorTickLength = majorTickLength
2450  self.minorTickLength = minorTickLength
2451  self.cursorWidth = cursorWidth
2452  self.majorTickGridLineStyle = majorTickGridLineStyle
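Style bundles the interface colour palette and delegates plot-specific options to PlotStyle. A minimal usage sketch (the override values below are arbitrary examples, not defaults taken from the rest of the code):

custom_plots = PlotStyle(majorTickLength=10, minorTickLength=5, cursorWidth=3,
                         majorTickGridLineStyle=':')
theme = Style(accent3='#27E3B8', plotStyle=custom_plots)
# Plot code then reads theme.plotStyle.minorTickLength and
# theme.plotStyle.majorTickGridLineStyle when styling ticks and grids,
# and theme.bgborder / theme.accent1..accent4 for colours.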
def drawProjectedAlignments(self, img)
Definition: interface.py:743
def copyToClipboard(self, content='')
Definition: interface.py:313
def cvPlot(img, obj, lastInstant, useProjectedPositions=False, plotFeature=None, **kwargs)
Info data_fig_canvas_y_stops_p 2 figures: [0.100, 0.465, 0.535, 0.900] 3 figures: [0...
Definition: interface.py:59
projAlignments
Project alignments into image space.
Definition: interface.py:706
def callback_scrubNextEvent(self, event=None)
Definition: interface.py:1166
def events_mark(self, event=None, type=1)
Event operations.
Definition: interface.py:1410
def annotation_interpolatePositions(self, obj)
Definition: interface.py:1370
def refresh(self, colour=(92, 39, 227), thickness=4, scrub=True)
Definition: interface.py:1775
config
Definition: main.py:889
def refresh(self)
Operational functions.
Definition: interface.py:989
def clearAnnotations(self, event=None, verbose=1)
Data loading/saving.
Definition: interface.py:1450
def __init__(self, videoFilename, local, sequence, eventLabels=[], **kwargs)
Definition: interface.py:1012
def itemsReorderString(self, obj)
Item list navigator.
Definition: interface.py:841
def loadTracking(self, event=None, filename='', interframeLoadCount=10, minimumTrajectoryLength=20)
Definition: interface.py:1458
def getProjectedMap(self, flush=False, scaling_factor=None)
Definition: interface.py:512
def events_save(self, event=None)
Definition: interface.py:1424
def itemsReorder(self, event=None, initOnly=False, setActiveIdx=0)
Definition: interface.py:845
curv_d_fig_data
Draw curvilinear distance graph.
Definition: interface.py:1994
def set(self, msg, *args)
Definition: interface.py:105
def join(obj1, obj2, postSmoothing=True)
Definition: tools_obj.py:816
def callback_getWorldCoords(self, event)
Definition: interface.py:421
def callback_switchItemByoIx(self, oIx)
Definition: interface.py:926
def callback_switchItem(self, event=None)
Definition: interface.py:938
def displayWarning(self, msg, seconds=5)
Definition: interface.py:113
def loadObject(self)
Data operations.
Definition: interface.py:1933
def annotation_select_nearest_trajectory(self, event)
Definition: interface.py:1392
def annotation_deletePosition(self, event)
Definition: interface.py:1319
data_set_label_obj
Data frame.
Definition: interface.py:1073
def menu_add_playback_elements(self)
Menu functions.
Definition: interface.py:327
def drawTkinterCursor(self, event=None)
Definition: interface.py:797
def displayMsg(self, msg, seconds=5)
Definition: interface.py:109
def projectObjects(self, objects)
Definition: interface.py:521
nfeat_fig_data
Draw number of features graph.
Definition: interface.py:1980
def drawProjectedGrid(self, img, grid_range=[0, spacing=1)
Definition: interface.py:735
def callback_scrubPrevKeyFrame(self, event=None)
Timeline functions.
Definition: interface.py:1150
def annotation_join_object(self, event=None, byId=0)
Definition: interface.py:1264
def annotation_projectPositions(self, obj)
Definition: interface.py:1327
def refreshTimeStamps(self)
Image/object drawing functions.
Definition: interface.py:504
def callback_autoPlay(self, event=None)
Definition: interface.py:459
def showDataWindow(self, content='')
General interface functions.
Definition: interface.py:304
def callback_scrubBck(self, event=None, amount=1)
Definition: interface.py:406
def callback_SpeedDown(self, event=None)
Definition: interface.py:453
def annotation_interpolatePosition(self, obj, frame, keyframes)
Definition: interface.py:1353
def setCoords(self, wx=0.0, wy=0.0, ix=0.0, iy=0.0)
Definition: interface.py:122
def annotation_RC_delete_object(self, event=None)
Definition: interface.py:1240
def callback_scrubNextKeyFrame(self, event=None)
Definition: interface.py:1155
def __init__(self, majorTickLength=8, minorTickLength=4, cursorWidth=2, majorTickGridLineStyle='-')
Definition: interface.py:2448
def loadData(self)
Data operations.
Definition: interface.py:1716
curv_o_fig_data
Draw curvilinear offset graph.
Definition: interface.py:2010
def callback_mouseItem(self, event=None)
Definition: interface.py:916
def callback_nextItem(self, event=None)
Definition: interface.py:910
def annotation_rightClick(self, event)
Annotation actions.
Definition: interface.py:1229
def getProjectedGridY(self, grid_range, spacing)
Definition: interface.py:635
def annotation_addPosition(self, event)
Definition: interface.py:1310
def annotation_createFeature(self, obj)
Definition: interface.py:1347
def setDefault(self, clock_str=None, offset_str=None, frame_str=None, objs_str=None, class_str=None)
Definition: interface.py:126
def callback_shortcut_D(self, event=None)
Definition: interface.py:485
def exportAnnotationsAs(self, event=None)
Definition: interface.py:1539
def annotation_set_age(self, event=None, force=None)
Definition: interface.py:1292
def active_object_show_imageBox(self)
Active object functions.
Definition: interface.py:954
protocol
Definition: main.py:1583
def annotation_set_gender(self, event=None, force=None)
Definition: interface.py:1301
def saveAnnotations(self, event=None)
Definition: interface.py:1509
def projectTexture(self, src, dstShape)
Definition: interface.py:529
def callback_scrubEnd(self, event=None)
Definition: interface.py:381
def __init__(self, videoFilename, local, satFilename=None, satRes=0.12, homography=None, alignments=None, fps=15, title='Video Timeseries', dynamicWindowSize=True, windowSize=[1200, intrinsicCameraMatrix=None, distortionCoefficients=None, undistortedImageScalingFactor=1.0, startTime=None, frame_data_width=500, launch=True, config=None, verbose=0)
Definition: interface.py:151
def callback_shortcut_B(self, event=None)
Definition: interface.py:495
def exportAnnotations(self, event=None, filename=None)
Definition: interface.py:1518
def callback_shortcut_T(self, event=None)
Definition: interface.py:464
def callback_scrubHome(self, event=None)
Definition: interface.py:378
def callback_scrubPrevEvent(self, event=None)
Definition: interface.py:1160
def callback_shortcut_A(self, event=None)
Definition: interface.py:474
def callback_scrubStatic(self, event=None)
Definition: interface.py:417
def __init__(self, objects, videoFilename, local, **kwargs)
Definition: interface.py:1647
def loadTrackingAs(self, event=None, **kwargs)
Definition: interface.py:1485
def __init__(self, master, style, local)
Definition: interface.py:86
def callback_shortcut_C(self, event=None)
Definition: interface.py:490
def drawBoundingBox(self, img, obj, frame)
Definition: interface.py:766
def annotation_invProjectPositions(self, obj)
Definition: interface.py:1337
def drawPoly(self, img, points, colour=(255, 0, 0))
Definition: interface.py:754
def callback_shortcut_G(self, event=None)
Definition: interface.py:469
def callback_switchItem(self, event=None)
Item list navigator.
Definition: interface.py:1916
def itemsReorderConfig(self, obj, i='end')
Definition: interface.py:1195
def __init__(self, fg='#b6b6b6', bglight='#535353', bgdark='#262626', selection='#676767', bgborder='#424242', accent1='#E3275C', accent2='#278BE3', accent3='#82E327', accent4='#FFA500', error='#FF0000', plotStyle=None)
Definition: interface.py:2433
frameRefreshTime
Minimum refresh time between frames.
Definition: interface.py:176
def events_export(self, event=None)
Definition: interface.py:1431
def itemsReorderString(self, obj)
Definition: interface.py:1193
def refresh(self, scrub=True, highlight_colour_BGR=(92, 39, 227), highlight_thickness=4)
Definition: interface.py:2030
Annotation-specific data structures.
Definition: interface.py:138
def annotation_set_userType(self, event=None, force=None)
Definition: interface.py:1283
def callback_scrubTo(self, frame)
Timeline functions.
Definition: interface.py:361
def drawProjectedContour(self, img, obj, frame)
Definition: interface.py:761
def callback_scrubTarget(self, event)
Definition: interface.py:370
def loadAnnotations(self, event=None)
Definition: interface.py:1490
def callback_scrubBckx10(self, event=None)
Definition: interface.py:384
def callback_refreshHomography(self, event=None)
Definition: interface.py:434
def loadObjects(sequencepath, max_obj=None, max_obj_features=999, suppress_features=False, legacy=False, legacy_features_path='')
The following functions are used for manipulating object data from Traffic-Intelligence.
Definition: tools_obj.py:521
def drawFeatures(self, img, i, **kwargs)
Definition: interface.py:826
def callback_shortcut_S(self, event=None)
Definition: interface.py:480
def refresh(self, colour=(92, 39, 227), thickness=4, scrub=True)
Definition: interface.py:1560
def drawGroupedObj(self, img, i, annotate_velocity=False, color=(0, 0, 0), thickness=4, **kwargs)
Definition: interface.py:809
def drawBoundingBoxAnnotationObj(self, img, obj, frame, keyframes)
Image/object drawing.
Definition: interface.py:1203
def __init__(self, num=0, frame=0, type=1)
Definition: interface.py:139
def annotation_new_object(self, event=None)
Annotation operations.
Definition: interface.py:1250
def annotation_delete_object(self, event=None, byId=0)
Definition: interface.py:1276
def getProjectedGridX(self, grid_range, spacing)
Definition: interface.py:578
def callback_switchItem(self, event=None)
Definition: interface.py:1172
def grabVideoFrame(self, scrub=True, sourceImageOnly=False)
Definition: interface.py:541
def itemsReorderConfig(self, obj, i="end")
Definition: interface.py:843
def convertMeasuredToRealPixels(self, x, y)
Definition: interface.py:508
def itemsReorder(self, event=None, initOnly=False)
Definition: interface.py:2411
def annotation_delete_active_object(self, event=None)
Definition: interface.py:1243
def drawFig(self, fig, master=None, location=None)
Definition: interface.py:770
def drawPlots(self, x, datas, fig_name, y_label='', xy_bounds=None)
Definition: interface.py:779
def annotation_RC_join_object(self, event=None)
Definition: interface.py:1237
def drawProjectedContourAnnotationObj(self, img, obj, frame, keyframes)
Definition: interface.py:1215
def loadData(self)
Data operations.
Definition: interface.py:1547
def callback_scrubFwdx10(self, event=None)
Definition: interface.py:386
def __init__(self, userPairs, videoFilename, local, **kwargs)
Definition: interface.py:2097
def __init__(self, objects, videoFilename, local, companion_objects=None, **kwargs)
Definition: interface.py:1830
def callback_SpeedUp(self, event=None)
Definition: interface.py:448
Interface elements.
Definition: interface.py:85
def callback_prevItem(self, event=None)
Definition: interface.py:904
def events_load(self, event=None)
Definition: interface.py:1440
def callback_scrubFwd(self, event=None, amount=1)
Definition: interface.py:389