tvaLib
Functions | Variables
lib.filt Namespace Reference

Functions

def duration (objects, duration=0, start=0, indent=4, verbose=0)
 Filtering library. More...
 
def corVelIndex (objects, indent=4, verbose=0)
 
def splitbyOutlierVectors (objects, hard_maxSpeed=6.0, soft_maxSpeed=2.0, maxAngle=45.0, indent=4, verbose=0)
 
def verifyObjectIntegrity (objects, ignoreFeatures=False, indent=4, skipTrajectoryContiguityCheck=False, verbose=0)
 
def compensateParralax (objects, origin, mast_height, obj_heights=[1.4, 1.4, 1.6, 1.4, 1.6, 2.5, 3.0], indent=4, verbose=0)
 
def smoothPositions (objects, indent=4, verbose=0)
 
def transformTrajectories (objects, translationX, translationY, rotation, objectsOnly=False, indent=4, verbose=0)
 
def filtTime (objects, startTime=0, endTime=-1, indent=4, verbose=0)
 
def filtBoundingBox (objects, bounding_boxes, containment_threshold=1.0, loopback_verification_frames=20, max_outside_dist=2.0, bounding_boxes_label='bounding_boxes', indent=4, min_ret_size=10, verbose=0)
 
def dropTrackingErrors (objects, config_min_traj_len=20, indent=4, verbose=0)
 
def dropTrackingDuplicates (objects, objectSearchWindow=8, minimumSeperationDistance=8.0, minimumSeperationVelocity=0.08, contiguity=0.80, indent=4, verbose=0)
 
def objectStitchExp (objects, maximum_d_seperation=4.0, maximum_t_seperation=200.0, maximum_h_seperation=60.0, d_weight=0.4, t_weight=0.4, h_weight=0.2, indent=4, verbose=0)
 
def objectStitch (objects, bounding_boxes, framerate, search_window=50, overlap_time=0.66, max_dwell_time=0.66, speed_adj_factor=1000.0, speed_sim_limit_min=10.0, speed_similarity_limit=0.30, search_radius=8.0, stop_speed_detection_limit=5.5, angle_similairty_deg=80.0, trim_frames=5, mps_kmh=3.6, indent=4, verbose=0)
 
def transformToCurvilinear (objects, alignments, restrict_by_type=True, cl_align_window_m=4.0, matchHeadingStrength=10.0, indent=4, local=None, passNumber=1, verbose=0)
 
def buildSpeed (objects, indent=4, verbose=0)
 
def classify (objects, cameraView, alignments=None, fileIx=None, config=None, commitPermanently=False, method=0, indent=4, verbose=0)
 
def purgeFeatures (objects)
 
def genHashID (objects)
 
def splitAndInjectObjectsByPointIndex (objects, oIx, pIx, packInjected=False, min_ret_size=10)
 Object list meta-methods. More...
 
def applyFunctionToObjects (objects, function, args, kwargs)
 
def trimObjects (objects, max_obj)
 
def dropObjects (objects, oIxs)
 
def getReportedAlignIdxs (objects)
 

Variables

 oldstdout
 
 stdout
 

Function Documentation

◆ applyFunctionToObjects()

def lib.filt.applyFunctionToObjects (   objects,
  function,
  args,
  kwargs 
)
Apply a function to all objects. 

Definition at line 1036 of file filt.py.

1036 def applyFunctionToObjects(objects, function, *args, **kwargs):
1037  ''' Apply a function to all objects. '''
1038  for i in range(len(objects)):
1039  objects[i] = function(objects[i], *args, **kwargs)
1040  return objects
1041 
def applyFunctionToObjects(objects, function, args, kwargs)
Definition: filt.py:1036
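
A minimal usage sketch, assuming objects can be any list; scale() is a hypothetical stand-in for a per-object transform and is not part of tvaLib:

    def scale(obj, factor=1.0):
        ''' Hypothetical per-object transform. '''
        return obj * factor

    objects = applyFunctionToObjects([1, 2, 3], scale, factor=3)
    # objects == [3, 6, 9]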

◆ buildSpeed()

def lib.filt.buildSpeed (   objects,
  indent = 4,
  verbose = 0 
)
Build absolute speed data. 

Definition at line 748 of file filt.py.

748 def buildSpeed(objects, indent=4, verbose=0):
749  ''' Build absolute speed data. '''
750  if(verbose >= 2): print(''.rjust(indent,' ')+'Building absolute speed...')
751 
752  if(hasattr(objects, '__len__')):
753  for i in range(len(objects)):
754  objects[i].velocities.positions.append([])
755  for point in range(len(objects[i].velocities.getXCoordinates())):
756  objects[i].velocities.positions[2].append(m.sqrt(m.pow(objects[i].velocities.getXCoordinates()[point],2)+m.pow(objects[i].velocities.getYCoordinates()[point],2)))
757  return objects
758 
759 
def buildSpeed(objects, indent=4, verbose=0)
Definition: filt.py:748
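
The loop above appends a third coordinate list to each object's velocities, holding the Euclidean norm of the x/y velocity components at every instant. A plain-list sketch of that computation (vx/vy are illustrative data):

    import math

    vx = [1.0, 3.0]                    # x-components of velocity, m/frame
    vy = [0.0, 4.0]                    # y-components of velocity, m/frame
    speeds = [math.sqrt(x**2 + y**2) for x, y in zip(vx, vy)]
    # speeds == [1.0, 5.0]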

◆ classify()

def lib.filt.classify (   objects,
  cameraView,
  alignments = None,
  fileIx = None,
  config = None,
  commitPermanently = False,
  method = 0,
  indent = 4,
  verbose = 0 
)
Attempt to classify road users.

    Input:
    ======
    sequence -> a specific Sequence() object (i.e. with framerate, video file...)
    method=0 -> Classify using speed threshold only
    method=1 -> Classify using HOG (requires video source as input)

Definition at line 760 of file filt.py.

760 def classify(objects, cameraView, alignments=None, fileIx=None, config=None, commitPermanently=False, method=0, indent=4, verbose=0):
761  ''' Attempt to classify road users.
762 
763  Input:
764  ======
765  sequence -> a specific Sequence() object (i.e. with framerate, video file...)
766  method=0 -> Classify using speed threshold only
767  method=1 -> Classify using HOG (requires video source as input)
768  '''
769 
770 
771  if(method==1 and fileIx is not None and config and alignments is not None):
772  ''' This next section is adapted from TI > scripts/classify-objects.py,
773  implementing automated routines and metadata loading, and
774  integrated with the rest of tvaLib.
775  '''
776  if(verbose >= 2): print(''.rjust(indent,' ')+'Classifying road users (HoG)...')
777 
778  import cv2
779  from scipy.stats import norm, lognorm
780  from moving import TimeInterval as TrafIntMoving_TimeInterval
781  from ml import SVM as TrafIntML_SVM
782  from types import MethodType
783 
784  speed_conversion = float(cameraView.camera.frameRate*config.mps_kmh)
785 
786 
787 
788  if(config.class_speedAggregationMeth=='quantile'): speedAggregationFunc = lambda speeds: np.percentile(speeds, config.class_speedAggregationQuant)
789  elif(config.class_speedAggregationMeth=='mean'): speedAggregationFunc = np.mean
790  else: speedAggregationFunc = np.median
791 
792 
793  pedBikeCarSVM = TrafIntML_SVM()
794  pedBikeCarSVM.load(os.path.join(config.dir, config.calibration_folder, config.class_PBV_SVMFilename))
795  bikeCarSVM = TrafIntML_SVM()
796  bikeCarSVM.load(os.path.join(config.dir, config.calibration_folder, config.class_BV_SVMFilename))
797 
798 
799  speedProbabilities = {1: lambda s: norm(config.class_meanVehicleSpeed/speed_conversion, config.class_stdVehicleSpeed/speed_conversion).pdf(s),
800  2: lambda s: norm(config.class_meanPedestrianSpeed/speed_conversion, config.class_stdPedestrianSpeed/speed_conversion).pdf(s),
801  4: lambda s: norm(config.class_meanCyclistSpeed/speed_conversion, config.class_stdCyclistSpeed/speed_conversion).pdf(s)} # numpy lognorm shape, loc, scale: shape for numpy is scale (std of the normal) and scale for numpy is location (mean of the normal)
802  #4: lambda s: lognorm(config.class_scaleCyclistSpeed, loc=0., scale = np.exp(config.class_locationCyclistSpeed-np.log(speed_conversion))).pdf(s)} # numpy lognorm shape, loc, scale: shape for numpy is scale (std of the normal) and scale for numpy is location (mean of the normal)
803  alignmentProbabilities = lambda s: alignments[s].getUserTypeProbabilities()
804 
805  if(verbose >= 10): print(''.rjust(indent+4,' ')+'SVM hashes: pedBikeCarSVM='+str(hash(pedBikeCarSVM))+' bikeCarSVM='+str(hash(bikeCarSVM)))
806 
807 
808  if(verbose >= 5): print(''.rjust(indent+4,' ')+'Preparing source video data...')
809  try: source = cv2.VideoCapture(cameraView[fileIx].getFullVideoFilename())
810  except Exception: raise Exception, [9997, 'Video file not found when attempting HoG-based classification.']
811  width = int(source.get(3))
812  height = int(source.get(4))
813  if(cameraView.camera.camera_matrix.data):
814  R = np.identity(3)
815  newImgSize = (int(round(width*cameraView.camera.imageScalingFactor)), int(round(height*cameraView.camera.imageScalingFactor)))
816  new_matrix = deepcopy(cameraView.camera.camera_matrix.asNpArray())
817  new_matrix[0,2] = newImgSize[0]/2.
818  new_matrix[1,2] = newImgSize[1]/2.
819  [map1, map2] = cv2.initUndistortRectifyMap(cameraView.camera.camera_matrix.asNpArray(), cameraView.camera.dist_coeffs.asNpArray(), R, new_matrix, newImgSize, cv2.CV_32FC1)
820  else: newImgSize=[width,height]
821 
822 
823  intervals = []
824  for obj in objects:
825  intervals.append(obj.getTimeInterval())
826  timeInterval = TrafIntMoving_TimeInterval.unionIntervals(intervals)
827 
828 
829 
832  print('!TEMPORARY FIX!: Fixing feature projections')
833  from cvutils import worldToImageProject
834  from moving import Trajectory
835  try: invHomography = tvaLib.Obj.invHomography(cameraView.getHomography().asNpArray())
836  except AttributeError: invHomography = None
837  for obj in objects:
838 
839  obj.classifyUserTypeHoGSVM = MethodType(tvaLib.Obj.classifyUserTypeHoGSVM, obj)
840 
841  if(obj.hasFeatures()):
842  for f in obj.getFeatures():
843  pp = worldToImageProject(f.getPositions().asArray(), None, None, invHomography).tolist()
844  f.positions_orig = deepcopy(f.positions)
845  f.positions = Trajectory(pp)
846 
847 
848 
849 
850 
851  ret = True
852  obj_instants = 0
853  obj_instants_tally = dict((x,0) for x in range(7))
854  class_method_tally = dict((x,0) for x in range(3))
855  frameNum = timeInterval.first
856  source.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frameNum)
857  if(verbose >= 5): print(''.rjust(indent+4,' ')+'Building classifier data from video...')
858  if(verbose >= 2 and verbose <=5): prog = tvaLib.ProgressBar(0, timeInterval.last, 77)
859  while ret and frameNum <= timeInterval.last:
860  if(verbose >= 2 and verbose <= 4): prog.updateAmount(frameNum)
861  elif(verbose >= 10): print(''.rjust(indent+8,' ')+'Frame #'+str(frameNum))
862  # Prepare frame
863  ret, img = source.read()
864  if(not ret): continue
865  if(cameraView.camera.camera_matrix.data): img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
866  # Crawl objects existing in current frame
867  for obj in objects:
868  inter = obj.getTimeInterval()
869  if(inter.contains(frameNum)):
870  try:
871  # First frame...
872  if(inter.first == frameNum):
873  obj.initClassifyUserTypeHoGSVM(speedAggregationFunc,
874  pedBikeCarSVM,
875  bikeCarSVM=bikeCarSVM,
876  pedBikeSpeedTreshold=config.class_maxPedestrianSpeed/speed_conversion,
877  bikeCarSpeedThreshold=config.class_maxCyclistSpeed/speed_conversion,
878  nInstantsIgnoredAtEnds=config.class_nFramesIgnoreAtEnds)
879  if(obj.appearanceClassifier == pedBikeCarSVM): class_method_tally[0] += 1
880  elif(obj.appearanceClassifier == bikeCarSVM): class_method_tally[1] += 1
881  else: class_method_tally[2] += 1
882  if(verbose >= 7): print(''.rjust(indent+12,' ')+'Obj #'+str(obj.num)+'\'s classification initialised')
883  # Final frame, perform final aggregated classification
884  elif(inter.last == frameNum):
885  for frame in obj.userTypes: obj_instants_tally[obj.userTypes[frame]] += 1
886  obj.classifyUserTypeHoGSVM(candidates=config.class_candidates, minSpeedEquiprobable=config.class_minSpeedEquiprobable/speed_conversion, speedProbabilities=speedProbabilities, alignmentProbabilities=alignmentProbabilities)
887  if(verbose >= 5): print(''.rjust(indent+12,' ')+'Obj #'+str(obj.num)+' classified fully as type ==> '+str(obj.userType)+')')
888 
889  # Perform instant classification at any other frame
890  else:
891  obj.classifyUserTypeHoGSVMAtInstant(img,
892  instant=frameNum,
893  width=newImgSize[0],
894  height=newImgSize[1],
895  px=config.class_percentIncreaseCrop,
896  py=config.class_percentIncreaseCrop,
897  minNPixels=config.class_minNPixels,
898  rescaleSize=config.class_hogRescaleSize,
899  orientations=config.class_hogNOrientations,
900  pixelsPerCell=config.class_hogNPixelsPerCell,
901  cellsPerBlock=config.class_hogNCellsPerBlock,
902  blockNorm=config.class_hogBlockNorm)
903  if(verbose >= 10): print(''.rjust(indent+12,' ')+'Obj #'+str(obj.num)+' classified at instant '+str(frameNum))
904 
905  except ValueError: import pdb; pdb.set_trace()#tvaLib.printWarning('Instant failed for obj #'+str(obj.num)+' (possibly due to missing features at this instant). Skipping.')
906  except cv2.error: tvaLib.printWarning('Instant failed for obj #'+str(obj.num)+' (OpenCV: Bad argument (The SVM should be trained first)) . Skipping.')
907  obj_instants += 1
908  frameNum += 1
909 
910 
911 
914  print('!TEMPORARY FIX!: Reverting feature projections')
915  for obj in objects:
916  if obj.hasFeatures():
917  for f in obj.getFeatures():
918  f.positions = f.positions_orig
919 
920 
921 
922 
923  if(verbose >= 5): print(''.rjust(indent+4,' ')+'Flushing classifiers...')
924  for obj in objects: obj.appearanceClassifier = None
925 
926 
927  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Instant-objects used for classification: '+str(obj_instants-obj_instants_tally[0])+'/'+str(obj_instants)+' ('+str(round((obj_instants-obj_instants_tally[0])/float(obj_instants), 3)*100)+'%) with '+str(config.class_minNPixels)+' pixels per object as a minimum for identification.')
928  if(verbose >= 3):
929  print(''.rjust(indent+4,' ')+'Filtering report: Instant-object identification rate:')
930  tvaLib.printTable([['User type:']+[x for x in obj_instants_tally],
931  '==',
932  ['Count']+ [obj_instants_tally[x] for x in obj_instants_tally],
933  ['Percentage']+[str(round(obj_instants_tally[x]/float(obj_instants), 3)*100)+'%' for x in obj_instants_tally]], indent=indent+8, padding=' | ')
934  print(''.rjust(indent+4,' ')+'Filtering report: Object classification methods:')
935  tvaLib.printTable([['Method:']+['pedBikeCarSVM','bikeCarSVM','carClassifier'],
936  '==',
937  ['Count']+ [class_method_tally[x] for x in class_method_tally],
938  ['Percentage']+[str(round(class_method_tally[x]/float(len(objects)), 3)*100)+'%' for x in class_method_tally]], indent=indent+8, padding=' | ')
939 
940 
941  else:
942  if(verbose >= 2): print(''.rjust(indent,' ')+'Classifying road users (speed threshold)...')
943  for i in range(len(objects)):
944  objects[i].classifyUserTypeSpeedMotorized(2/cameraView.camera.frameRate)
945 
946 
947 
948  if(commitPermanently and fileIx is not None):
949  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Permanently saving classification to source tracking database...')
950  from storage import setRoadUserTypes as TrafIntStorage_setRoadUserTypes
951  TrafIntStorage_setRoadUserTypes(cameraView[fileIx].getFullDataFilename(), objects)
952 
953 
954  return objects
955 
def classify(objects, cameraView, alignments=None, fileIx=None, config=None, commitPermanently=False, method=0, indent=4, verbose=0)
Definition: filt.py:760
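
With method=0, each object is classified by comparing its speed against a threshold expressed in per-frame units; the 2/frameRate argument above appears to correspond to 2 distance units per second converted to per-frame units. A hedged standalone sketch of that conversion; classify_fast() and the sample values are illustrative, not tvaLib API:

    def classify_fast(speed_m_per_frame, framerate_fps, threshold_m_s=2.0):
        ''' True if the per-frame speed exceeds the m/s threshold. '''
        return speed_m_per_frame > threshold_m_s / framerate_fps

    print(classify_fast(0.5, 15.0))    # True: 0.5 m/frame at 15 fps is 7.5 m/s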

◆ compensateParralax()

def lib.filt.compensateParralax (   objects,
  origin,
  mast_height,
  obj_heights = [1.4, 1.4, 1.6, 1.4, 1.6, 2.5, 3.0],
  indent = 4,
  verbose = 0 
)
Compensate for the parallax effect induced by mast height.

Definition at line 152 of file filt.py.

152 def compensateParralax(objects, origin, mast_height, obj_heights=[1.4,1.4,1.6,1.4,1.6,2.5,3.0], indent=4, verbose=0):
153  ''' Compensate for the parallax effect induced by mast height '''
154  if(not origin or not mast_height): return objects
155  if(verbose >= 2): print(''.rjust(indent,' ')+'Compensating for parallax effect (assuming a mast height of '+str(round(mast_height,2))+'m)...')
156  for i in range(len(objects)):
157  frac = 1-obj_heights[objects[i].getUserType()]/2.0/float(mast_height)
158  for point in range(len(objects[i].getXCoordinates())):
159  objects[i].positions.positions[0][point] = origin[0] + (objects[i].positions.positions[0][point]-origin[0])*frac
160  objects[i].positions.positions[1][point] = origin[1] + (objects[i].positions.positions[1][point]-origin[1])*frac
161  objects[i].velocities.positions[0][point] = objects[i].velocities.positions[0][point]*frac
162  objects[i].velocities.positions[1][point] = objects[i].velocities.positions[1][point]*frac
163  if(objects[i].features):
164  for f in range(len(objects[i].features)):
165  for point in range(len(objects[i].features[f].getXCoordinates())):
166  objects[i].features[f].positions.positions[0][point] = origin[0] + (objects[i].features[f].positions.positions[0][point]-origin[0])*frac
167  objects[i].features[f].positions.positions[1][point] = origin[1] + (objects[i].features[f].positions.positions[1][point]-origin[1])*frac
168  objects[i].features[f].velocities.positions[0][point] = objects[i].features[f].velocities.positions[0][point]*frac
169  objects[i].features[f].velocities.positions[1][point] = objects[i].features[f].velocities.positions[1][point]*frac
170  return objects
171 
def compensateParralax(objects, origin, mast_height, obj_heights=[1.4, 1.4, 1.6, 1.4, 1.6, 2.5, 3.0], indent=4, verbose=0)
Definition: filt.py:152
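
A worked instance of the scaling fraction: an object of height 1.6 m seen from a 10 m mast gives frac = 1 - 1.6/2.0/10.0 = 0.92, so every point moves 8% of the way toward the camera origin (the coordinates are illustrative):

    origin = (0.0, 0.0)
    x, y = 25.0, 10.0
    frac = 1 - 1.6 / 2.0 / 10.0                 # 0.92
    x_c = origin[0] + (x - origin[0]) * frac    # 23.0
    y_c = origin[1] + (y - origin[1]) * frac    # 9.2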

◆ corVelIndex()

def lib.filt.corVelIndex (   objects,
  indent = 4,
  verbose = 0 
)
Deal with the fact that len(.positions) == len(.velocities) + 1, i.e. assign a velocity to every position.
    Edit the algorithm as needed (an average may be more useful).
    This should always be initialised right away.

Definition at line 68 of file filt.py.

68 def corVelIndex(objects, indent=4, verbose=0):
69  ''' Deal with the fact that len(.positions) == len(.velocities) + 1, i.e. assign a velocity to every position.
70  Edit the algorithm as needed (an average may be more useful).
71  This should always be initialised right away.
72  '''
73  if(verbose >= 2): print(''.rjust(indent,' ')+'Correcting speed differential offset...')
74 
75  if(hasattr(objects, '__len__') and len(objects) > 0):
76  if(len(objects[0].getXCoordinates()) > len(objects[0].velocities.getXCoordinates())):
77  for i in range(len(objects)):
78  objects[i].velocities.getXCoordinates().append(objects[i].velocities.getXCoordinates()[-1])
79  objects[i].velocities.getYCoordinates().append(objects[i].velocities.getYCoordinates()[-1])
80  if(hasattr(objects[i], 'features') and objects[i].features):
81  for f in range(len(objects[i].features)):
82  objects[i].features[f].velocities.getXCoordinates().append(objects[i].features[f].velocities.getXCoordinates()[-1])
83  objects[i].features[f].velocities.getYCoordinates().append(objects[i].features[f].velocities.getYCoordinates()[-1])
84  return objects
85 
86 
def corVelIndex(objects, indent=4, verbose=0)
Definition: filt.py:68
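
A plain-list sketch of the padding step, assuming positions carry one more sample than velocities (illustrative data):

    positions_x = [0.0, 1.0, 2.0]
    velocities_x = [1.0, 1.0]
    if len(positions_x) > len(velocities_x):
        velocities_x.append(velocities_x[-1])   # repeat the last velocity
    # len(velocities_x) == len(positions_x)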

◆ dropObjects()

def lib.filt.dropObjects (   objects,
  oIxs 
)
Drop objects[i] from list. 

Definition at line 1053 of file filt.py.

1053 def dropObjects(objects, oIxs):
1054  ''' Drop objects[i] from list. '''
1055  for i in oIxs: objects[i] = []
1056  return filter(None, objects)
1057 
def dropObjects(objects, oIxs)
Definition: filt.py:1053
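
A usage sketch with an illustrative list (under Python 3, filter() returns an iterator, so wrap the result in list() there):

    objects = dropObjects(['a', 'b', 'c', 'd'], [1, 3])
    # objects == ['a', 'c']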

◆ dropTrackingDuplicates()

def lib.filt.dropTrackingDuplicates (   objects,
  objectSearchWindow = 8,
  minimumSeperationDistance = 8.0,
  minimumSeperationVelocity = 0.08,
  contiguity = 0.80,
  indent = 4,
  verbose = 0 
)
Detect and drop duplicate trajectories.

    For each object, search objectSearchWindow indices away for objects
    with similar motion profiles and proximity. These are likely to be
    duplicate tracking objects.

    contiguity represents the percentage of instants where the
    minimumSeperation* conditions must be true, for both distance and
    velocity (but not necessarily at the same instants).
    
    minimumSeperationDistance is in metres
    minimumSeperationVelocity is in m/frame

Definition at line 397 of file filt.py.

397 def dropTrackingDuplicates(objects, objectSearchWindow=8, minimumSeperationDistance=8.0, minimumSeperationVelocity=0.08, contiguity=0.80, indent=4, verbose=0):
398  ''' Detect and drop duplicate trajectories.
399 
400  For each object, search objectSearchWindow indices away for objects
401  with similar motion profiles and proximity. These are likely to be
402  duplicate tracking objects.
403 
404  contiguity represents the percentage of instants where the
405  minimumSeperation* conditions must be true, for both distance and
406  velocity (but not necessarily at the same instants).
407 
408  minimumSeperationDistance is in metres
409  minimumSeperationVelocity is in m/frame
410 
411  '''
412  if(verbose >= 2): print(''.rjust(indent,' ')+'Detecting and removing duplicate objects...')
413 
414  minimumSeperationDistanceSquared = minimumSeperationDistance**2
415 
416  drop_traj = []
417  drop_traj_list = []
418 
419  for i in range(len(objects)):
420  if(i in drop_traj_list): continue
421  vel1_list = [m.sqrt(x**2 + y**2) for x,y in zip(objects[i].velocities.getXCoordinates(),objects[i].velocities.getYCoordinates())]
422  for j in range(max(0,i-objectSearchWindow),min(len(objects),i+objectSearchWindow)):
423  if(i == j or j in drop_traj_list): continue
424 
425 
426  commonTimeInterval = objects[i].commonTimeInterval(objects[j])
427  if(commonTimeInterval.empty()): continue
428 
429 
430  ruLowOffset1 = max(commonTimeInterval.first-objects[i].getFirstInstant(),0)
431  ruUpOffset1 = ruLowOffset1 + commonTimeInterval.last-commonTimeInterval.first+1
432  ruLowOffset2 = max(commonTimeInterval.first-objects[j].getFirstInstant(),0)
433  ruUpOffset2 = ruLowOffset2 + commonTimeInterval.last-commonTimeInterval.first+1
434  vel1 = vel1_list[ruLowOffset1:ruUpOffset1]
435  vel2 = [m.sqrt(x**2 + y**2) for x,y in zip(objects[j].velocities.getXCoordinates()[ruLowOffset1:ruUpOffset1],objects[j].velocities.getYCoordinates()[ruLowOffset2:ruUpOffset2])]
436  rdistSquared = [x**2 + y**2 for x,y in zip([x2-x1 for x1,x2 in zip(objects[i].positions.getXCoordinates()[ruLowOffset1:ruUpOffset1],objects[j].positions.getXCoordinates()[ruLowOffset2:ruUpOffset2])],[y2-y1 for y1,y2 in zip(objects[i].positions.getYCoordinates()[ruLowOffset1:ruUpOffset1],objects[j].positions.getYCoordinates()[ruLowOffset2:ruUpOffset2])])]
437  rvel = [abs(x-y) for x,y in zip(vel1,vel2)]
438 
439 
441 
442 
443  if(sum([x < minimumSeperationDistanceSquared for x in rdistSquared]) < len(rdistSquared)*contiguity): continue
444 
445 
446  if(sum([x < minimumSeperationVelocity for x in rvel]) < len(rdistSquared)*contiguity): continue
447 
448 
449  if(objects[j].features is None or objects[i].features is None):
450  drop_traj.append(objects[i])
451  drop_traj_list.append(i)
452  elif(len(objects[i].features) > len(objects[j].features)):
453  drop_traj.append(objects[j])
454  drop_traj_list.append(j)
455  else:
456  drop_traj.append(objects[i])
457  drop_traj_list.append(i)
458 
459 
460  objects = dropObjects(objects, drop_traj_list)
461 
462  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Duplicate trajectories rejected: {0}'.format(len(drop_traj)))
463  return objects, drop_traj
464 
def dropObjects(objects, oIxs)
Definition: filt.py:1053
def dropTrackingDuplicates(objects, objectSearchWindow=8, minimumSeperationDistance=8.0, minimumSeperationVelocity=0.08, contiguity=0.80, indent=4, verbose=0)
Definition: filt.py:397
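
A sketch of the offset bookkeeping into the common time interval, using plain (first, last) frame numbers in place of TimeInterval() (values are illustrative):

    first_i, first_j = 100, 110                    # first instants of objects i and j
    common_first, common_last = 110, 150           # their overlapping frames
    low_i = max(common_first - first_i, 0)         # 10: skip i's first 10 samples
    up_i = low_i + common_last - common_first + 1  # 51
    low_j = max(common_first - first_j, 0)         # 0: j starts at the interval
    up_j = low_j + common_last - common_first + 1  # 41
    # Slices [low_i:up_i] and [low_j:up_j] then cover the same 41 common frames.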

◆ dropTrackingErrors()

def lib.filt.dropTrackingErrors (   objects,
  config_min_traj_len = 20,
  indent = 4,
  verbose = 0 
)
Drop short trajectories.
    config_min_traj_len is in frames

Definition at line 379 of file filt.py.

379 def dropTrackingErrors(objects, config_min_traj_len=20, indent=4, verbose=0):
380  ''' Drop short trajectories.
381  config_min_traj_len is in frames
382  '''
383  if(verbose >= 2): print(''.rjust(indent,' ')+'Detecting and removing tracking errors...')
384 
385  #Check for short trajectories
386  drop_traj = []
387  drop_traj_list = []
388  for i in range(len(objects)):
389  if((objects[i].timeInterval.last-objects[i].timeInterval.first) < config_min_traj_len):
390  drop_traj.append(objects[i])
391  drop_traj_list.append(i)
392  objects = dropObjects(objects, drop_traj_list)
393 
394  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Short trajectories (< {0} frames) rejected: {1}'.format(config_min_traj_len,len(drop_traj)))
395  return objects, drop_traj
396 
def dropObjects(objects, oIxs)
Definition: filt.py:1053
def dropTrackingErrors(objects, config_min_traj_len=20, indent=4, verbose=0)
Definition: filt.py:379
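
A brief usage sketch, assuming objects and a framerate variable are at hand so the frame threshold can be tied to wall-clock time:

    # Reject anything tracked for less than one second of video
    objects, rejected = dropTrackingErrors(objects, config_min_traj_len=int(framerate), verbose=2)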

◆ duration()

def lib.filt.duration (   objects,
  duration = 0,
  start = 0,
  indent = 4,
  verbose = 0 
)

Filtering library.

Trim sequence duration as indicated (in frames). Duration of 0 equals
    infinity. 

Definition at line 53 of file filt.py.

53 def duration(objects, duration=0, start=0, indent=4, verbose=0):
54  ''' Trim sequence duration as indicated (in frames). Duration of 0 equals
55  infinity. '''
56  if(duration==0 and start==0): return objects
57  if(verbose >= 2): print(''.rjust(indent,' ')+'Trimming sequence duration to '+str(duration)+' frames, starting at frame '+str(start)+'...')
58  for i in range(len(objects)):
59  if(objects[i].timeInterval.last < start or (objects[i].timeInterval.first > start+duration and duration != 0)):
60  objects[i] = []
61  continue
62  if(objects[i].timeInterval.first < start): objects[i] = tvaLib.Obj.trimObject(objects[i], start-objects[i].timeInterval.first)
63  elif(objects[i].timeInterval.last > start+duration and duration != 0): objects[i] = tvaLib.Obj.trimObject(objects[i], objects[i].timeInterval.last-start-duration, fromEnd=True)
64  objects = filter(None, objects)
65  return objects
66 
67 
def duration(objects, duration=0, start=0, indent=4, verbose=0)
Filtering library.
Definition: filt.py:53
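
A brief usage sketch, assuming objects is a list of MovingObject():

    # Keep only activity between frames 1000 and 2500; straddling
    # trajectories are trimmed rather than dropped.
    objects = duration(objects, duration=1500, start=1000, verbose=2)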

◆ filtBoundingBox()

def lib.filt.filtBoundingBox (   objects,
  bounding_boxes,
  containment_threshold = 1.0,
  loopback_verification_frames = 20,
  max_outside_dist = 2.0,
  bounding_boxes_label = 'bounding_boxes',
  indent = 4,
  min_ret_size = 10,
  verbose = 0 
)
Filter list of objects according to a list of boxes (mask or zone).

    Input:
    ======
    objects        = list of objects supplied by Traffic Intelligence
    bounding_boxes = a list of bounding boxes, where each bounding box is a
                     list of points, where each point is a list of
                     coordinates, e.g. bounding_boxes[alpha][1][x] is
                     coordinate x for point number 1 of the polygon that
                     represents bounding box alpha. bounding_boxes can also
                     be a compatible object that mimics a 3-dimensional
                     list using the __getitem__() method as is the case in
                     the PVAT specification.
    containment_threshold = The percentage of points that have to lie
                            within the bounding box such that it is not
                            removed entirely.
    loopback_verification_frames = In this mode, the algorithm will not
                                   stop at the first contained frame, and
                                   will instead attempt to search positions
                                   further upstream/downstream. This reduces
                                   error due to noisy trajectory terminators
                                   jumping across the scene (near edges).
    max_outside_dist = max distance any point can be outside of the bounding
                       box

    Output: (objects, dropped_traj)
    =======
    objects        = modified list of objects
    dropped_traj   = list of objects or object segments that were
                     truncated. The format is identical to that of objects.

    Notes:
    ======
    To minimise holes, we will only truncate outside-in; no individual
    points are removed.

Definition at line 236 of file filt.py.

236 def filtBoundingBox(objects, bounding_boxes, containment_threshold=1.0, loopback_verification_frames=20, max_outside_dist=2.0, bounding_boxes_label='bounding_boxes', indent=4, min_ret_size=10, verbose=0):
237  ''' Filter list of objects according to a list of boxes (mask or zone)
238  ======
239 
240  Input:
241  ======
242  objects = list of objects supplied by Traffic Intelligence
243  bounding_boxes = a list of bounding boxes, where each bounding box is a
244  list of points, where each point is a list of
245  coordinates, e.g. bounding_boxes[alpha][1][x] is
246  coordinate x for point number 1 of the polygon that
247  represents bounding box alpha. bounding_boxes can also
248  be a compatible object that mimics a 3-dimensional
249  list using the __getitem__() method as is the case in
250  the PVAT specification.
251  containment_threshold = The percentage of points that have to lie
252  within the bounding box such that it is not
253  removed entirely.
254  loopback_verification_frames = In this mode, the algorithm will not
255  stop at the first contained frame, and
256  will instead attempt to search positions
257  further upstream/downstream. This reduces
258  error due to noisy trajectory terminators
259  jumping across the scene (near edges).
260  max_outside_dist = max distance any point can be outside of the bounding
261  box
262 
263  Output: (objects, dropped_traj)
264  =======
265  objects = modified list of objects
266  dropped_traj = list of objects or object segments that were
267  truncated. The format is identical to that of objects.
268 
269  Notes:
270  ======
271  To minimise holes, we will only truncate outside-in; no individual
272  points are removed.
273  '''
274  if(verbose >= 2): print(''.rjust(indent,' ')+'Confining objects to '+bounding_boxes_label+'...')
275  if(not bounding_boxes): return objects, []
276 
277  dropped_traj = []
278  avg_traj_l = 0
279  containment_threshold = 1.0
280 
281 
282  rejected_points = {}
283  for i in range(len(objects)):
284  rejected_points[i] = []
285 
286  for point in range(len(objects[i].getXCoordinates())):
287  in_bb = False
288  for bb in range(len(bounding_boxes)):
289  if(tvaLib.Geo.pip(objects[i].getXCoordinates()[point],objects[i].getYCoordinates()[point],bounding_boxes[bb])): in_bb = True
290  if(not in_bb): rejected_points[i].append(point)
291 
292 
293  if(len(objects[i].getXCoordinates())-len(rejected_points[i]) < min_ret_size or len(rejected_points[i]) >= len(objects[i].getXCoordinates())*(containment_threshold)):
294  dropped_traj.append(objects[i])
295  objects[i] = []
296  continue
297  """
298  ## If any point further than max_outside_dist, split trajectory at that point
299  jump_to_point=0
300  for p in rejected_points:
301  if(p < jump_to_point): continue
302  distance = tvaLib.Geo.getNearestXYinSplineFromXY(objects[i].getXCoordinates()[p], objects[i].getYCoordinates()[p], bounding_boxes, stopAtValue=2.0)
303  if(distance > max_outside_dist):
304  if(i in splitList): splitList[i].append(p)
305  else: splitList[i] = [p]
306 
307  ## Commit Splits
308  for i in splitList:
309  objects,_ = splitAndInjectObjectsByPointIndex(objects, i, splitList[i], packInjected=True)
310  objects = tvaLib.flatten_list(objects)
311 """
312 
313  objects = filter(None, objects)
314 
315 
317  rejected_points = {}
318  for i in range(len(objects)):
319  rejected_points[i] = []
320 
321  for point in range(len(objects[i].getXCoordinates())):
322  in_bb = False
323  for bb in range(len(bounding_boxes)):
324  if(tvaLib.Geo.pip(objects[i].getXCoordinates()[point],objects[i].getYCoordinates()[point],bounding_boxes[bb])): in_bb = True
325  if(not in_bb): rejected_points[i].append(point)
326 
327 
328 
329  for i in range(len(objects)):
330  if(len(objects[i].getXCoordinates()) < min_ret_size):
331  objects[i] = []
332  continue
333  if(not rejected_points[i]): continue
334 
336  for break_point_start in range(len(objects[i].getXCoordinates())):
337  if(break_point_start not in rejected_points[i]):
338  newindex = 0
339  for loopbackIx in range(min(loopback_verification_frames, len(objects[i].getXCoordinates())-break_point_start-1)):
340  if(break_point_start+loopbackIx+1 in rejected_points[i]):
341  newindex = loopbackIx+1
342  break_point_start += newindex
343  break
344  # Search from last
345  for break_point_end in range(len(objects[i].getXCoordinates())-1, -1, -1):
346  if(break_point_end not in rejected_points[i]):
347  newindex = 0
348  for loopbackIx in range(min(loopback_verification_frames, break_point_end+1)):
349  if(break_point_end-loopbackIx-1 in rejected_points[i]):
350  newindex = -loopbackIx-1
351  break_point_end += newindex
352  break
353 
354 
355 
356  if(break_point_start + min_ret_size >= break_point_end):
357  dropped_traj.append(objects[i])
358  objects[i] = []
359  continue
360 
361  # Commit object trimming
362  objects[i] = tvaLib.Obj.trimObject(objects[i], break_point_start)
363  objects[i] = tvaLib.Obj.trimObject(objects[i], (objects[i].timeInterval.last-objects[i].timeInterval.first-break_point_end+break_point_start), fromEnd=True)
364 
365 
366  objects = filter(None, objects)
367 
368 
369  if(verbose >= 2):
370  if(len(dropped_traj) > 0): print(''.rjust(indent+4,' ')+'Filtering report: Trajectories removed entirely: '+str(len(dropped_traj)))
371  if(len(objects) == 0): print(''.rjust(indent+4,' ')+'Filtering report: No remaining objects!')
372  else:
373  print(''.rjust(indent+4,' ')+'Filtering report: Average points truncated: {0}'.format(sum([len(rejected_points[i]) for i in rejected_points])/len(objects)))
374  print(''.rjust(indent+4,' ')+'Filtering report: New average points per trajectory: {0}'.format(avg_traj_l/len(objects)))
375 
376  return objects, dropped_traj
377 
378 
def filtBoundingBox(objects, bounding_boxes, containment_threshold=1.0, loopback_verification_frames=20, max_outside_dist=2.0, bounding_boxes_label='bounding_boxes', indent=4, min_ret_size=10, verbose=0)
Definition: filt.py:236
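
A usage sketch showing the bounding_boxes shape described above: a list of polygons, each a list of [x, y] points (the rectangle and objects are illustrative):

    bounding_boxes = [[[0.0, 0.0], [50.0, 0.0], [50.0, 30.0], [0.0, 30.0]]]
    objects, dropped_traj = filtBoundingBox(objects, bounding_boxes,
                                            bounding_boxes_label='mask', verbose=2)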

◆ filtTime()

def lib.filt.filtTime (   objects,
  startTime = 0,
  endTime = -1,
  indent = 4,
  verbose = 0 
)
Filter list of objects according to a first and last frame. 

Definition at line 219 of file filt.py.

219 def filtTime(objects, startTime=0, endTime=-1, indent=4, verbose=0):
220  ''' Filter list of objects according to a first and last frame. '''
221  if(verbose >= 2): print(''.rjust(indent,' ')+'Confining objects to given time range...')
222  if(endTime==-1):
223  if(startTime==0): return objects
224  endTime = sys.maxint
225  for i in range(len(objects)):
226  if(objects[i].timeInterval.last > endTime):
227  if(objects[i].timeInterval.first > endTime): objects[i] = None
228  else: objects[i] = tvaLib.Obj.trimObject(objects[i], nframes=objects[i].timeInterval.last-endTime, fromEnd=True)
229  elif(objects[i].timeInterval.first < startTime):
230  if(objects[i].timeInterval.last < startTime): objects[i] = None
231  else: objects[i] = tvaLib.Obj.trimObject(objects[i], nframes=startTime-objects[i].timeInterval.first)
232  objects = filter(None, objects)
233  return objects
234 
235 
def filtTime(objects, startTime=0, endTime=-1, indent=4, verbose=0)
Definition: filt.py:219
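
A brief usage sketch, assuming objects is a list of MovingObject():

    # Drop objects entirely outside frames 500-2000; trim straddlers.
    objects = filtTime(objects, startTime=500, endTime=2000, verbose=2)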

◆ genHashID()

def lib.filt.genHashID (   objects)
Generates a value to uniquely identify each MovingObject() using
    hash(). This is particularly useful if data pointing to a MovingObject()
    will be stored separately and the MovingObject() is subject to
    modification, as is the case with the above filtering
    functions.

Definition at line 968 of file filt.py.

968 def genHashID(objects):
969  ''' Generates a value to uniquely identify each MovingObject() using
970  hash(). This is particularly useful if data pointing to a MovingObject()
971  will be stored separately and the MovingObject() is subject to
972  modification, as is the case with the above filtering
973  functions. '''
974  if(objects is not None):
975  for i in range(len(objects)):
976  obj_data_seq = tuple(objects[i].getXCoordinates()+objects[i].getYCoordinates()+objects[i].velocities.getXCoordinates()+objects[i].velocities.getYCoordinates())
977  objects[i].hash = hash(obj_data_seq)
978  return objects
979 
980 
981 
def genHashID(objects)
Definition: filt.py:968
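
The fingerprint is Python's built-in hash() over a tuple of the concatenated coordinate lists; a plain-list sketch with illustrative data:

    xs, ys = [0.0, 1.0], [0.0, 0.5]          # positions
    vxs, vys = [1.0, 1.0], [0.5, 0.5]        # velocities
    fingerprint = hash(tuple(xs + ys + vxs + vys))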

◆ getReportedAlignIdxs()

def lib.filt.getReportedAlignIdxs (   objects)
Get a list of alignment numbers used by curvilinear positions of all
    objects. 

Definition at line 1058 of file filt.py.

1058 def getReportedAlignIdxs(objects):
1059  ''' Get a list of alignment numbers used by curvilinear positions of all
1060  objects. '''
1061  return list(set(tvaLib.flatten_list([obj.curvilinearPositions.getLanes() for obj in objects])))
def getReportedAlignIdxs(objects)
Definition: filt.py:1058
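
A brief usage sketch, assuming the objects have already been through transformToCurvilinear() so that curvilinearPositions exists:

    align_idxs = getReportedAlignIdxs(objects)   # e.g. [0, 2, 3]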

◆ objectStitch()

def lib.filt.objectStitch (   objects,
  bounding_boxes,
  framerate,
  search_window = 50,
  overlap_time = 0.66,
  max_dwell_time = 0.66,
  speed_adj_factor = 1000.0,
  speed_sim_limit_min = 10.0,
  speed_similarity_limit = 0.30,
  search_radius = 8.0,
  stop_speed_detection_limit = 5.5,
  angle_similairty_deg = 80.0,
  trim_frames = 5,
  mps_kmh = 3.6,
  indent = 4,
  verbose = 0 
)
Attempts to stitch two MovingObject() corresponding to the same real
    object. Correspondence is verified with a series of five tests:
        1-Sequential appearance within the scene
        2-Disappearance time within adjusted dwell time limit (based on speed)
        3-End-point velocity similarity
        4-End-point spatial proximity
        5-Heading similarity between velocity vectors and velocity with travel vector
    A new list of objects is returned, along with a list of objects
    which were detected as being stopped.

    Test constraints:
    =================
    Test 1:
        search_window:          Is the number of following objects to verify; strictly performance related. Use a large number to increase accuracy.
        overlap_time:           Maximum time (in seconds) of overlap between objects to stitch together
    Test 2:
        max_dwell_time:         Maximum time (in seconds) between successive objects to be stitched together
        speed_adj_factor:       Used to calibrate speed-adjusted dwell time to add to max_base_dwell_time (the slower cars move, the longer to wait).
    Test 3:
        speed_sim_limit_min:    Ignore similarity check if both vehicles travelling under this speed (km/h)
        speed_similarity_limit: Maximum percentage difference between speeds (km/h).
    Test 4:
        search_radius:          Search radius (in distance units) to verify spatial proximity.
    Test 5:
        angle_similairty_deg:   Heading similarity threshold

    Other input:
    ============
    stop_speed_detection_limit: Minimum speed (in km/h) to trigger a stop detection
    trim_frames:                Trim this many frames at end of parent object and beginning of subsumed object (features tend to pop in and out at edges and skew trajectories).

    The speed-adjusted dwell time = speed_adj_factor/max(5.0,(v1+v2)*3.6*framerate/2)^2.3
        E.g. for a speed_adj_factor of 1000 and a combined speed of 5 km/h,
        wait about 25 s in addition to max_dwell_time; at 10 km/h the
        wait is only about 5 s.

    Velocity similarity fails if speed_similarity_limit < abs(v1-v2)/max(v1,v2)

Definition at line 553 of file filt.py.

553 def objectStitch(objects, bounding_boxes, framerate, search_window=50, overlap_time=0.66, max_dwell_time=0.66, speed_adj_factor=1000.0, speed_sim_limit_min=10.0, speed_similarity_limit=0.30, search_radius=8.0, stop_speed_detection_limit=5.5, angle_similairty_deg=80.0, trim_frames=5, mps_kmh=3.6, indent=4, verbose=0):
554  ''' Attempts to stitch two MovingObject() corresponding to the same real
555  object. Correspondence is verified with a series of five tests:
556  1-Sequential appearance within the scene
557  2-Disappearance time within adjusted dwell time limit (based on speed)
558  3-End-point velocity similarity
559  4-End-point spatial proximity
560  5-Heading similarity between velocity vectors and velocity with travel vector
561  A new list of objects is returned, along with a list of objects
562  which were detected as being stopped.
563 
564  Test constraints:
565  =================
566  Test 1:
567  search_window: Is the number of following objects to verify; strictly performance related. Use a large number to increase accuracy.
568  overlap_time: Maximum time (in seconds) of overlap between objects to stitch together
569  Test 2:
570  max_dwell_time: Maximum time (in seconds) between successive objects to be stitched together
571  speed_adj_factor: Used to calibrate speed-adjusted dwell time to add to max_base_dwell_time (the slower cars move, the longer to wait).
572  Test 3:
573  speed_sim_limit_min: Ignore similarity check if both vehicles travelling under this speed (km/h)
574  speed_similarity_limit: Maximum percentage difference between speeds (km/h).
575  Test 4:
576  search_radius: Search radius (in distance units) to verify spatial proximity.
577  Test 5:
578  angle_similairty_deg: Heading similarity threshold
579 
580  Other input:
581  ============
582  stop_speed_detection_limit: Minimum speed (in km/h) to trigger a stop detection
583  trim_frames: Trim this many frames at end of parent object and beginning of subsumed object (features tend to pop in and out at edges and skew trajectories).
584 
585  The speed-adjusted dwell time = speed_adj_factor/max(5.0,(v1+v2)*3.6*framerate/2)^2.3
586  E.g. for a speed_adj_factor of 1000 and a combined speed of 5 km/h,
587  wait about 25 s in addition to max_dwell_time; at 10 km/h the
588  wait is only about 5 s.
589 
590  Velocity similarity fails if speed_similarity_limit < abs(v1-v2)/max(v1,v2)
591  '''
592  if(verbose >= 2): print(''.rjust(indent,' ')+'Attempting object stitching...')
593 
594  dropped_trajectories = []
595  subsummed = 0
596 
597  for i in range(len(objects)-1):
598  if(not objects[i]): continue
599 
600  for bb in range(len(bounding_boxes)):
601  if tvaLib.Geo.pip(objects[i].getXCoordinates()[-1],objects[i].getYCoordinates()[-1],bounding_boxes[bb]):
602 
603  for k in range(len(objects))[i+1:min(i+search_window,len(objects))]:
604  if(not objects[k]): continue
605  # Test 1: Check that (new) object k doesn't precede object i by more than overlap_frames frames
606  if(objects[i].timeInterval.last - overlap_time*framerate > objects[k].timeInterval.first): continue
607  # Test 2: Check maximum wait time
608  vi = m.sqrt(objects[i].velocities.getXCoordinates()[-max(1,trim_frames)]**2+objects[i].velocities.getYCoordinates()[-max(1,trim_frames)]**2)
609  vk = m.sqrt(objects[k].velocities.getXCoordinates()[max(0,trim_frames-1)]**2+objects[k].velocities.getYCoordinates()[max(0,trim_frames-1)]**2)
610  speed_adjusted_dwell_time = speed_adj_factor/max(5.0,(vi+vk)*mps_kmh*framerate/2)**2.3
611  if(objects[k].timeInterval.first-objects[i].timeInterval.last > (max_dwell_time+speed_adjusted_dwell_time)*framerate): continue
612  # Test 3: Check end-point velocity similarity if both speeds above minimum check
613  if(vi > speed_sim_limit_min/framerate/mps_kmh and vk > speed_sim_limit_min/framerate/3.6 and abs(vi-vk)/max(vi,vk) > speed_similarity_limit): continue
614  # Test 4: Check end-point spatial proximity
615  if(m.sqrt((objects[k].getXCoordinates()[max(0,trim_frames-1)]-objects[i].getXCoordinates()[-max(1,trim_frames)])**2+(objects[k].getYCoordinates()[max(0,trim_frames-1)]-objects[i].getYCoordinates()[-max(1,trim_frames)])**2) > search_radius): continue
616  # Get trimmed trajectories
617  obji = tvaLib.Obj.trimObject(deepcopy(objects[i]), nframes=trim_frames, fromEnd=True)
618  objk = tvaLib.Obj.trimObject(deepcopy(objects[k]), nframes=trim_frames)
619  if(obji == [] or objk == []): continue
620  # Test 5: Check heading similarity for velocity vectors at either end and end velocity vector with travel distance
621  v_angle = tvaLib.Geo.vectorsToAngleDegCC(objk.velocities.getXCoordinates()[0],obji.velocities.getXCoordinates()[-1],objk.velocities.getYCoordinates()[0],obji.velocities.getYCoordinates()[-1])
622  if(v_angle > 180.0): v_angle = 360.0 - v_angle
623  if(v_angle > angle_similairty_deg): continue
624  t_angle = tvaLib.Geo.vectorsToAngleDegCC(objk.positions.getXCoordinates()[0]-obji.positions.getXCoordinates()[-1],objk.positions.getYCoordinates()[0]-obji.positions.getYCoordinates()[-1],obji.velocities.getXCoordinates()[-1],obji.velocities.getYCoordinates()[-1])
625  if(t_angle > 180.0): t_angle = 360.0 - t_angle
626  if(t_angle > angle_similairty_deg): continue
627 
628  objects[i] = obji
629  objects[k] = objk
630  if(verbose >= 3): print(''.rjust(indent,' ')+'Stitching report: Object num '+str(objects[i].num)+' subsumes object num '+str(objects[k].num)+'.')
631  # Concatenate trajectories
632  objects[i] = tvaLib.Obj.join(objects[i], objects[k])
633  # Drop second trajectory
634  objects[k] = []
635  subsummed += 1
636 
637  if(vi*framerate*mps_kmh < stop_speed_detection_limit): dropped_trajectories.append(objects[i].num)
638  break
639  break
640  objects = filter(None, objects)
641  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Subsummed objects: '+str(subsummed)+'; stopped objects: '+str(len(dropped_trajectories)))
642  return objects, dropped_trajectories
643 
644 
def objectStitch(objects, bounding_boxes, framerate, search_window=50, overlap_time=0.66, max_dwell_time=0.66, speed_adj_factor=1000.0, speed_sim_limit_min=10.0, speed_similarity_limit=0.30, search_radius=8.0, stop_speed_detection_limit=5.5, angle_similairty_deg=80.0, trim_frames=5, mps_kmh=3.6, indent=4, verbose=0)
Definition: filt.py:553
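
A worked instance of the speed-adjusted dwell time from Test 2, using illustrative end-point speeds in m/frame:

    speed_adj_factor = 1000.0
    framerate, mps_kmh = 15.0, 3.6
    vi, vk = 0.09, 0.095                                          # m/frame
    combined_kmh = max(5.0, (vi + vk) * mps_kmh * framerate / 2)  # 5.0 km/h floor applies
    extra_wait_s = speed_adj_factor / combined_kmh**2.3           # ~24.7 s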

◆ objectStitchExp()

def lib.filt.objectStitchExp (   objects,
  maximum_d_seperation = 4.0,
  maximum_t_seperation = 200.0,
  maximum_h_seperation = 60.0,
  d_weight = 0.4,
  t_weight = 0.4,
  h_weight = 0.2,
  indent = 4,
  verbose = 0 
)
Experimental object reconnection function. 

Definition at line 465 of file filt.py.

465 def objectStitchExp(objects, maximum_d_seperation=4.0, maximum_t_seperation=200.0, maximum_h_seperation=60.0, d_weight=0.4, t_weight=0.4, h_weight=0.2, indent=4, verbose=0):
466  ''' Experimental object reconnection function. '''
467 
468  try: from munkres import Munkres
469  except Exception: raise Exception, [9998, 'Munkres is not installed.']
470  #import matplotlib.pyplot as plt
471  #import matplotlib.pylab as plb
472  #fig = plt.figure()
473  #for obj in objects:
474  # plt.plot(obj.getXCoordinates(), obj.getYCoordinates(), color='0.5')
475 
476  if(verbose >= 2): print(''.rjust(indent,' ')+'Attempting object reconnection...')
477  dropped_trajectories = []
478 
479 
480 
481  maximum_d_seperation = maximum_d_seperation**2
482 
483 
484  candidate_connections=[]
485  for i in range(len(objects)):
486  for j in range(len(objects)-1):
487  if(i==j): continue
488  time_seperation = abs(objects[j].getFirstInstant()-objects[i].getLastInstant())
489  if(time_seperation < maximum_t_seperation):
490  dist_squared = (objects[j].getXCoordinates()[0]-objects[i].getXCoordinates()[-1])**2+(objects[j].getYCoordinates()[0]-objects[i].getYCoordinates()[-1])**2
491  if(dist_squared < maximum_d_seperation):
492  heading_seperation = tvaLib.Geo.vectorsToAngleDegCC(objects[j].velocities.getXCoordinates()[0],objects[i].velocities.getXCoordinates()[-1],objects[j].velocities.getYCoordinates()[0],objects[i].velocities.getYCoordinates()[-1])
493  if(heading_seperation < maximum_h_seperation):
494  candidate_connections.append([i,j,dist_squared,time_seperation,heading_seperation])
495  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Starting candidate connections: '+str(len(candidate_connections)))
496  if(not candidate_connections): return objects, []
497 
498 
499 
500  indeces =[]
501  munkres = Munkres()
502  chunksize = 200
503  for chunk in range(len(candidate_connections)/chunksize):
504 
505  exit_vertices = list(set([x[0] for x in candidate_connections[chunk*chunksize:(chunk+1)*chunksize]]))
506  enter_vertices = list(set([x[1] for x in candidate_connections[chunk*chunksize:(chunk+1)*chunksize]]))
507 
508 
509  if(verbose >= 10): print(''.rjust(indent+4,' ')+'Building bipartite graph from connections for chunk '+str(chunk+1)+' of '+str(len(candidate_connections)/chunksize)+'...')
510  bipartite_graph = np.ones([len(exit_vertices), len(enter_vertices)])
511  for connection in candidate_connections[chunk*chunksize:(chunk+1)*chunksize]:
512  bipartite_graph[exit_vertices.index(connection[0])][enter_vertices.index(connection[1])] = (connection[2]/maximum_d_seperation*d_weight+connection[3]/maximum_t_seperation*t_weight+connection[4]/maximum_h_seperation*h_weight)/sum([d_weight, t_weight, h_weight])
513 
514 
515  if(verbose >= 4): print(''.rjust(indent+4,' ')+'Solving the trajectory assignment problem for chunk '+str(chunk+1)+' of '+str(len(candidate_connections)/chunksize)+'...')
516  indeces += munkres.compute(bipartite_graph.tolist())
517 
518 
519  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Joining trajectories... ('+str(len(indeces))+')')
520  b = 0
521  exit_vertices = list(set([x[0] for x in candidate_connections]))
522  enter_vertices = list(set([x[1] for x in candidate_connections]))
523  for index in indeces:
524  i = exit_vertices[index[0]]
525  j = enter_vertices[index[1]]
526  if(objects[i]==None or objects[j]==None): continue
527  dist_squared = (objects[j].getXCoordinates()[0]-objects[i].getXCoordinates()[-1])**2+(objects[j].getYCoordinates()[0]-objects[i].getYCoordinates()[-1])**2
528  time_seperation = abs(objects[j].getFirstInstant()-objects[i].getLastInstant())
529  heading_seperation = tvaLib.Geo.vectorsToAngleDegCC(objects[j].velocities.getXCoordinates()[0],objects[i].velocities.getXCoordinates()[-1],objects[j].velocities.getYCoordinates()[0],objects[i].velocities.getYCoordinates()[-1])
530  if(dist_squared < maximum_d_seperation and time_seperation < maximum_t_seperation and heading_seperation < maximum_h_seperation):
531  #print objects[i].num,objects[j].num, dist_squared, time_seperation, heading_seperation
532  #plt.plot(objects[i].getXCoordinates(), objects[i].getYCoordinates(), color='b')
533  #plt.annotate(objects[i].num, xy=(objects[i].getXCoordinates()[1], objects[i].getYCoordinates()[1]), color='b')
534  #plt.plot(objects[j].getXCoordinates(), objects[j].getYCoordinates(), color='r')
535  #plt.annotate(objects[j].num, xy=(objects[j].getXCoordinates()[1], objects[j].getYCoordinates()[1]), color='r')
536  #plt.plot([objects[i].getXCoordinates()[-1], objects[j].getXCoordinates()[0]], [objects[i].getYCoordinates()[-1], objects[j].getYCoordinates()[0]], linewidth=2, color='k')
537  objects[i] = tvaLib.Obj.join(objects[i], objects[j])
538  dropped_trajectories.append(objects[j])
539  objects[j] = None
540  b += 1
541  objects = filter(None, objects)
542  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Effective reconnections: '+str(b))
543 
544 
545 
551  return objects, dropped_trajectories
552 
def objectStitchExp(objects, maximum_d_seperation=4.0, maximum_t_seperation=200.0, maximum_h_seperation=60.0, d_weight=0.4, t_weight=0.4, h_weight=0.2, indent=4, verbose=0)
Definition: filt.py:465
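
A minimal sketch of the assignment step, assuming the munkres package is installed; rows are exiting trajectories, columns are entering ones, and each cell is the normalised connection cost (values are illustrative):

    from munkres import Munkres

    cost_matrix = [[0.2, 1.0],
                   [1.0, 0.4]]
    pairs = Munkres().compute(cost_matrix)
    # pairs == [(0, 0), (1, 1)]: connect exit 0 to entry 0, exit 1 to entry 1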

◆ purgeFeatures()

def lib.filt.purgeFeatures (   objects)
Purge features from objects if they exist (for the sake of saving
    memory).
    It is recommended to omit loading features from disk from the start, but
    this may not be an option when loading serialised trajectories or when
    features are needed only temporarily for an intermediary step.

Definition at line 956 of file filt.py.

956 def purgeFeatures(objects):
957  ''' Purge features from objects if they exist (for the sake of saving
958  memory).
959  It is recommended to omit loading features from disk from the start, but
960  this may not be an option when loading serialised trajectories or when
961  features are needed only temporarily for an intermediary step.
962  '''
963  for i in range(len(objects)):
964  if(hasattr(objects[i], 'features')): del(objects[i].features)
965 
966  return objects
967 
def purgeFeatures(objects)
Definition: filt.py:956

◆ smoothPositions()

def lib.filt.smoothPositions (   objects,
  indent = 4,
  verbose = 0 
)
Improve grouped position trajectories 

Definition at line 172 of file filt.py.

172 def smoothPositions(objects, indent=4, verbose=0):
173  ''' Improve grouped position trajectories '''
174  if(verbose >= 2): print(''.rjust(indent,' ')+'Smoothing trajectory positions...')
175  for i in range(len(objects)):
176  if(objects[i].hasFeatures()):
177  try: objects[i] = TrafIntSmoothing_smoothObject(objects[i], objects[i].num, plotResults=False)
178  except: pass
179  return objects
180 
181 
def smoothPositions(objects, indent=4, verbose=0)
Definition: filt.py:172

◆ splitAndInjectObjectsByPointIndex()

def lib.filt.splitAndInjectObjectsByPointIndex (   objects,
  oIx,
  pIx,
  packInjected = False,
  min_ret_size = 10 
)

Object list meta-methods.

For a given object and a list of positions, split the object and inject
    the new ones nearby. The split point occurs AFTER the designated point.

    To solve issues with non-dynamic indices when calling this function
    in-line with a loop through objects, use packInjected=True and flatten
    objects outside of the loop.

Definition at line 985 of file filt.py.

985 def splitAndInjectObjectsByPointIndex(objects, oIx, pIx, packInjected=False, min_ret_size=10):
986  ''' For a given object and a list of positions, split the object and inject
987  the new ones nearby. The split point occurs AFTER the designated point.
988 
989  To solve issues with non-dynamic indices when calling this function
990  in-line with a loop through objects, use packInjected=True and flatten
991  objects outside of the loop.
992 
993  '''
994  if(not pIx): return objects
995 
996 
997  new_objects = [deepcopy(objects[oIx]) for x in range(len(pIx))]
998 
999  if(len(pIx) >= 2):
1000  for i in range(len(pIx)-1):
1001  new_objects[i].uuid = uuid4()
1002  new_objects[i].positions.positions[0] = new_objects[i].positions.positions[0][pIx[i]:pIx[i+1]]
1003  new_objects[i].positions.positions[1] = new_objects[i].positions.positions[1][pIx[i]:pIx[i+1]]
1004  new_objects[i].velocities.positions[0] = new_objects[i].velocities.positions[0][pIx[i]:pIx[i+1]]
1005  new_objects[i].velocities.positions[1] = new_objects[i].velocities.positions[1][pIx[i]:pIx[i+1]]
1006  try: new_objects[i].velocities.positions[2] = new_objects[i].velocities.positions[2][pIx[i]:pIx[i+1]]
1007  except: pass
1008  new_objects[i].timeInterval.last = new_objects[i].timeInterval.first + pIx[i+1]
1009  new_objects[i].timeInterval.first = new_objects[i].timeInterval.first + pIx[i]
1010 
1011  new_objects[-1].uuid = uuid4()
1012  new_objects[-1].positions.positions[0] = new_objects[-1].positions.positions[0][pIx[-1]:]
1013  new_objects[-1].positions.positions[1] = new_objects[-1].positions.positions[1][pIx[-1]:]
1014  new_objects[-1].velocities.positions[0] = new_objects[-1].velocities.positions[0][pIx[-1]:]
1015  new_objects[-1].velocities.positions[1] = new_objects[-1].velocities.positions[1][pIx[-1]:]
1016  try: new_objects[-1].velocities.positions[2] = new_objects[-1].velocities.positions[2][pIx[-1]:]
1017  except: pass
1018  new_objects[-1].timeInterval.first = new_objects[-1].timeInterval.last - len(new_objects[-1].positions.positions[0])+1
1019 
1020  objects[oIx].positions.positions[0] = objects[oIx].positions.positions[0][:pIx[0]]
1021  objects[oIx].positions.positions[1] = objects[oIx].positions.positions[1][:pIx[0]]
1022  objects[oIx].velocities.positions[0] = objects[oIx].velocities.positions[0][:pIx[0]]
1023  objects[oIx].velocities.positions[1] = objects[oIx].velocities.positions[1][:pIx[0]]
1024  try: objects[oIx].velocities.positions[2] = objects[oIx].velocities.positions[2][:pIx[0]]
1025  except: pass
1026  objects[oIx].timeInterval.last = objects[oIx].timeInterval.first + len(objects[oIx].positions.positions[0])-1
1027 
1028  retained = filter(None, [obj if len(obj.getXCoordinates()) >= min_ret_size else None for obj in new_objects+[objects[oIx]]])
1029  dropped_trajectories = filter(None, [obj if len(obj.getXCoordinates()) < min_ret_size else None for obj in new_objects+[objects[oIx]]])
1030 
1031 
1032  if(packInjected): return objects[:oIx] + [retained] + objects[(oIx+1):], dropped_trajectories
1033  else: return objects[:oIx] + retained + objects[(oIx+1):], dropped_trajectories
1034 
1035 
def splitAndInjectObjectsByPointIndex(objects, oIx, pIx, packInjected=False, min_ret_size=10)
Object list meta-methods.
Definition: filt.py:985
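
A plain-list sketch mirroring the slicing above (values are illustrative):

    points = [10, 11, 12, 13, 14, 15]
    pIx = [2, 4]
    kept = points[:pIx[0]]           # [10, 11]: stays with the original object
    mid = points[pIx[0]:pIx[1]]      # [12, 13]: first injected object
    tail = points[pIx[-1]:]          # [14, 15]: last injected object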

◆ splitbyOutlierVectors()

def lib.filt.splitbyOutlierVectors (   objects,
  hard_maxSpeed = 6.0,
  soft_maxSpeed = 2.0,
  maxAngle = 45.0,
  indent = 4,
  verbose = 0 
)
Split trajectories on key turn points (outlier points which deviate
    significantly from the trajectory via large and/or sudden change in
    speed and heading).

    These typically occur when features appear and disappear suddenly in
    the image, or, in more extreme situations, when features from different
    real objects get mixed up.

Definition at line 87 of file filt.py.

87 def splitbyOutlierVectors(objects, hard_maxSpeed=6.0, soft_maxSpeed=2.0, maxAngle=45.0, indent=4, verbose=0):
88  ''' Split trajectories on key turn points (outlier points which deviate
89  significantly from the trajectory via large and/or sudden change in
90  speed and heading).
91 
92  These typically occur when features appear and disappear suddenly in
93  the image, or, in more extreme situations, when features from different
94  real objects get mixed up.
95  '''
96  if(verbose >= 2): print(''.rjust(indent,' ')+'Removing outlier points...')
97 
98  rejected = []
99  for i in range(len(objects)):
100 
101  velVects = []
102  velRelAngles = []
103  for j in range(len(objects[i].getXCoordinates())-1):
104  velVects.append(m.sqrt((objects[i].getXCoordinates()[j+1]-objects[i].getXCoordinates()[j])**2+(objects[i].getYCoordinates()[j+1]-objects[i].getYCoordinates()[j])**2))
105  velRelAngles.append(tvaLib.Geo.vectorsToAngleDegCC(objects[i].velocities.getXCoordinates()[j],objects[i].velocities.getYCoordinates()[j],objects[i].velocities.getXCoordinates()[j+1],objects[i].velocities.getYCoordinates()[j+1]))
106 
107  outliers = []
108  for j in range(1,len(objects[i].getXCoordinates())-1):
109  if(velVects[j] > hard_maxSpeed or (velVects[j] > soft_maxSpeed and velRelAngles[j-1] > maxAngle)): outliers.append(j)
110 
111  if(outliers):
112  objects,reject = splitAndInjectObjectsByPointIndex(objects, i, outliers, packInjected=True)
113  if(reject): rejected.append(reject)
114 
115 
116  return tvaLib.flatten_list(objects),tvaLib.flatten_list(rejected)
117 
118 
def splitbyOutlierVectors(objects, hard_maxSpeed=6.0, soft_maxSpeed=2.0, maxAngle=45.0, indent=4, verbose=0)
Definition: filt.py:87
def splitAndInjectObjectsByPointIndex(objects, oIx, pIx, packInjected=False, min_ret_size=10)
Object list meta-methods.
Definition: filt.py:985
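
As a worked illustration of the outlier test above, a standalone sketch with assumed displacement magnitudes (m/frame) and heading changes (degrees); the thresholds mirror the defaults:

    disp = [0.5, 0.6, 7.2, 0.55]       # per-frame displacement magnitudes
    rel_angle = [3.0, 2.0, 88.0, 4.0]  # heading change between successive steps
    hard_maxSpeed, soft_maxSpeed, maxAngle = 6.0, 2.0, 45.0
    outliers = [j for j in range(1, len(disp)-1)
                if disp[j] > hard_maxSpeed
                or (disp[j] > soft_maxSpeed and rel_angle[j-1] > maxAngle)]
    print(outliers)  # -> [2]: a large, sudden jump flagged as a turn point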

◆ transformToCurvilinear()

def lib.filt.transformToCurvilinear (   objects,
  alignments,
  restrict_by_type = True,
  cl_align_window_m = 4.0,
  matchHeadingStrength = 10.0,
  indent = 4,
  local = None,
  passNumber = 1,
  verbose = 0 
)
Add, for every object position, the class 'moving.CurvilinearTrajectory()'
    (curvilinearPositions instance) which holds information about the
    curvilinear coordinates using alignment metadata.

    Input:
    ======
    objects:       list of objects supplied by Traffic Intelligence
    alignments:    a list of alignments, where each alignment is a list of
                   points, where each point is a list of coordinates, e.g.
                   alignments[alpha][1][x] is coordinate x for point number
                   1 of the spline that represents alignment alpha.
                   alignments can also be a compatible object that mimics a
                   3-dimensional list using the __getitem__() method as is
                   the case in the PVAT specification.
    restrict_by_type:  Force certain user types onto certain alignments
    cl_align_window_m: moving average window (in metres) in which to smooth
                       lane changes. As per tools_math.weighted_mvgavg(),
                       this term is a search *radius* around the center of
                       the window.
    matchHeadingStrength: weight given to heading similarity, used in
                          addition to distance when matching trajectories
                          to alignments (use zero to turn off)


    Output: (objects, dropped_trajectories)
    =======
    objects:              modified list of objects
    dropped_trajectories: list of objects or object segments that were
                          truncated. The format is identical to that of
                          objects.

Definition at line 645 of file filt.py.

645 def transformToCurvilinear(objects, alignments, restrict_by_type=True, cl_align_window_m=4.0, matchHeadingStrength=10.0, indent=4, local=None, passNumber=1, verbose=0):
646  ''' Add, for every object position, the class 'moving.CurvilinearTrajectory()'
647  (curvilinearPositions instance) which holds information about the
648  curvilinear coordinates using alignment metadata.
649 
650 
651  Input:
652  ======
653  objects: list of objects supplied by Traffic Intelligence
654  alignments: a list of alignments, where each alignment is a list of
655  points, where each point is a list of coordinates, e.g.
656  alignments[alpha][1][x] is coordinate x for point number
657  1 of the spline that represents alignment alpha.
658  alignments can also be a compatible object that mimics a
659  3-dimensional list using the __getitem__() method as is
660  the case in the PVAT specification.
661  restrict_by_type: Force certain user types onto certain alignments
662  cl_align_window_m: moving average window (in metres) in which to smooth
663  lane changes. As per tools_math.weighted_mvgavg(),
664  this term is a search *radius* around the center of
665  the window.
666  matchHeadingStrength: weight given to heading similarity, used in
667  addition to distance when matching trajectories
668  to alignments (use zero to turn off)
669 
670 
671  Output: (objects, dropped_trajectories)
672  =======
673  objects: modified list of objects
674  dropped_trajectories: list of objects or object segments that were
675  truncated. The format is identical to that of
676  objects.
677  '''
678  if(len(objects) <= 0): return objects, []
679  if(not alignments): return objects, []
680 
681  if(verbose >= 2):
682  try: print(''.rjust(indent,' ')+'Transforming coordinates ('+local['nth'][passNumber]+' pass)...')
683  except (TypeError, KeyError, IndexError): print(''.rjust(indent,' ')+'Transforming coordinates...')
684 
685 
686  from moving import MovingObject, CurvilinearTrajectory #Traffic Intelligence
687 
688  lane_readjustments = 0
689  dropped_trajectories = []
690  original_object_length = len(objects)
691  convert_objects_back_to_instance = False
692 
693  if(not isinstance(objects, list)):
694  objects = [objects]
695  convert_objects_back_to_instance = True
696  if(not isinstance(objects[0], MovingObject)):
697  return objects, dropped_trajectories
698 
699  #For each object
700  for i in range(len(objects)):
701  objects[i].curvilinearPositions = CurvilinearTrajectory([],[],[])
702  if(restrict_by_type): relevant_alignments = alignments.getRestrictedAlignmentsByClassification(objects[i].getUserType())
703  else: relevant_alignments = [align for align in alignments]
704 
705  #For each point
706  for point in range(len(objects[i].getXCoordinates())):
707 
708  if(matchHeadingStrength and len(objects[i].getXCoordinates())>1 and objects[i].getUserType() != 2):
709  if(point == len(objects[i].getXCoordinates())-1): [align, alignPoint, snapped_x, snapped_y, subsegmentDistance, S, Y] = tvaLib.Geo.getSYfromXY(objects[i].getXCoordinates()[point], objects[i].getYCoordinates()[point], relevant_alignments, orientation=True, matchHeadingStrength=matchHeadingStrength, vx=(objects[i].getXCoordinates()[point]-objects[i].getXCoordinates()[point-1]), vy=(objects[i].getYCoordinates()[point]-objects[i].getYCoordinates()[point-1]))
710  else: [align, alignPoint, snapped_x, snapped_y, subsegmentDistance, S, Y] = tvaLib.Geo.getSYfromXY(objects[i].getXCoordinates()[point], objects[i].getYCoordinates()[point], relevant_alignments, orientation=True, matchHeadingStrength=matchHeadingStrength, vx=(objects[i].getXCoordinates()[point+1]-objects[i].getXCoordinates()[point]), vy=(objects[i].getYCoordinates()[point+1]-objects[i].getYCoordinates()[point]))
711  else: [align, alignPoint, snapped_x, snapped_y, subsegmentDistance, S, Y] = tvaLib.Geo.getSYfromXY(objects[i].getXCoordinates()[point], objects[i].getYCoordinates()[point], relevant_alignments, orientation=True)
712 
713  # Error handling
714  if(align is None):
715  if(verbose >= 2): tvaLib.printWarning('Point '+str(point)+' of trajectory '+str(i)+' has alignment snapping errors and will be dropped', indent=4)
716  dropped_trajectories.append(objects[i])
717  objects[i] = None
718  break
719  else: objects[i].curvilinearPositions.addPositionSYL(S, Y, relevant_alignments[align].id)
720 
721  if(objects[i] is None): continue
722 
723 
724  #Smooth lane assignment along the trajectory (window of cl_align_window_m metres)
725  if(len(objects[i].curvilinearPositions.getLanes()) >= 2):
726  cum_distances = [0]+np.cumsum([m.sqrt(m.pow(objects[i].getXCoordinates()[point]-objects[i].getXCoordinates()[point-1],2.0)+m.pow(objects[i].getYCoordinates()[point]-objects[i].getYCoordinates()[point-1],2.0)) for point in range(1,len(objects[i].getXCoordinates()))]).tolist()
727  smoothed_lanes = tvaLib.Math.cat_curvy_mvgavg(objects[i].curvilinearPositions.getLanes(), curvy=cum_distances, window=cl_align_window_m, passes=2)
728 
729  if(objects[i].curvilinearPositions.getLanes() != smoothed_lanes):
730  for point in range(len(objects[i].getXCoordinates())):
731  if(objects[i].curvilinearPositions.getLanes()[point] != smoothed_lanes[point]):
732  [align, _, _, _, _, S, Y] = tvaLib.Geo.getSYfromXY(objects[i].getXCoordinates()[point], objects[i].getYCoordinates()[point], [alignments[smoothed_lanes[point]]], orientation=True)
733 
734  if(align is None):
735  if(verbose >= 4): tvaLib.printWarning('trajectory '+str(i)+' at point '+str(point)+' has alignment errors during trajectory smoothing and will not be corrected.', indent=4)
736  else: objects[i].curvilinearPositions.setPosition(point, S, Y, smoothed_lanes[point]); lane_readjustments += 1
737 
738  #Resize objects
739  if(len(dropped_trajectories) > 0):
740  objects = filter(None, objects)
741  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Trajectories dropped: '+str(len(dropped_trajectories)))
742  if(verbose >= 2): print(''.rjust(indent+4,' ')+'Filtering report: Lane observation corrections per object: '+str(lane_readjustments/float(original_object_length)))
743 
744  if(convert_objects_back_to_instance and len(objects) > 0): return objects[0], dropped_trajectories
745  else: return objects, dropped_trajectories
746 
747 
def transformToCurvilinear(objects, alignments, restrict_by_type=True, cl_align_window_m=4.0, matchHeadingStrength=10.0, indent=4, local=None, passNumber=1, verbose=0)
Definition: filt.py:645
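
A minimal usage sketch, assuming an alignments container that satisfies the 3-dimensional-list interface described in the docstring:

    objects, dropped = transformToCurvilinear(objects, alignments, restrict_by_type=True, cl_align_window_m=4.0, matchHeadingStrength=10.0)
    # Each surviving object now carries curvilinearPositions holding
    # (S, Y, lane) triplets: S the distance travelled along the matched
    # alignment, Y the lateral offset from it, and lane the alignment id.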

◆ transformTrajectories()

def lib.filt.transformTrajectories (   objects,
  translationX,
  translationY,
  rotation,
  objectsOnly = False,
  indent = 4,
  verbose = 0 
)
Translate and rotate trajectories (this should be done early on, before
    filtering based on location). 

Definition at line 182 of file filt.py.

182 def transformTrajectories(objects, translationX, translationY, rotation, objectsOnly=False, indent=4, verbose=0):
183  ''' Translate and rotate trajectories (this should be done early on, before
184  filtering based on location). '''
185  if(translationX or translationY):
186  if(verbose >= 2): print(''.rjust(indent,' ')+'Translating trajectories...')
187 
188  for i in range(len(objects)):
189  for point in range(len(objects[i].getXCoordinates())):
190  objects[i].positions.positions[0][point] = objects[i].getXCoordinates()[point]+translationX
191  objects[i].positions.positions[1][point] = objects[i].getYCoordinates()[point]+translationY
192 
193  if(hasattr(objects[0], 'features') and objects[0].features and not objectsOnly):
194  for i in range(len(objects)):
195  for f in range(len(objects[i].features)):
196  for point in range(len(objects[i].features[f].getXCoordinates())):
197  objects[i].features[f].positions.positions[0][point] = objects[i].features[f].getXCoordinates()[point]+translationX
198  objects[i].features[f].positions.positions[1][point] = objects[i].features[f].getYCoordinates()[point]+translationY
199 
200  if(rotation):
201  if(verbose >= 2): print(''.rjust(indent,' ')+'Rotating trajectories...')
202 
203  for i in range(len(objects)):
204  for point in range(len(objects[i].getXCoordinates())):
205  objects[i].positions.positions[0][point], objects[i].positions.positions[1][point] = tvaLib.Geo.rotPointCC(objects[i].getXCoordinates()[point],objects[i].getYCoordinates()[point],rotation)
206  if(not objectsOnly):
207  objects[i].velocities.positions[0][point], objects[i].velocities.positions[1][point] = tvaLib.Geo.rotPointCC(objects[i].velocities.getXCoordinates()[point],objects[i].velocities.getYCoordinates()[point],rotation)
208 
209  if(hasattr(objects[0], 'features') and objects[0].features and not objectsOnly):
210  for i in range(len(objects)):
211  for f in range(len(objects[i].features)):
212  for point in range(len(objects[i].features[f].getXCoordinates())):
213  objects[i].features[f].positions.positions[0][point], objects[i].features[f].positions.positions[1][point] = tvaLib.Geo.rotPointCC(objects[i].features[f].getXCoordinates()[point],objects[i].features[f].getYCoordinates()[point],rotation)
214  objects[i].features[f].velocities.positions[0][point], objects[i].features[f].velocities.positions[1][point] = tvaLib.Geo.rotPointCC(objects[i].features[f].velocities.getXCoordinates()[point],objects[i].features[f].velocities.getYCoordinates()[point],rotation)
215  return objects
216 
217 
218 
def transformTrajectories(objects, translationX, translationY, rotation, objectsOnly=False, indent=4, verbose=0)
Definition: filt.py:182
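
For reference, a hypothetical stand-in for tvaLib.Geo.rotPointCC, assumed here to rotate a point counter-clockwise about the origin in radians (the actual library routine may differ, e.g. in angle units):

    import math as m

    def rot_point_cc(x, y, angle):
        ''' Hypothetical sketch: rotate (x, y) counter-clockwise about the
            origin by angle (radians); mirrors how rotPointCC is used above. '''
        return (x*m.cos(angle) - y*m.sin(angle),
                x*m.sin(angle) + y*m.cos(angle))

    print(rot_point_cc(1.0, 0.0, m.pi/2))  # -> approximately (0.0, 1.0)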

◆ trimObjects()

def lib.filt.trimObjects (   objects,
  max_obj 
)
Trim list of pre-loaded objects as though they were first loaded.
    This is handy for trimming cached objects with config.max_obj.

Definition at line 1042 of file filt.py.

1042 def trimObjects(objects, max_obj):
1043  ''' Trim list of pre-loaded objects as though they were first loaded.
1044  This is handy for trimming cached objects with config.max_obj. '''
1045 
1046  delimiter = 0
1047  for object_ in objects:
1048  if(object_.num > max_obj): break
1049  delimiter += 1
1050 
1051  return objects[0:min(delimiter, max_obj)]
1052 
def trimObjects(objects, max_obj)
Definition: filt.py:1042
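
A one-line usage sketch (config.max_obj follows the docstring's own example):

    objects = trimObjects(objects, config.max_obj)  # as if freshly loaded with this cap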

◆ verifyObjectIntegrity()

def lib.filt.verifyObjectIntegrity (   objects,
  ignoreFeatures = False,
  indent = 4,
  skipTrajectoryContiguityCheck = False,
  verbose = 0 
)
Check integrity of trajectories 

Definition at line 119 of file filt.py.

119 def verifyObjectIntegrity(objects, ignoreFeatures=False, indent=4, skipTrajectoryContiguityCheck=False, verbose=0):
120  ''' Check integrity of trajectories '''
121  if(verbose >= 2): print(''.rjust(indent,' ')+'Checking integrity of trajectory data...')
122 
123  drop_traj = []
124  drop_traj_list = []
125 
126 
127  for i in range(len(objects)):
128 
129  if((len(objects[i].getXCoordinates()) != len(objects[i].velocities.positions[0])) or (len(objects[i].velocities.positions[0]) != len(objects[i].velocities.positions[1])) or (len(objects[i].getXCoordinates()) != len(objects[i].getYCoordinates()))):
130  if(verbose): tvaLib.printWarning('Bad trajectory detected (num: '+str(objects[i].num)+'; index: '+str(i)+'): position observations do not match speed observations. This trajectory will automatically be dropped.', indent=4)
131  drop_traj.append(objects[i])
132  drop_traj_list.append(i)
133 
134  stated_length = objects[i].timeInterval.last-objects[i].timeInterval.first+1
135  if((len(objects[i].getXCoordinates()) != stated_length) or (len(objects[i].velocities.positions[0]) != stated_length)):
136  raise Exception([9905, 'Bad trajectory detected (num: '+str(objects[i].num)+'; index: '+str(i)+'): position observations do not match declared first and last instants.'])
137 
138  if(objects[i].hasFeatures()):
139 
140  for f in range(len(objects[i].features)):
141  stated_length = objects[i].features[f].timeInterval.last-objects[i].features[f].timeInterval.first+1
142  if((len(objects[i].features[f].getXCoordinates()) != stated_length) or (len(objects[i].features[f].velocities.positions[0]) != stated_length)):
143  raise Exception([9906, 'Bad feature (index: '+str(f)+') detected for object (num: '+str(objects[i].num)+'; index: '+str(i)+'): position observations do not match declared first and last instants.'])
144 
145  if(not skipTrajectoryContiguityCheck and False in [True in [f.getFirstInstant() <= pIx and pIx <= f.getLastInstant() for f in objects[i].features] for pIx in range(objects[i].getFirstInstant(), objects[i].getLastInstant())]):
146  tvaLib.printWarning('Trajectory (num: '+str(objects[i].num)+'; index: '+str(i)+') does not have full feature coverage.', indent=4)
147 
148  objects = dropObjects(objects, drop_traj_list)
149 
150  return objects, drop_traj
151 
def dropObjects(objects, oIxs)
Definition: filt.py:1053
def verifyObjectIntegrity(objects, ignoreFeatures=False, indent=4, skipTrajectoryContiguityCheck=False, verbose=0)
Definition: filt.py:119
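
The per-object invariants enforced above can be condensed into a small sketch (a hedged summary, not library code; attribute names follow the Traffic Intelligence conventions used throughout):

    def check_object(obj):
        ''' Sketch of the two checks: coordinate/velocity lengths must agree,
            and both must match the declared time interval. '''
        n = len(obj.getXCoordinates())
        if(n != len(obj.getYCoordinates()) or
           n != len(obj.velocities.positions[0]) or
           len(obj.velocities.positions[0]) != len(obj.velocities.positions[1])):
            return 'drop'  # mismatched observations: trajectory is dropped
        if(n != obj.timeInterval.last - obj.timeInterval.first + 1):
            raise Exception([9905, 'positions do not match declared instants'])
        return 'ok'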

Variable Documentation

◆ oldstdout

lib.filt.oldstdout

Definition at line 21 of file filt.py.

◆ stdout

lib.filt.stdout

Definition at line 21 of file filt.py.