tvaLib
Classes | Functions | Variables
lib.tools_obj Namespace Reference

Classes

class  NullWriter
 Internal. More...
 

Functions

def undistortImage (fileName, outputName, camera_matrix=[[377.42, 0.0, 639.12], [0.0, 378.43, 490.20], [0.0, 0.0, 1.0]], dist_coeffs=[-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816], frames=0, display=False, fourcc='MJPG', imageOutOffset=0, aceptableImageExtensions=('.png', '.jpg', '.jpeg'), freeScalingParameter=1.31, imageScalingFactor=1.31, maxWindowSize=800, verbose=0)
 Computer vision. More...
 
def cameraCalibration (path, checkerBoardSize=[6, 9], secondPassSearch=False, display=False, maxFramesToUse=40, verbose=0)
 
def bruteVideoLengthSeconds (filename, fps=15.0)
 
def bruteFrameCount (filename)
 
def dumpFrameCrawler (path, frameNum=0, allowed_extensions=['.mp4', '.avi'])
 
def dumpFrame (videoFilename, frameNum=0)
 
def checkCameraRejection (commands, siteIx, camIx)
 
def imageBox (img, obj, frameNum, homography, width=20, height=20, px=0.2, py=0.2, minNPixels=800)
 
def imageBoxTI (img, obj, frameNum, width, height, minNPixels=800, **kwargs)
 
def imageBoxSizeTI (obj, frameNum, width, height, homography=None, px=0.2, py=0.2)
 
def homographyProject (points, homography)
 Homography functions (ported from TI to prevent future breaking due to changes in TI) More...
 
def trajectoryProject (obj, homography)
 
def invHomography (homography)
 
def pointsToTrajectoryTI (points)
 
def getHomography (homoSrcFrame='', orthoSrcPath='', savePath='homography.txt', tsaiCameraSrcPath='', nPoints=4, unitsPerPixel=0.1, videoPts=None, worldPts=None, pointDrawSize=3, pointTargetDrawSize=7, pointTargetDrawThickness=2, maxDisplayResolutionX=900, fig_name='', verbose=0)
 
def loadObjects (sequencepath, max_obj=None, max_obj_features=999, suppress_features=False, legacy=False, legacy_features_path='')
 The following functions are used for manipulating object data from Traffic-Intelligence. More...
 
def boundThis (obj, relFrame, homography, width, height)
 
def boundFromExtrema (width, height, xmin, xmax, ymin, ymax, px=0.2, py=0.2, minNPixels=800)
 
def contourThis (obj, relFrame)
 
def contourClassified (obj, x, y, vx, vy)
 
def contour (x, y, vx, vy, size=[5.0, 2.0])
 
def true_lane_xy (objects, alignments)
 
def trimObject (obj, nframes, fromEnd=False)
 
def num2ind (objects, num)
 
def ixsAsObjects (objects, ixs)
 
def numsAsObjects (objects, nums)
 
def genXY_bounds (objects)
 
def interpolateTrajBetTwoObjects (pos1, pos2, nframes)
 
def interpolateNewObjectFramerate (obj, sourceFramerate, targetFramerate)
 
def join (obj1, obj2, postSmoothing=True)
 
def computeClassCorrespondance (matches, annotations, objects, maxClasses=7)
 
def computeClassInstantCorrespondance (matches, annotations, objects, maxClasses=7)
 
def matches (self, obj, instant, matchingDistance)
 Add-on obj methods. More...
 
def classifyUserTypeHoGSVM (self, candidates=[1, 2, 4], baseSVM=None, width=0, height=0, homography=None, images=None, minSpeedEquiprobable=-1, speedProbabilities=None, alignmentProbabilities=None, aggregationFunc=np.median, maxPercentUnknown=0.5, px=0.2, py=0.2, minNPixels=800, rescaleSize=(64, 64), orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(2, 2), verbose=0, **kwargs)
 

Variables

 oldstdout = NullWriter()
 
 stdout
 

Function Documentation

◆ boundFromExtrema()

def lib.tools_obj.boundFromExtrema (   width,
  height,
  xmin,
  xmax,
  ymin,
  ymax,
  px = 0.2,
  py = 0.2,
  minNPixels = 800 
)

Definition at line 571 of file tools_obj.py.

571 def boundFromExtrema(width, height, xmin, xmax, ymin, ymax, px = 0.2, py = 0.2, minNPixels = 800):
572  xMm = px * (xmax - xmin)
573  yMm = py * (ymax - ymin)
574  a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm))
575  yCropMin = int(max(0, .5 * (ymin + ymax - a)))
576  yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
577  xCropMin = int(max(0, .5 * (xmin + xmax - a)))
578  xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
579  return [[xCropMin,yCropMin],[xCropMin,yCropMax],[xCropMax,yCropMax],[xCropMax,yCropMin]]
580 
581 
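
A minimal worked example of the crop polygon returned, assuming an 800x600 frame and feature extrema spanning x in [100, 200] and y in [150, 250] (all values illustrative):

    # Default padding px = py = 0.2
    corners = boundFromExtrema(width=800, height=600, xmin=100, xmax=200, ymin=150, ymax=250)
    # xMm = 20, yMm = 20, a = max(100 + 40, 200 - (100 + 40)) = 140
    # -> [[80, 130], [80, 270], [220, 270], [220, 130]]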

◆ boundThis()

def lib.tools_obj.boundThis (   obj,
  relFrame,
  homography,
  width,
  height 
)
Find object's bounds using its features. Calculation is derived from
    TI's imageBox().

Definition at line 552 of file tools_obj.py.

552 def boundThis(obj, relFrame, homography, width, height):
553  ''' Find object's bounds using its features. Calculation is derived from
554  TI's imageBox().
555  '''
556  if(not obj.hasFeatures()): return None
557  x = []
558  y = []
559  for f in obj.getFeatures():
560  if f.existsAtInstant(relFrame):
561  projectedPosition = f.getPositionAtInstant(relFrame).project(homography)
562  x.append(projectedPosition.x)
563  y.append(projectedPosition.y)
564  xmin = min(x)
565  xmax = max(x)
566  ymin = min(y)
567  ymax = max(y)
568  return boundFromExtrema(width, height, xmin, xmax, ymin, ymax)
569 
570 

◆ bruteFrameCount()

def lib.tools_obj.bruteFrameCount (   filename)
Read all frames of a video to reliably count its length. This method
    is slow, but more reliable than using cap.get(7)

Definition at line 264 of file tools_obj.py.

264 def bruteFrameCount(filename):
265  ''' Read all frames of a video to reliably count its length. This method
266  is slow, but more reliable than using cap.get(7)
267  '''
268  cap = cv2.VideoCapture(filename)
269  i = 0
270  ret = True
271  while(ret):
272  ret, _ = cap.read()
273  i += 1
274  return i
275 
276 

◆ bruteVideoLengthSeconds()

def lib.tools_obj.bruteVideoLengthSeconds (   filename,
  fps = 15.0 
)
Get video length in seconds using brute force frame reading and FPS 

Definition at line 260 of file tools_obj.py.

260 def bruteVideoLengthSeconds(filename, fps=15.0):
261  ''' Get video length in seconds using brute force frame reading and FPS '''
262  return bruteFrameCount(filename)/float(fps)
263 
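
A usage sketch ('video.avi' and the 15 fps rate are placeholders); the whole file is decoded, which is slow but robust when the container metadata queried by cap.get(7) is wrong:

    nFrames = bruteFrameCount('video.avi')
    seconds = bruteVideoLengthSeconds('video.avi', fps=15.0)
    print(nFrames, seconds)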

◆ cameraCalibration()

def lib.tools_obj.cameraCalibration (   path,
  checkerBoardSize = [6, 9],
  secondPassSearch = False,
  display = False,
  maxFramesToUse = 40,
  verbose = 0 
)
Camera calibration searches through all the images (jpg or png) or a
    video (avi or mp4) located in _path_ for matches to a checkerboard
    pattern of size checkerBoardSize. These images should all be of the same
    camera with the same resolution, obviously.
    
    For best results, use an asymmetric board and ensure that the image has
    very high contrast, including the background. Suitable checkerboard:
    http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
    The defined checkerBoardSize dimensions are each one less than the
    number of squares (searching inside corners).
    
    The code below is loosely based on:
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
    
    Input:
    ======
    secondPassSearch: Use an expensive and fail-prone second-pass search to refine results
    display:          Display results
    maxFramesToUse:   No more than this many frames will be used in the calculation (calculation becomes exponentially expensive with more results).

Definition at line 172 of file tools_obj.py.

172 def cameraCalibration(path, checkerBoardSize=[6,9], secondPassSearch=False, display=False, maxFramesToUse=40, verbose=0):
173  ''' Camera calibration searches through all the images (jpg or png) or a
174  video (avi or mp4) located in _path_ for matches to a checkerboard
175  pattern of size checkerBoardSize. These images should all be of the same
176  camera with the same resolution, obviously.
177 
178  For best results, use an asymmetric board and ensure that the image has
179  very high contrast, including the background. Suitable checkerboard:
180  http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png
181  The defined checkerBoardSize dimensions are each one less than the
182  number of squares (searching inside corners).
183 
184  The code below is loosely based on:
185  https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
186 
187  Input:
188  ======
189  secondPassSearch: Use an expensive and fail-prone second-pass search to refine results
190  display Display results
191  maxFramesToUse: No more than this many frames will be used in the calculation (calculation becomes exponentially expensive with more results).
192  '''
193 
194 
195  # termination criteria
196  criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
197 
198  # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
199  objp = np.zeros((checkerBoardSize[0]*checkerBoardSize[1],3), np.float32)
200  objp[:,:2] = np.mgrid[0:checkerBoardSize[1],0:checkerBoardSize[0]].T.reshape(-1,2)
201 
202  # Arrays to store object points and image points from all the images.
203  objpoints = [] # 3d point in real world space
204  imgpoints = [] # 2d points in image plane.
205 
206 
207  if(os.path.splitext(path)[1].lower() == '.avi' or os.path.splitext(path)[1].lower() == '.mp4'):
208  source = cv2.VideoCapture(path)
209  for frame in range(int(source.get(7))):
210  if(verbose >= 3): print('Processing frame #'+str(frame))
211  _, img = source.read()
212  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
213  # Find the chess board corners
214  ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
215  # If found, add object points, image points (after refining them)
216  if(not ret): continue
217  if(secondPassSearch): corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
218  if(corners is None): continue
219  if(verbose): print('Found pattern in frame #'+str(frame))
220  objpoints.append(objp)
221  imgpoints.append(corners)
222  # Draw and display the corners
223  if(display):
224  img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
225  if(img):
226  cv2.imshow('img',img)
227  cv2.waitKey(0)
228 
229  else:
230  images = glob.glob(os.path.join(path,'*.[jJ][pP][gG]'))+glob.glob(os.path.join(path,'*.[jJ][pP][eE][gG]'))+glob.glob(os.path.join(path,'*.[pP][nN][gG]'))
231  for fname in images:
232  if(verbose >= 3): print('Processing '+fname)
233  img = cv2.imread(fname)
234  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
235  # Find the chess board corners
236  ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1],checkerBoardSize[0]), None)
237  # If found, add object points, image points (after refining them)
238  if(not ret): continue
239  if(secondPassSearch): corners = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
240  if(corners is None): continue
241  if(verbose): print('Found pattern in '+fname)
242  objpoints.append(objp)
243  imgpoints.append(corners)
244  # Draw and display the corners
245  if(display):
246  img = cv2.drawChessboardCorners(img, (checkerBoardSize[1],checkerBoardSize[0]), corners, ret)
247  if(img):
248  cv2.imshow('img',img)
249  cv2.waitKey(0)
250 
251 
252  cv2.destroyAllWindows()
253  if(len(objpoints) == 0 or len(imgpoints) == 0): return False
254  if(verbose >= 3): print('Calculating distortion coefficients...')
255  try: ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpoints[:len(objpoints)-1:int(max(len(objpoints)/maxFramesToUse, 1))], imgpoints[:len(imgpoints)-1:int(max(len(imgpoints)/maxFramesToUse, 1))], gray.shape[::-1], None, None)
256  except NameError: return False
257 
258  return camera_matrix, dist_coeffs
259 
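
A usage sketch, assuming a folder of checkerboard stills and a 7x10-square board (the paths and output name are placeholders); the result can be passed straight to undistortImage():

    result = cameraCalibration('calib_images/', checkerBoardSize=[6, 9], maxFramesToUse=40)
    if result:   # False is returned when no pattern was found
        camera_matrix, dist_coeffs = result
        undistortImage('site01.avi', 'site01_undistorted',
                       camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)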

◆ checkCameraRejection()

def lib.tools_obj.checkCameraRejection (   commands,
  siteIx,
  camIx 
)
Verify reject condition of a specific sequence 

Definition at line 303 of file tools_obj.py.

303 def checkCameraRejection(commands, siteIx, camIx):
304  ''' Verify reject condition of a specific sequence '''
305  for reject_site,reject_cam in zip(Parse.list1D(commands.rej_site, autosort=False),Parse.list1D(commands.rej_cam, autosort=False)):
306  if(siteIx == reject_site-1 and camIx == reject_cam-1): return True
307  return False
308 
309 

◆ classifyUserTypeHoGSVM()

def lib.tools_obj.classifyUserTypeHoGSVM (   self,
  candidates = [1, 2, 4],
  baseSVM = None,
  width = 0,
  height = 0,
  homography = None,
  images = None,
  minSpeedEquiprobable = -1,
  speedProbabilities = None,
  alignmentProbabilities = None,
  aggregationFunc = np.median,
  maxPercentUnknown = 0.5,
  px = 0.2,
  py = 0.2,
  minNPixels = 800,
  rescaleSize = (64, 64),
  orientations = 9,
  pixelsPerCell = (8,8),
  cellsPerBlock = (2,2),
  verbose = 0,
  **kwargs 
)
Forked from TI  to replace classifyUserTypeHoGSVM r963, including an
    alignment probability parameter, and generalised classification 
    candidates.
    
    Original notes:
    =======================================================================
    Aggregates SVM detections in each image and returns probability
    (proportion of instants with classification in each category)

    images is a dictionary of images indexed by instant
    With default parameters, the general (ped-bike-car) classifier will be used
    
    Considered categories are the keys of speedProbabilities

Definition at line 918 of file tools_obj.py.

918 def classifyUserTypeHoGSVM(self, candidates=[1,2,4], baseSVM=None, width=0, height=0, homography = None, images = None, minSpeedEquiprobable=-1, speedProbabilities=None, alignmentProbabilities=None, aggregationFunc=np.median, maxPercentUnknown=0.5, px=0.2, py=0.2, minNPixels = 800, rescaleSize = (64, 64), orientations = 9, pixelsPerCell = (8,8), cellsPerBlock = (2,2), verbose=0, **kwargs):
919  ''' Forked from TI to replace classifyUserTypeHoGSVM r963, including an
920  alignment probability parameter, and generalised classification
921  candidates.
922 
923  Original notes:
924  =======================================================================
925  Aggregates SVM detections in each image and returns probability
926  (proportion of instants with classification in each category)
927 
928  images is a dictionary of images indexed by instant
929  With default parameters, the general (ped-bike-car) classifier will be used
930 
931  Considered categories are the keys of speedProbabilities
932 
933  '''
934 
935 
936  if(not hasattr(self, 'aggregatedSpeed') or not hasattr(self, 'userTypes')):
937  if(verbose>=5): print('Initializing the data structures for classification by HoG-SVM')
938  self.initClassifyUserTypeHoGSVM(aggregationFunc, baseSVM, **kwargs)
939 
940 
941  if(len(self.userTypes) != self.length() and images is not None):
942  for t in self.getTimeInterval():
943  if(t not in self.userTypes): self.classifyUserTypeHoGSVMAtInstant(images[t], t, homography, width, height, px, py, minNPixels, rescaleSize, orientations, pixelsPerCell, cellsPerBlock)
944 
945 
946 
951  if(speedProbabilities is None or self.aggregatedSpeed < minSpeedEquiprobable): userTypeProbabilities = dict((candidate,1.0) for candidate in candidates)
952  else: userTypeProbabilities = dict((candidate,speedProbabilities[candidate](self.aggregatedSpeed)) for candidate in candidates)
953 
954 
955  nInstantsUserType_align = dict((candidate,0) for candidate in set(candidates+[0]))
956  if(alignmentProbabilities is not None and hasattr(self, 'curvilinearPositions')):
957  for t in self.curvilinearPositions.getLanes():
958  for candidate,prob in alignmentProbabilities(t).iteritems(): nInstantsUserType_align[candidate] += prob
959  #Normalize probability vector
960  if(sum(nInstantsUserType_align.values())):
961  nInstantsUserType_align = dict((candidate,prob/float(sum(nInstantsUserType_align.values()))) for candidate,prob in nInstantsUserType_align.iteritems())
962 
963 
964  nInstantsUserType_HOG = dict((candidate,0) for candidate in set(candidates+[0]))
965  for t in self.userTypes: nInstantsUserType_HOG[self.userTypes[t]] += 1
966  #Normalize probability vector
967  if(sum(nInstantsUserType_HOG.values())):
968  nInstantsUserType_HOG = dict((candidate,prob/float(sum(nInstantsUserType_HOG.values()))) for candidate,prob in nInstantsUserType_HOG.iteritems())
969 
970 
974  too_many_unknowns=False
975  if(nInstantsUserType_HOG[0] >= maxPercentUnknown or nInstantsUserType_align[0] >= maxPercentUnknown): too_many_unknowns=True
976  else:
977  for candidate in userTypeProbabilities: userTypeProbabilities[candidate] *= nInstantsUserType_HOG[candidate]
978  for candidate in userTypeProbabilities: userTypeProbabilities[candidate] *= nInstantsUserType_align[candidate]
979 
980 
981  # If no speed information and too many unknowns, set user class to 0 (unknown)
982  if(too_many_unknowns and (speedProbabilities is None or self.aggregatedSpeed < minSpeedEquiprobable)): self.setUserType(0)
983  # Set class as the user type that maximizes usertype probabilities
984  else: self.setUserType(max(userTypeProbabilities, key=userTypeProbabilities.get))
985 
986 
987  return True
988 
989 
990 
991 
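
A usage sketch, assuming this add-on method has been bound to a Traffic-Intelligence MovingObject instance obj (it takes self), and that svm, images, homography, width and height were prepared elsewhere; the speed priors and user-type codes (1 car, 2 pedestrian, 4 bicycle, matching contourClassified() below) are illustrative only:

    from scipy.stats import norm

    # Illustrative speed priors, keyed by candidate user type
    speedProbabilities = {1: lambda s: norm(13.9, 5.0).pdf(s),   # car
                          2: lambda s: norm(1.4, 0.5).pdf(s),    # pedestrian
                          4: lambda s: norm(4.0, 1.5).pdf(s)}    # bicycle
    obj.classifyUserTypeHoGSVM(candidates=[1, 2, 4], baseSVM=svm,
                               width=width, height=height, homography=homography,
                               images=images, speedProbabilities=speedProbabilities,
                               verbose=1)
    print(obj.getUserType())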

◆ computeClassCorrespondance()

def lib.tools_obj.computeClassCorrespondance (   matches,
  annotations,
  objects,
  maxClasses = 7 
)
Use GT matches from moving.computeClearMOT() 
    (with returnMatches==True), i.e. the 6th parameter, to determine
    classification correspondence

Definition at line 860 of file tools_obj.py.

860 def computeClassCorrespondance(matches, annotations, objects, maxClasses=7):
861  ''' Use GT matches from moving.computeClearMOT()
862  (with returnMatches==True), i.e. the 6th parameter, to determine
863  classification correspondence
864  '''
865  correspondances = [[] for x in range(maxClasses)]
866 
867  annotation_map = dict((annotations[i].num, i) for i in range(len(annotations)))
868  object_map = dict((objects[i].num, i) for i in range(len(objects)))
869 
870  for match in matches:
871  x = [matches[match][x] for x in matches[match]]
872  if(not x): continue
873  obj_num = max(zip((x.count(item) for item in set(x)), set(x)))
874  if(annotations[annotation_map[match]].userType==objects[object_map[obj_num[1]]].userType): correspondances[annotations[annotation_map[match]].userType].append(True)
875  else: correspondances[annotations[annotation_map[match]].userType].append(False)
876 
877 
878  return [c.count(True)/float(len(c)) if len(c) > 0 else None for c in correspondances]
879 
880 
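
A usage sketch, assuming annotations (ground truth) and objects are lists of Traffic-Intelligence MovingObjects and that the matches dictionary was obtained from moving.computeClearMOT(..., returnMatches=True) as described above:

    rates = computeClassCorrespondance(matches, annotations, objects, maxClasses=7)
    for userType, rate in enumerate(rates):
        if rate is not None:
            print('userType %d: %.0f%% of matched annotations classified correctly' % (userType, 100 * rate))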

◆ computeClassInstantCorrespondance()

def lib.tools_obj.computeClassInstantCorrespondance (   matches,
  annotations,
  objects,
  maxClasses = 7 
)
TODO:

Definition at line 881 of file tools_obj.py.

881 def computeClassInstantCorrespondance(matches, annotations, objects, maxClasses=7):
882  ''' TODO:
883  '''
884  correspondances = [[] for x in range(maxClasses)]
885 
886  annotation_map = dict((annotations[i].num, i) for i in range(len(annotations)))
887  object_map = dict((objects[i].num, i) for i in range(len(objects)))
888 
889  for match in matches:
890  x = [matches[match][x] for x in matches[match]]
891  if(not x): continue
892  obj_num = max(zip((x.count(item) for item in set(x)), set(x)))
893  if(annotations[annotation_map[match]].userType==objects[object_map[obj_num[1]]].userType): correspondances[annotations[annotation_map[match]].userType].append(True)
894  else: correspondances[annotations[annotation_map[match]].userType].append(False)
895 
896 
897  return [c.count(True)/float(len(c)) if len(c) > 0 else None for c in correspondances]
898 
899 

◆ contour()

def lib.tools_obj.contour (   x,
  y,
  vx,
  vy,
  size = [5.0, 2.0] 
)
Returns a box (matrix of points) corresponding to the contour of obj 
    aligned to its velocity vector. obj is a single object from objects[i]
    
               ^
      P1->  +--|--+ <-P4
            |  |  |
            |  +  |
            |     |
      P2->  +-----+ <-P3

Definition at line 600 of file tools_obj.py.

600 def contour(x, y, vx, vy, size=[5.0,2.0]):
601  ''' Returns a box (matrix of points) corresponding to the contour of obj
602  aligned to its velocity vector. obj is a single object from objects[i]
603 
604              ^
605     P1->  +--|--+ <-P4
606           |  |  |
607           |  +  |
608           |     |
609     P2->  +-----+ <-P3
610  '''
611  contour = [[0.0 for j in range(2)] for i in range(4)]
612  ho = m.sqrt(m.pow(vx,2)+m.pow(vy,2))
613  l_vector = [(vx*size[0]/2/ho),(vy*size[0]/2/ho)]
614  w_vector = [0,m.sqrt(m.pow(size[1]/2,2)/(1+m.pow(vy,2)/m.pow(vx,2)))]
615  w_vector[0] = -vy*w_vector[1]/vx
616  contour[0] = [x+l_vector[0]+w_vector[0],y+l_vector[1]+w_vector[1]] #P1
617  contour[1] = [x-l_vector[0]+w_vector[0],y-l_vector[1]+w_vector[1]] #P2
618  contour[2] = [x-l_vector[0]-w_vector[0],y-l_vector[1]-w_vector[1]] #P3
619  contour[3] = [x+l_vector[0]-w_vector[0],y+l_vector[1]-w_vector[1]] #P4
620  return contour
621 
622 
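
A worked example: the default-size (5.0 x 2.0) box for an object at the origin travelling along +x at 1 unit per frame. Note that the implementation divides by vx, so vx must be non-zero:

    box = contour(x=0.0, y=0.0, vx=1.0, vy=0.0)
    # l_vector = [2.5, 0.0], w_vector = [0.0, 1.0]
    # -> [[2.5, 1.0], [-2.5, 1.0], [-2.5, -1.0], [2.5, -1.0]]   (P1..P4)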

◆ contourClassified()

def lib.tools_obj.contourClassified (   obj,
  x,
  y,
  vx,
  vy 
)
Calls contour() in reference to an object and given positions, assigning
    size based on road user classification. Use this as a shortcut to 
    contourThis() if the object does not have positions (i.e. annotations).

Definition at line 587 of file tools_obj.py.

587 def contourClassified(obj, x, y, vx, vy):
588  ''' Calls contour() in reference to an object and given positions, assigning
589  size based on road user classification. Use this as a shortcut to
590  contourThis() if the object does not have positions (i.e. annotations).
591  '''
592  if(obj.userType == 2): size = [0.75,0.75] # Pedestrian
593  elif(obj.userType == 3): size = [3.0,1.0] # Motorcycle
594  elif(obj.userType == 4): size = [2.0,1.0] # Bicycle
595  elif(obj.userType == 5): size = [12.0,2.5] # Bus
596  elif(obj.userType == 6): size = [15.0,2.6] # Truck
597  else: size = [5.0,2.0] # Anything else
598  return contour(x, y, vx, vy, size=size)
599 

◆ contourThis()

def lib.tools_obj.contourThis (   obj,
  relFrame 
)
Calls contourClassified() in reference to this object and a relative
    frame. 

Definition at line 582 of file tools_obj.py.

582 def contourThis(obj, relFrame):
583  ''' Calls contourClassified() in reference to this object and a relative
584  frame. '''
585  return contourClassified(obj, obj.getPositionAt(relFrame).x, obj.getPositionAt(relFrame).y, obj.getVelocityAt(relFrame).x, obj.getVelocityAt(relFrame).y)
586 

◆ dumpFrame()

def lib.tools_obj.dumpFrame (   videoFilename,
  frameNum = 0 
)
Dump screen capture of video

    Manual method using MPlayer (press 's'):
        mplayer path_to_video -vf screenshot

Definition at line 289 of file tools_obj.py.

289 def dumpFrame(videoFilename, frameNum=0):
290  ''' Dump screen capture of video
291 
292  Manual method using MPlayer (press 's'):
293  mplayer path_to_video -vf screenshot
294  '''
295  if(not os.path.exists(videoFilename)):
296  from tools import printWarning as tvaLib_printWarning
297  tvaLib_printWarning('No video file found ('+videoFilename+')', 'Warning:')
298  source = cv2.VideoCapture(videoFilename)
299  ret, frame = source.read()
300  return cv2.imwrite(os.path.splitext(videoFilename)[0]+'-frame.png', frame)
301 
302 

◆ dumpFrameCrawler()

def lib.tools_obj.dumpFrameCrawler (   path,
  frameNum = 0,
  allowed_extensions = ['.mp4', '.avi'] 
)
Crawl through path recursively for video files and dump a screen
    capture of each.

Definition at line 277 of file tools_obj.py.

277 def dumpFrameCrawler(path, frameNum=0, allowed_extensions=['.mp4', '.avi']):
278  ''' Crawl through path recursively for video files and dump a screen
279  capture of each.
280  '''
281  import fnmatch
282  for rootDir, dirnames, filenames in os.walk(path):
283  for ext in allowed_extensions:
284  for filename in fnmatch.filter(filenames, '*'+ext):
285  dumpFrame(os.path.join(rootDir, filename), frameNum=frameNum)
286  return True
287 
288 

◆ genXY_bounds()

def lib.tools_obj.genXY_bounds (   objects)
Generate xy_bounds from objects maximum coordinates (shrink wrap)
    
    Returns:
    ========
    [Constructors.SuperBound() for X, Constructors.SuperBound() for Y]        

Definition at line 755 of file tools_obj.py.

755 def genXY_bounds(objects):
756  ''' Generate xy_bounds from objects maximum coordinates (shrink wrap)
757 
758  Returns:
759  ========
760  [Constructors.SuperBound() for X, Constructors.SuperBound() for Y]
761 
762  '''
763  xy_bounds = [Constructors.SuperBound([objects[0].getXCoordinates()[0],objects[0].getXCoordinates()[0]]),Constructors.SuperBound([objects[0].getYCoordinates()[0],objects[0].getYCoordinates()[0]])]
764  for i in range(len(objects)):
765  for point in range(int(objects[i].length())):
766  try:
767  if(xy_bounds[0].lower > objects[i].getXCoordinates()[point]): xy_bounds[0].lower = objects[i].getXCoordinates()[point]
768  elif(xy_bounds[0].upper < objects[i].getXCoordinates()[point]): xy_bounds[0].upper = objects[i].getXCoordinates()[point]
769  if(xy_bounds[1].lower > objects[i].getYCoordinates()[point]): xy_bounds[1].lower = objects[i].getYCoordinates()[point]
770  elif(xy_bounds[1].upper < objects[i].getYCoordinates()[point]): xy_bounds[1].upper = objects[i].getYCoordinates()[point]
771  except IndexError: pass #Ignore potential issues if the reported length of an object differs from its stored length (usually a minor bug)
772  return xy_bounds
773 
774 

◆ getHomography()

def lib.tools_obj.getHomography (   homoSrcFrame = '',
  orthoSrcPath = '',
  savePath = 'homography.txt',
  tsaiCameraSrcPath = '',
  nPoints = 4,
  unitsPerPixel = 0.1,
  videoPts = None,
  worldPts = None,
  pointDrawSize = 3,
  pointTargetDrawSize = 7,
  pointTargetDrawThickness = 2,
  maxDisplayResolutionX = 900,
  fig_name = '',
  verbose = 0 
)
This code has been forked from the Traffic-Intelligence project 
    (scripts/compute-homography.py)

Definition at line 403 of file tools_obj.py.

403 def getHomography(homoSrcFrame='', orthoSrcPath='', savePath='homography.txt', tsaiCameraSrcPath='', nPoints=4, unitsPerPixel=0.1, videoPts=None, worldPts=None, pointDrawSize=3, pointTargetDrawSize=7, pointTargetDrawThickness=2, maxDisplayResolutionX=900, fig_name='', verbose=0):
404  ''' This code has been forked from the Traffic-Intelligence project
405  (scripts/compute-homography.py)'''
406  try:
407  import matplotlib.pyplot as plt
408  import matplotlib.pylab as plb
409  except ImportError:
410  if(verbose>=1): print 'Warning: matplotlib could not be found/imported. Graphing/GUI functionality may be unstable.'
411  try: import cv2
412  except ImportError:
413  if(verbose>=1): print 'Warning: openCV could not be found/imported. Image-processing functionality may be unstable.'
414  if(tsaiCameraSrcPath):
415  try: from pdtv import TsaiCamera
416  except ImportError:
417  #https://bitbucket.org/hakanardo/pdtv
418  if(verbose>=1):
419  print 'Warning: tsai camera calibration is available, but requires the pdtv module to work. Defaulting to ordinary homography.'
420  print 'Warning: pdtv can be installed from https://bitbucket.org/hakanardo/pdtv'
421  tsaiCameraSrcPath = ''
422  try: import yaml
423  except ImportError:
424  if(verbose>=1): print 'Warning: tsai camera calibration is available, but requires the yaml module to work. Defaulting to ordinary homography.'
425  tsaiCameraSrcPath = ''
426  from cvutils import computeHomographyFromPDTV as TrafIntCVUtils_computeHomographyFromPDTV
427  from storage import openCheck as TrafIntStorage_openCheck
428  from storage import getLines as TrafIntStorage_getLines
429  from StringIO import StringIO
430 
431  homography = np.array([])
432  if(worldPts is not None and videoPts is not None):
433  homography, mask = cv2.findHomography(videoPts, worldPts)
434  elif(tsaiCameraSrcPath):
435 
436  f = TrafIntStorage_openCheck(tsaiCameraSrcPath, quitting = True)
437  content = TrafIntStorage_getLines(f)
438  #out.write('data_class: TsaiCamera\n')
439  yamlContent = ''.join([l.replace(' f:', 'f:').replace(' k:', 'k:').replace(',','.')+'\n' for l in content])
440  cameraData = yaml.load(StringIO(yamlContent))
441  camera = TsaiCamera(Cx=cameraData['Cx'], Cy=cameraData['Cy'], Sx=cameraData['Sx'], Tx=cameraData['Tx'], Ty=cameraData['Ty'], Tz=cameraData['Tz'], dx=cameraData['dx'], dy=cameraData['dy'], f=cameraData['f'], k=cameraData['k'], r1=cameraData['r1'], r2=cameraData['r2'], r3=cameraData['r3'], r4=cameraData['r4'], r5=cameraData['r5'], r6=cameraData['r6'], r7=cameraData['r7'], r8=cameraData['r8'], r9=cameraData['r9'])
442  homography = TrafIntCVUtils_computeHomographyFromPDTV(camera)
443  else:
444 
446  worldImg = plt.imread(orthoSrcPath)
447  videoImg = plt.imread(homoSrcFrame)
448  videoFig = plt.figure(': '.join([fig_name,'Select image space points']))
449  plt.imshow(videoImg)
450  videoFig.tight_layout()
451 
452 
453  print('Click on {0} points in image space'.format(nPoints))
454  videoPts = np.array(videoFig.ginput(nPoints, timeout=3000))
455  ax = plb.gca()
456  xs = ax.get_xlim()
457  ys = ax.get_ylim()
458  plt.plot(videoPts[:,0], videoPts[:,1], 'r+')
459  for coordsIx in range(len(videoPts)):
460  plt.text(videoPts[coordsIx][0]+10,videoPts[coordsIx][1]-10,str(coordsIx+1))
461  plt.draw()
462  ax.set_xlim(xs)
463  ax.set_ylim(ys)
464 
465  worldFig = plt.figure(': '.join([fig_name,'Select world space points']))
466  plt.imshow(worldImg)
467  worldFig.tight_layout()
468 
469 
470  print('Click on {0} points in world space'.format(nPoints))
471  worldPts = unitsPerPixel*np.array(worldFig.ginput(nPoints, timeout=3000))
472  plt.close('all')
473 
474  '''## Save the points to file
475  f = open('point-correspondences.txt', 'a')
476  np.savetxt(f, worldPts.T)
477  np.savetxt(f, videoPts.T)
478  f.close()'''
479  homography, mask = cv2.findHomography(videoPts, worldPts)
480 
481  if(homography.size > 0):
482  np.savetxt(savePath, homography)
483 
484  if(worldPts is not None and len(worldPts) == len(videoPts) and orthoSrcPath and homoSrcFrame):
485  worldImg = cv2.imread(orthoSrcPath)
486  videoImg = cv2.imread(homoSrcFrame)
487  invH = invHomography(homography)
488  projectedWorldPts = np.array(homographyProject(worldPts.tolist(), invH))
489  projectedVideoPts = np.array(homographyProject(videoPts.tolist(), homography))
490 
491  for i in range(worldPts.shape[0]):
492  # world image
493  cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))), pointDrawSize, (0, 0, 255))
494  cv2.circle(worldImg,tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))), pointTargetDrawSize, (0, 0, 255), thickness=pointTargetDrawThickness)
495  cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))), pointDrawSize, (255, 0, 0))
496  cv2.circle(worldImg,tuple(np.int32(np.round(projectedVideoPts[i]/unitsPerPixel))), pointTargetDrawSize, (255, 0, 0), thickness=pointTargetDrawThickness)
497  cv2.putText(worldImg, str(i+1), tuple(np.int32(np.round(worldPts[i]/unitsPerPixel))+5), cv2.FONT_HERSHEY_PLAIN, 2., (255, 0, 0), 2)
498  # video image
499  cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))), pointDrawSize, (0, 0, 255))
500  cv2.circle(videoImg,tuple(np.int32(np.round(videoPts[i]))), pointTargetDrawSize, (0, 0, 255), thickness=pointTargetDrawThickness)
501  cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))), pointDrawSize, (255, 0, 0))
502  cv2.circle(videoImg,tuple(np.int32(np.round(projectedWorldPts[i]))), pointTargetDrawSize, (255, 0, 0), thickness=pointTargetDrawThickness)
503  cv2.putText(videoImg, str(i+1), tuple(np.int32(np.round(videoPts[i])+5)), cv2.FONT_HERSHEY_PLAIN, 2., (255, 0, 0), 2)
504  #print('img: {0} / projected: {1}'.format(videoPts[i], p))
505 
506  if(verbose >= 1):
507  if(videoImg.shape[1] > maxDisplayResolutionX): videoImg = cv2.resize(videoImg, (0,0), fx=maxDisplayResolutionX/float(videoImg.shape[1]), fy=maxDisplayResolutionX/float(videoImg.shape[1]))
508  if(worldImg.shape[1] > maxDisplayResolutionX): worldImg = cv2.resize(worldImg, (0,0), fx=maxDisplayResolutionX/float(videoImg.shape[1]), fy=maxDisplayResolutionX/float(videoImg.shape[1]))
509  cv2.imshow('video frame',videoImg)
510  cv2.imshow('world image',worldImg)
511  cv2.waitKey()
512  cv2.destroyAllWindows()
513 
514  return worldPts, homography
515 
516 
517 
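
A usage sketch (paths are placeholders): pick nPoints corresponding points interactively between a video frame and an orthophoto, or pass pre-measured correspondences directly:

    worldPts, homography = getHomography(homoSrcFrame='frame.png', orthoSrcPath='ortho.png',
                                         savePath='homography.txt', nPoints=4,
                                         unitsPerPixel=0.1, verbose=1)
    # Non-interactive alternative, from Nx2 float arrays of matched points:
    # worldPts, homography = getHomography(videoPts=videoPts, worldPts=worldPts)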

◆ homographyProject()

def lib.tools_obj.homographyProject (   points,
  homography 
)

Homography functions (ported from TI to prevent future breaking due to changes in TI)

Returns the coordinates of the points projected through homography. 
    Points is a points (Nx2)

Definition at line 371 of file tools_obj.py.

371 def homographyProject(points, homography):
372  ''' Returns the coordinates of the points projected through homography.
373  Points is a points (Nx2)'''
374  if(hasattr(homography, 'asNpArray')): homography = homography.asNpArray()
375  if(hasattr(points, 'data')): points = points.data
376  if(len(points)==2 and type(points[0])!=list and type(points[0])!=tuple):
377  points = [points]
378  return_point = True
379  else:
380  return_point = False
381  prod = [np.dot(homography, np.array([point[0],point[1],1])) for point in points]
382  if(return_point): return [(x[:2]/x[2]).tolist() for x in prod][0]
383  else: return [(x[:2]/x[2]).tolist() for x in prod]
384 
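
A worked example of a round trip through a placeholder 3x3 homography (a simple 0.1 units-per-pixel scaling, not a real calibration), using invHomography() below for the return leg:

    import numpy as np

    H = np.array([[0.1, 0.0, 0.0],
                  [0.0, 0.1, 0.0],
                  [0.0, 0.0, 1.0]])
    worldPt = homographyProject([640.0, 480.0], H)        # single point in, single point out
    imagePt = homographyProject(worldPt, invHomography(H))
    # worldPt == [64.0, 48.0]; imagePt recovers [640.0, 480.0]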

◆ imageBox()

def lib.tools_obj.imageBox (   img,
  obj,
  frameNum,
  homography,
  width = 20,
  height = 20,
  px = 0.2,
  py = 0.2,
  minNPixels = 800 
)
Identical to Traffic Intelligence's cvutils.imageBox(), except that it
    does not rely on features, approximating instead based on distance and
    classification. 

Definition at line 310 of file tools_obj.py.

310 def imageBox(img, obj, frameNum, homography, width=20, height=20, px = 0.2, py = 0.2, minNPixels = 800):
311  ''' Identical to Traffic Intelligence's cvutils.imageBox(), except that it
312  does not rely on features, approximating instead based on distance and
313  classification. '''
314 
315  pos_s = contourThis(obj, relFrame=frameNum-obj.getFirstInstant())
316  pos_s = homographyProject(pos_s, homography)
317 
318  xmin = round(min([point[0] for point in pos_s]))
319  xmax = round(max([point[0] for point in pos_s]))
320  ymin = round(min([point[1] for point in pos_s]))
321  ymax = round(max([point[1] for point in pos_s]))
322 
323  return img[int(ymin):int(ymax), int(xmin):int(xmax)], ymin, ymax, xmin, xmax
324 
325 

◆ imageBoxSizeTI()

def lib.tools_obj.imageBoxSizeTI (   obj,
  frameNum,
  width,
  height,
  homography = None,
  px = 0.2,
  py = 0.2 
)
Modified from imageBoxSize() in TI r960, implementing explicit 
    projection verification.
    Computes the bounding box size of object at frameNum. 

Definition at line 336 of file tools_obj.py.

336 def imageBoxSizeTI(obj, frameNum, width, height, homography=None, px=0.2, py=0.2):
337  ''' Modified from imageBoxSize() in TI r960, implementing explicit
338  projection verification.
339  Computes the bounding box size of object at frameNum. '''
340  x = []
341  y = []
342  if(obj.hasFeatures()):
343  for f in obj.getFeatures():
344  if(f.existsAtInstant(frameNum)):
345  p = f.getPositionAtInstant(frameNum)
346  if(homography is not None):
347  p = homographyProject([p.x,p.y], homography)
348  x.append(p[0])
349  y.append(p[1])
350  else:
351  x.append(p.x)
352  y.append(p.y)
353  xmin = min(x)
354  xmax = max(x)
355  ymin = min(y)
356  ymax = max(y)
357  xMm = px * (xmax - xmin)
358  yMm = py * (ymax - ymin)
359  a = max(ymax - ymin + (2 * yMm), xmax - (xmin + 2 * xMm))
360  yCropMin = int(max(0, .5 * (ymin + ymax - a)))
361  yCropMax = int(min(height - 1, .5 * (ymin + ymax + a)))
362  xCropMin = int(max(0, .5 * (xmin + xmax - a)))
363  xCropMax = int(min(width - 1, .5 * (xmin + xmax + a)))
364  return yCropMin, yCropMax, xCropMin, xCropMax
365 
366 
367 

◆ imageBoxTI()

def lib.tools_obj.imageBoxTI (   img,
  obj,
  frameNum,
  width,
  height,
  minNPixels = 800,
  **kwargs 
)
Ported directly from TI r960, substituting the imageBoxSize() function
    with one that verifies projection explicitly: imageBoxSizeTI().
    Computes the bounding box of object at frameNum. 

Definition at line 326 of file tools_obj.py.

326 def imageBoxTI(img, obj, frameNum, width, height, minNPixels=800, **kwargs):
327  ''' Ported directly from TI r960, substituting the imageBoxSize() function
328  with one that verifies projection explicitly: imageBoxSizeTI().
329  Computes the bounding box of object at frameNum. '''
330  yCropMin, yCropMax, xCropMin, xCropMax = imageBoxSizeTI(obj, frameNum, width, height, **kwargs)
331  if(yCropMax != yCropMin and xCropMax != xCropMin and (yCropMax - yCropMin) * (xCropMax - xCropMin) > minNPixels): return img[yCropMin : yCropMax, xCropMin : xCropMax]
332  else: return None
333 
334 
335 

◆ interpolateNewObjectFramerate()

def lib.tools_obj.interpolateNewObjectFramerate (   obj,
  sourceFramerate,
  targetFramerate 
)
For an object obj, reinterpolate all grouped and feature positions and
    velocities to a target framerate.

Definition at line 799 of file tools_obj.py.

799 def interpolateNewObjectFramerate(obj, sourceFramerate, targetFramerate):
800  ''' For an object obj, reinterpolate all grouped and feature positions and
801  velocities to a target framerate.
802  '''
803  obj.positions.positions = [list(x) for x in zip(*Geo.interpolateSplinePoints(zip(obj.positions.positions[0],obj.positions.positions[1]), fraction=float(targetFramerate)/float(sourceFramerate)))]
804  obj.velocities.positions = [list(x) for x in zip(*Geo.interpolateVectorSeries(zip(obj.velocities.positions[0],obj.velocities.positions[1]), fraction=float(targetFramerate)/float(sourceFramerate)))]
805  obj.timeInterval.first = int(obj.timeInterval.first*float(sourceFramerate)/float(targetFramerate))
806  obj.timeInterval.last = obj.timeInterval.first + len(obj.getXCoordinates()) - 1
807  if(hasattr(obj, 'features') and type(obj.features) is list):
808  for fIx in range(len(obj.features)):
809  obj.features[fIx].positions.positions = [list(x) for x in zip(*Geo.interpolateSplinePoints(zip(obj.features[fIx].positions.positions[0],obj.features[fIx].positions.positions[1]), fraction=float(targetFramerate)/float(sourceFramerate)))]
810  obj.features[fIx].velocities.positions = [list(x) for x in zip(*Geo.interpolateVectorSeries(zip(obj.features[fIx].velocities.positions[0],obj.features[fIx].velocities.positions[1]), fraction=float(targetFramerate)/float(sourceFramerate)))]
811  obj.features[fIx].timeInterval.first = int(obj.features[fIx].timeInterval.first*float(sourceFramerate)/float(targetFramerate))
812  obj.features[fIx].timeInterval.last = obj.features[fIx].timeInterval.first + len(obj.features[fIx].getXCoordinates()) - 1
813  return obj
814 
815 

◆ interpolateTrajBetTwoObjects()

def lib.tools_obj.interpolateTrajBetTwoObjects (   pos1,
  pos2,
  nframes 
)
Interpolate missing positions between positions pos1 and pos2 for
    nframes frames. Acceleration smoothing currently not supported.
    
    Notes:
    ======
    -nframes is NOT inclusive of positions pos1 and pos2!

Definition at line 775 of file tools_obj.py.

775 def interpolateTrajBetTwoObjects(pos1, pos2, nframes):
776  ''' Interpolate missing positions between positions pos1 and pos2 for
777  nframes frames. Acceleration smoothing currently not supported.
778 
779  Notes:
780  ======
781  -nframes is NOT inclusive of positions pos1 and pos2!
782  '''
783  if(nframes < 1): return [],[],[],[]
784 
785  avg_x_speed = (pos2[0]-pos1[0])/(nframes+1)
786  avg_y_speed = (pos2[1]-pos1[1])/(nframes+1)
787 
788  Xs,Ys,VXs,VYs = [],[],[],[]
789 
790  for frame in range(nframes):
791  Xs.append(pos1[0]+(frame+1)*avg_x_speed)
792  Ys.append(pos1[1]+(frame+1)*avg_y_speed)
793  VXs.append(avg_x_speed)
794  VYs.append(avg_y_speed)
795 
796  return Xs,Ys,VXs,VYs
797 
798 
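
A worked example: fill a two-frame gap between positions (0, 0) and (3, 6):

    Xs, Ys, VXs, VYs = interpolateTrajBetTwoObjects(pos1=[0.0, 0.0], pos2=[3.0, 6.0], nframes=2)
    # avg_x_speed = 1.0, avg_y_speed = 2.0
    # Xs = [1.0, 2.0], Ys = [2.0, 4.0], VXs = [1.0, 1.0], VYs = [2.0, 2.0]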

◆ invHomography()

def lib.tools_obj.invHomography (   homography)
Returns an inverted homography. 

Definition at line 391 of file tools_obj.py.

391 def invHomography(homography):
392  ''' Returns an inverted homography. '''
393  if(hasattr(homography, 'asNpArray')): homography = homography.asNpArray()
394  invH = np.linalg.linalg.inv(homography)
395  invH /= invH[2,2]
396  return invH
397 

◆ ixsAsObjects()

def lib.tools_obj.ixsAsObjects (   objects,
  ixs 
)
Return list of objects from a list of indices. 

Definition at line 748 of file tools_obj.py.

748 def ixsAsObjects(objects, ixs):
749  ''' Return list of objects from a list of indices. '''
750  return [objects[i] for i in ixs]

◆ join()

def lib.tools_obj.join (   obj1,
  obj2,
  postSmoothing = True 
)

Definition at line 816 of file tools_obj.py.

816 def join(obj1, obj2, postSmoothing=True):
817  #Ensure that obj1 is first object
818  if(obj1.timeInterval.first > obj2.timeInterval.first):
819  _temp = obj2
820  obj2 = obj1
821  obj1 = _temp
822  _temp = None
823 
824  if(obj1.timeInterval.last >= obj2.timeInterval.last): return obj1
825 
826  if(obj2.timeInterval.first <= obj1.timeInterval.last):
827  trim_frames = obj1.timeInterval.last - obj2.timeInterval.first + 1
828  obj2.timeInterval.first = obj1.timeInterval.last + 1
829  obj2.positions.positions[0] = obj2.positions.positions[0][trim_frames:]
830  obj2.positions.positions[1] = obj2.positions.positions[1][trim_frames:]
831  obj2.velocities.positions[0] = obj2.velocities.positions[0][trim_frames:]
832  obj2.velocities.positions[1] = obj2.velocities.positions[1][trim_frames:]
833 
836  Xs,Ys,VXs,VYs = interpolateTrajBetTwoObjects(pos1=[obj1.getXCoordinates()[-1],obj1.getYCoordinates()[-1]], pos2=[obj2.getXCoordinates()[0],obj2.getYCoordinates()[0]], nframes=obj2.timeInterval.first-obj1.timeInterval.last-1)
837  obj1.timeInterval.last = obj2.timeInterval.last
838  obj1.positions.positions[0] += Xs + obj2.positions.positions[0]
839  obj1.positions.positions[1] += Ys + obj2.positions.positions[1]
840  obj1.velocities.positions[0] += VXs + obj2.velocities.positions[0]
841  obj1.velocities.positions[1] += VYs + obj2.velocities.positions[1]
842  if(hasattr(obj1, 'features') and type(obj1.features) is list and obj1.features is not None and hasattr(obj2, 'features') and type(obj2.features) is list and obj2.features is not None):
843  obj1.features += obj2.features
844 
845  if(postSmoothing):
846  try: obj1 = TrafIntSmoothing_smoothObject(obj1, obj1.num, plotResults=False)
847  except: pass
848 
849  #import matplotlib.pyplot as plt
850  #plt.plot(obj1.getXCoordinates()[:trim_indeces[0]], obj1.getYCoordinates()[:trim_indeces[0]], color='b')
851  #plt.annotate(obj1.num, xy=(obj1.getXCoordinates()[1], obj1.getYCoordinates()[1]), color='b')
852  #plt.plot(obj1.getXCoordinates()[trim_indeces[1]:], obj1.getYCoordinates()[trim_indeces[1]:], color='r')
853  #plt.plot(obj1.getXCoordinates()[trim_indeces[0]:trim_indeces[1]], obj1.getYCoordinates()[trim_indeces[0]:trim_indeces[1]], linewidth=2, color='k')
854 
855 
856 
857  return obj1
858 
859 
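
A usage sketch, assuming objects with .num 12 and 47 (illustrative values) are two tracker fragments of the same road user; overlapping frames are trimmed from the later fragment and any remaining gap is filled by interpolateTrajBetTwoObjects():

    a = objects[num2ind(objects, 12)]
    b = objects[num2ind(objects, 47)]
    merged = join(a, b, postSmoothing=True)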

◆ loadObjects()

def lib.tools_obj.loadObjects (   sequencepath,
  max_obj = None,
  max_obj_features = 999,
  suppress_features = False,
  legacy = False,
  legacy_features_path = '' 
)

The following functions are used for manipulating object data from Traffic-Intelligence.

Load sequence data from raw source path (manual). This function is 
    deprecated and feature loading is broken. 

Definition at line 521 of file tools_obj.py.

521 def loadObjects(sequencepath, max_obj=None, max_obj_features=999, suppress_features=False, legacy=False, legacy_features_path=''):
522  ''' Load sequence data from raw source path (manual). This function is
523  deprecated and feature loading is broken. '''
524 
525  if(legacy): from ubc_utils import loadTrajectories
526  else:
527  oldstdout = sys.stdout;sys.stdout = NullWriter()
528  try: import storage as TrafIntStorage
529  finally: sys.stdout = oldstdout #Re-enable output
530 
531  if(max_obj==-1): max_obj = None
532  if(legacy): objects = loadTrajectories(sequencepath, max_obj)
533  else: objects = TrafIntStorage.loadTrajectoriesFromSqlite(sequencepath, 'object', max_obj)
534 
535  if(not suppress_features):
536  if(legacy): features = loadTrajectories(legacy_features_path)
537  else: features = TrafIntStorage.loadTrajectoriesFromSqlite(sequencepath, 'feature')
538  for j in range(len(objects)):
539  try:
540  objects[j].featureNumbers = Math.sample(objects[j].featureNumbers, sampleSize=max_obj_features, method='interpolation')
541  objects[j].setFeatures(features)
542  except:
543  from tools import printWarning as tvaLib_printWarning
544  tvaLib_printWarning('There was a problem with the integrity of the feature database at object '+str(j)+' out of '+str(len(objects))+': some features are missing. This object will be dropped and analysis will continue...', 'Warning')
545  objects[j] = None
546  objects = filter(None, objects)
547  features = None
548 
549  return objects
550 
551 
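
A usage sketch (the path and limits are placeholders; note the docstring above marks this loader as deprecated, with feature loading broken):

    objects = loadObjects('site01/sequence.sqlite', max_obj=200, max_obj_features=50)
    print('%d objects loaded' % len(objects))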

◆ matches()

def lib.tools_obj.matches (   self,
  obj,
  instant,
  matchingDistance 
)

Add-on obj methods.

Indicates if the annotation matches obj (MovingObject) with threshold
    matchingDistance. Returns a number, otherwise munkres does not
    terminate.
    
    Output:
    =======
    Returns a distance if below matchingDistance;
        returns matchingDistance+1 otherwise. 

Definition at line 903 of file tools_obj.py.

903 def matches(self, obj, instant, matchingDistance):
904  ''' Indicates if the annotation matches obj (MovingObject) with threshold
905  matchingDistance. Returns a number, otherwise munkres does not
906  terminate.
907 
908  Output:
909  =======
910  Returns a distance if below matchingDistance;
911  returns matchingDistance+1 otherwise. '''
912 
913  d = TrafIntMoving_Point.distanceNorm2(self.getPositionAtInstant(instant), obj.getPositionAtInstant(instant))
914  if d < matchingDistance: return d
915  else: return matchingDistance + 1
916 
917 

◆ num2ind()

def lib.tools_obj.num2ind (   objects,
  num 
)
Get index of object whose .num attribute = num (brute force method). 

Definition at line 742 of file tools_obj.py.

742 def num2ind(objects, num):
743  ''' Get index of object whose .num attribute = num (brute force method). '''
744  for i in range(len(objects)):
745  if(objects[i].num == num):
746  return i
747  return None

◆ numsAsObjects()

def lib.tools_obj.numsAsObjects (   objects,
  nums 
)
Return list of objects from a list of numbers. 

Definition at line 751 of file tools_obj.py.

751 def numsAsObjects(objects, nums):
752  ''' Return list of objects from a list of numbers. '''
753  return ixsAsObjects(objects, filter(None, [num2ind(objects, num) for num in nums]))
754 
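
A small example: look up objects by their .num identifiers (42 and 43 are illustrative) rather than by list position:

    ix = num2ind(objects, 42)                  # index of the object whose num == 42, or None
    subset = numsAsObjects(objects, [42, 43])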

◆ pointsToTrajectoryTI()

def lib.tools_obj.pointsToTrajectoryTI (   points)
Convert a list of points into a trajectory shape (TI). 

Definition at line 398 of file tools_obj.py.

398 def pointsToTrajectoryTI(points):
399  ''' Convert a list of points into a trajectory shape (TI). '''
400  return [[point[0] for point in points],[point[1] for point in points]]
401 
402 

◆ trajectoryProject()

def lib.tools_obj.trajectoryProject (   obj,
  homography 
)
Create projected positions for object. 

Definition at line 385 of file tools_obj.py.

385 def trajectoryProject(obj, homography):
386  ''' Create projected positions for object. '''
387  obj.projectedPositions = deepcopy(obj.positions)
388  if(homography is not None): obj.projectedPositions.positions = pointsToTrajectoryTI(homographyProject(obj.positions, homography))
389  return obj.projectedPositions
390 

◆ trimObject()

def lib.tools_obj.trimObject (   obj,
  nframes,
  fromEnd = False 
)
Trim from start (or end) of object by nframes. obj will be modified 
    directly. nframes is specified with respect to start of object (not
    its absolute time).

Definition at line 663 of file tools_obj.py.

663 def trimObject(obj, nframes, fromEnd=False):
664  ''' Trim from start (or end) of object by nframes. obj will be modified
665  directly. nframes is specified with respect to start of object (not
666  its absolute time).
667  '''
668 
669  original_first = obj.timeInterval.first
670  original_last = obj.timeInterval.last
671 
672  length = len(obj.positions.positions[0])
673  if(nframes==0): return obj
674  if(nframes >= length): return []
675  if(type(nframes) is not int): nframes = int(nframes)
676  if(fromEnd):
677  obj.positions.positions[0] = obj.positions.positions[0][:-nframes]
678  obj.positions.positions[1] = obj.positions.positions[1][:-nframes]
679  obj.velocities.positions[0] = obj.velocities.positions[0][:-nframes]
680  obj.velocities.positions[1] = obj.velocities.positions[1][:-nframes]
681  try: obj.velocities.positions[2] = obj.velocities.positions[2][:-nframes]
682  except: pass
683  try:
684  obj.curvilinearPositions.positions[0] = obj.curvilinearPositions.positions[0][:-nframes]
685  obj.curvilinearPositions.positions[1] = obj.curvilinearPositions.positions[1][:-nframes]
686  obj.curvilinearPositions.lanes = obj.curvilinearPositions.lanes[:-nframes]
687  except: pass
688  obj.timeInterval.last -= nframes
689  else:
690  obj.positions.positions[0] = obj.positions.positions[0][nframes:]
691  obj.positions.positions[1] = obj.positions.positions[1][nframes:]
692  obj.velocities.positions[0] = obj.velocities.positions[0][nframes:]
693  obj.velocities.positions[1] = obj.velocities.positions[1][nframes:]
694  try: obj.velocities.positions[2] = obj.velocities.positions[2][nframes:]
695  except: pass
696  try:
697  obj.curvilinearPositions.positions[0] = obj.curvilinearPositions.positions[0][nframes:]
698  obj.curvilinearPositions.positions[1] = obj.curvilinearPositions.positions[1][nframes:]
699  obj.curvilinearPositions.lanes = obj.curvilinearPositions.lanes[nframes:]
700  except: pass
701  obj.timeInterval.first += nframes
702 
703  if(hasattr(obj, 'features') and obj.features):
704  for f in range(len(obj.features)):
705  if(fromEnd):
706  nframes_ = nframes-(original_last - obj.features[f].timeInterval.last)
707  if(nframes_ > 0):
708  obj.features[f].positions.positions[0] = obj.features[f].positions.positions[0][:-nframes_]
709  obj.features[f].positions.positions[1] = obj.features[f].positions.positions[1][:-nframes_]
710  obj.features[f].velocities.positions[0] = obj.features[f].velocities.positions[0][:-nframes_]
711  obj.features[f].velocities.positions[1] = obj.features[f].velocities.positions[1][:-nframes_]
712  try: obj.features[f].velocities.positions[2] = obj.features[f].velocities.positions[2][:-nframes_]
713  except: pass
714  try:
715  obj.features[f].curvilinearPositions.positions[0] = obj.features[f].curvilinearPositions.positions[0][:-nframes_]
716  obj.features[f].curvilinearPositions.positions[1] = obj.features[f].curvilinearPositions.positions[1][:-nframes_]
717  obj.features[f].curvilinearPositions.lanes = obj.features[f].curvilinearPositions.lanes[:-nframes_]
718  except: pass
719  obj.features[f].timeInterval.last = obj.timeInterval.last
720  else:
721  nframes_ = nframes-(obj.features[f].timeInterval.first - original_first)
722  if(nframes_ > 0):
723  obj.features[f].positions.positions[0] = obj.features[f].positions.positions[0][nframes_:]
724  obj.features[f].positions.positions[1] = obj.features[f].positions.positions[1][nframes_:]
725  obj.features[f].velocities.positions[0] = obj.features[f].velocities.positions[0][nframes_:]
726  obj.features[f].velocities.positions[1] = obj.features[f].velocities.positions[1][nframes_:]
727  try: obj.features[f].velocities.positions[2] = obj.features[f].velocities.positions[2][nframes_:]
728  except: pass
729  try:
730  obj.features[f].curvilinearPositions.positions[0] = obj.features[f].curvilinearPositions.positions[0][nframes_:]
731  obj.features[f].curvilinearPositions.positions[1] = obj.features[f].curvilinearPositions.positions[1][nframes_:]
732  obj.features[f].curvilinearPositions.lanes = obj.features[f].curvilinearPositions.lanes[nframes_:]
733  except: pass
734  obj.features[f].timeInterval.first = obj.timeInterval.first
735 
736 
737  obj.features = [f for f in obj.features if f.getXCoordinates()]
738 
739  return obj
740 
741 
def trimObject(obj, nframes, fromEnd=False)
Definition: tools_obj.py:663
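A minimal usage sketch for trimObject() (the database path, loadObjects() arguments and frame count are illustrative, not taken from the source):

 from lib.tools_obj import loadObjects, trimObject

 # Load tracked objects from a Traffic-Intelligence database and drop the
 # last 10 frames of each trajectory; supporting features are trimmed too.
 objects = loadObjects('sequence.sqlite', max_obj=100)
 objects = [trimObject(obj, 10, fromEnd=True) for obj in objects]

Because trimObject() also shortens each feature and then discards features left with no coordinates, the returned object stays consistent with its updated timeInterval.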

◆ true_lane_xy()

def lib.tools_obj.true_lane_xy (   objects,
  alignments 
)
Returns a vector of xy coordinates of the observed mean trajectory for each lane alignment
    #######Under construction#######

Definition at line 623 of file tools_obj.py.

623 def true_lane_xy(objects, alignments):
624  ''' Returns a vector of xy coordinates of the observed mean trajectory for each lane alignment
625  #######Under construction#######
626  '''
627 
628  alignments_true = []
629  for lane in range(len(alignments)):
630  alignments_true.append([])
631  #s indexes the current station along the alignment
632  for s, station in enumerate(alignments[lane].pos):
633  if s == 0:
634  #In this case we only have one normal, at the first station
635  normal = [-1*(alignments[lane].pos[s+1][1]-alignments[lane].pos[s][1]),(alignments[lane].pos[s+1][0]-alignments[lane].pos[s][0])]
636  NP = map(sum, zip(station,normal))
637  elif s == (len(alignments[lane].pos)-1):
638  #In this case we only have one normal, at the last station
639  normal = [-1*(alignments[lane].pos[s][1]-alignments[lane].pos[s-1][1]),(alignments[lane].pos[s][0]-alignments[lane].pos[s-1][0])]
640  NP = map(sum, zip(station,normal))
641  else:
642  #In this case we find the midpoint joint normal
643  normal1 = [-1*(alignments[lane].pos[s+1][1]-alignments[lane].pos[s][1]),(alignments[lane].pos[s+1][0]-alignments[lane].pos[s][0])]
644  normal2 = [-1*(alignments[lane].pos[s][1]-alignments[lane].pos[s-1][1]),(alignments[lane].pos[s][0]-alignments[lane].pos[s-1][0])]
645  NP = Geo.orthogonal_joint_midpoint(station,normal1,normal2)
646  cluster_list_x = []
647  cluster_list_y = []
648  for i in range(len(objects)):
649  for position in range(len(objects[i].positions.posTrans[2])-1):
650  if objects[i].positions.posTrans[2][position] != lane or objects[i].positions.posTrans[2][position+1] != lane:
651  continue
652  if Geo.points_are_segmented(station,NP,[objects[i].positions.positions[0][position],objects[i].positions.positions[1][position]],[objects[i].positions.positions[0][position+1],objects[i].positions.positions[1][position+1]]):
653  [x,y] = Geo.lineInter(station,NP,[objects[i].positions.positions[0][position],objects[i].positions.positions[1][position]],[objects[i].positions.positions[0][position+1],objects[i].positions.positions[1][position+1]])
654  cluster_list_x.append(x)
655  cluster_list_y.append(y)
656  break
657  #Calculate the average station position
658  if len(cluster_list_x) != 0:
659  alignments_true[lane].append([sum(cluster_list_x)/len(cluster_list_x),sum(cluster_list_y)/len(cluster_list_y)])
660  return alignments_true
661 
662 
def true_lane_xy(objects, alignments)
Definition: tools_obj.py:623
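The per-station normal computed above is just the local segment direction rotated 90 degrees, i.e. (dx, dy) becomes (-dy, dx). A standalone sketch of that step (segment_normal is a hypothetical helper, not part of tools_obj.py):

 def segment_normal(p0, p1):
     # Perpendicular to the segment p0 -> p1: rotate (dx, dy) into (-dy, dx)
     dx = p1[0] - p0[0]
     dy = p1[1] - p0[1]
     return [-dy, dx]

 # A segment running along +x has a normal along +y
 assert segment_normal([0.0, 0.0], [1.0, 0.0]) == [0.0, 1.0]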

◆ undistortImage()

def lib.tools_obj.undistortImage (   fileName,
  outputName,
  camera_matrix = [[377.42, 0.0, 639.12], [0.0, 378.43, 490.20], [0.0, 0.0, 1.0]],
  dist_coeffs = [-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816],
  frames = 0,
  display = False,
  fourcc = 'MJPG',
  imageOutOffset = 0,
  aceptableImageExtensions = ('.png', '.jpg', '.jpeg'),
  freeScalingParameter = 1.31,
  imageScalingFactor = 1.31,
  maxWindowSize = 800,
  verbose = 0 
)

Computer vision.

This code is mostly based on the following (with English translation):
    http://www.htw-mechlab.de/index.php/undistortion-der-gopro-hd-hero2/
    
    More info:
    https://en.wikipedia.org/wiki/Barrel_distortion#Software_correction

    If fileName or outputName is an image file (png or jpg), the output will be an image as well
    
    #Sample parameters for GoPro HD Hero2 1280*960 (In-house calibration):
    camera_matrix = [[377.42, 0.0,     639.12],
                     [0.0,    378.43,  490.20],
                     [0.0,    0.0,     1.0]]
    dist_coeffs = [[-0.11759321,  0.0148536,  0.00030756, -0.00020578, -0.00091816]]
    #Trial 2:
    camera_matrix = [[636.69002944,    0.        ,  633.67659913],
                     [  0.        ,  644.29664112,  504.71528879],
                     [  0.        ,    0.        ,    1.        ]]
    dist_coeffs =  [[-0.30511597,  0.12479823, -0.00346353,  0.00082249, -0.02849068]]
    
    #Sample parameters for GoPro HD Hero2 1280*720 (Taken from http://www.htw-mechlab.de/index.php/undistortion-der-gopro-hd-hero2/):
    camera_matrix = [[469.96, 0.0,    640],
             [0.0,    467.68, 360],
             [0.0,    0.0,    1.0]]
    dist_coeffs = [-0.18957, 0.037319, 0.0, 0.0, -0.00337]
    
    #Sample parameters for GoPro HD Hero2 4K (In-house calibration):
    camera_matrix = [[  1.80151934e+03,   0.00000000e+00,   1.88663242e+03],
                     [  0.00000000e+00,   1.80423487e+03,   1.48375206e+03],
                     [  0.00000000e+00,   0.00000000e+00,   1.00000000e+00]]
     dist_coeffs = [[ -3.41396353e-01,   1.70761088e-01,   1.85596892e-05,   6.56168307e-04,  -5.22716120e-02]]

Definition at line 41 of file tools_obj.py.

41 def undistortImage(fileName, outputName, camera_matrix=[[377.42,0.0,639.12],[0.0,378.43,490.20],[0.0,0.0,1.0]], dist_coeffs=[-0.11759321,0.0148536,0.00030756,-0.00020578,-0.00091816], frames=0, display=False, fourcc='MJPG', imageOutOffset=0, aceptableImageExtensions=('.png', '.jpg', '.jpeg'), freeScalingParameter=1.31, imageScalingFactor=1.31, maxWindowSize=800, verbose=0):
42  ''' This code is mostly based on the following (with English translation):
43  http://www.htw-mechlab.de/index.php/undistortion-der-gopro-hd-hero2/
44 
45  More info:
46  https://en.wikipedia.org/wiki/Barrel_distortion#Software_correction
47 
48  If fileName or outputName is an image file (png or jpg), the output will be an image as well
49 
50  #Sample parameters for GoPro HD Hero2 1280*960 (In-house calibration):
51  camera_matrix = [[377.42, 0.0, 639.12],
52  [0.0, 378.43, 490.20],
53  [0.0, 0.0, 1.0]]
54  dist_coeffs = [[-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816]]
55  #Trial 2:
56  camera_matrix = [[636.69002944, 0. , 633.67659913],
57  [ 0. , 644.29664112, 504.71528879],
58  [ 0. , 0. , 1. ]]
59  dist_coeffs = [[-0.30511597, 0.12479823, -0.00346353, 0.00082249, -0.02849068]]
60 
61  #Sample parameters for GoPro HD Hero2 1280*720 (Taken from http://www.htw-mechlab.de/index.php/undistortion-der-gopro-hd-hero2/):
62  camera_matrix = [[469.96, 0.0, 640],
63  [0.0, 467.68, 360],
64  [0.0, 0.0, 1.0]]
65  dist_coeffs = [-0.18957, 0.037319, 0.0, 0.0, -0.00337]
66 
67  #Sample parameters for GoPro HD Hero2 4K (In-house calibration):
68  camera_matrix = [[ 1.80151934e+03, 0.00000000e+00, 1.88663242e+03],
69  [ 0.00000000e+00, 1.80423487e+03, 1.48375206e+03],
70  [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]
71  dist_coeffs = [[ -3.41396353e-01, 1.70761088e-01, 1.85596892e-05, 6.56168307e-04, -5.22716120e-02]]
72  '''
73 
74 
75 
76  if(outputName.lower().endswith(aceptableImageExtensions)): saveToImage = True
77  else: saveToImage = False
78 
79  camera_matrix = np.array(camera_matrix)
80  dist_coeffs = np.array(dist_coeffs)
81  R = np.identity(3)
82 
83 
84  if(fileName.lower().endswith(aceptableImageExtensions) and saveToImage):
85  source = cv2.imread(fileName)
86  width = source.shape[1]
87  height = source.shape[0]
88  newImgSize = (int(round(width*imageScalingFactor)), int(round(height*imageScalingFactor)))
89  #new_matrix, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coeffs, (width, height), freeScalingParameter, newImgSize, centerPrincipalPoint=True)
90  new_matrix = deepcopy(camera_matrix)
91  new_matrix[0,2] = newImgSize[0]/2.
92  new_matrix[1,2] = newImgSize[1]/2.
93  [map1, map2] = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, R, new_matrix, newImgSize, cv2.CV_32FC1)
94  undistImage = cv2.remap(source, map1, map2, interpolation=cv2.INTER_LINEAR)
95 
96  if(display):
97  cv2.namedWindow('Source', cv2.WINDOW_NORMAL)
98  cv2.resizeWindow('Source', min(maxWindowSize, width), int(min(maxWindowSize, width)/width*height))
99  cv2.imshow('Source', source)
100  cv2.namedWindow('Undistorted', cv2.WINDOW_NORMAL)
101  cv2.resizeWindow('Undistorted', min(maxWindowSize, width), int(min(maxWindowSize, width)/width*height))
102  cv2.imshow('Undistorted', undistImage)
103  cv2.waitKey(0)
104  cv2.destroyAllWindows()
105 
106  cv2.imwrite(outputName, undistImage)
107  return True
108 
109 
110  else:
111 
112  source = cv2.VideoCapture(fileName)
113  nFrames = int(source.get(7))
114  fps = int(source.get(5))
115  width = int(source.get(3))
116  height = int(source.get(4))
117 
118  newImgSize = (int(round(width*imageScalingFactor)), int(round(height*imageScalingFactor)))
119  new_matrix = deepcopy(camera_matrix)
120  new_matrix[0,2] = newImgSize[0]/2.
121  new_matrix[1,2] = newImgSize[1]/2.
122  [map1, map2] = cv2.initUndistortRectifyMap(camera_matrix, dist_coeffs, R, new_matrix, newImgSize, cv2.CV_32FC1)
123 
124  if(verbose >= 2):
125  print 'Num. Frames = ', nFrames
126  print 'Frame Rate = ', fps, ' frames per sec'
127 
128  if(not saveToImage):
129  #fourcc=-1, # With -1: Windows GUI codec selection
130  try: fourcc_ = cv2.VideoWriter_fourcc(*fourcc)
131  #Add legacy support for opencv 2.4
132  except AttributeError: fourcc_ = cv2.cv.CV_FOURCC(*fourcc)
133  writer = cv2.VideoWriter(
134  filename=outputName,
135  fourcc=fourcc_,
136  fps=fps,
137  frameSize=newImgSize,
138  isColor=1)
139 
140  if(frames and frames < nFrames): nFrames = int(frames)
141  from tools import ProgressBar as tvaLib_ProgressBar
142  if(verbose): prog = tvaLib_ProgressBar(0, nFrames, 77)
143  for f in xrange(nFrames):
144  _, frameImg = source.read()
145  undistImage = cv2.remap(frameImg, map1, map2, interpolation=cv2.INTER_LINEAR)
146 
147  if(display):
148  cv2.namedWindow('Undistorted', cv2.WINDOW_NORMAL)
149  cv2.resizeWindow('Undistorted', min(maxWindowSize, width), int(min(maxWindowSize, width)/width*height))
150  cv2.imshow('Undistorted', undistImage)
151  if(saveToImage):
152  cv2.waitKey(0)
153  else:
154  k = cv2.waitKey(1)
155  if(k % 0x100 == 27): break
156 
157  if(saveToImage):
158  cv2.imwrite(outputName, undistImage)
159  break
160  else:
161  writer.write(undistImage)
162  if(verbose): prog.updateAmount(f)
163 
164  #Garbage collection
165  if(display): cv2.destroyAllWindows()
166  if(not saveToImage): del writer
167 
168  return True
169 
170 
171 
def undistortImage(fileName, outputName, camera_matrix=[[377.42, 0.0, 639.12], [0.0, 378.43, 490.20], [0.0, 0.0, 1.0]], dist_coeffs=[-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816], frames=0, display=False, fourcc='MJPG', imageOutOffset=0, aceptableImageExtensions=('.png', '.jpg', '.jpeg'), freeScalingParameter=1.31, imageScalingFactor=1.31, maxWindowSize=800, verbose=0)
Computer vision.
Definition: tools_obj.py:41
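A minimal usage sketch (file names are placeholders), reusing the GoPro HD Hero2 1280*960 sample calibration quoted in the description above:

 from lib.tools_obj import undistortImage

 camera_matrix = [[377.42, 0.0,    639.12],
                  [0.0,    378.43, 490.20],
                  [0.0,    0.0,    1.0]]
 dist_coeffs = [-0.11759321, 0.0148536, 0.00030756, -0.00020578, -0.00091816]

 # Both names end in an accepted image extension, so the still-image branch
 # runs and a single undistorted frame is written to disk.
 undistortImage('frame.png', 'frame_undistorted.png',
                camera_matrix=camera_matrix, dist_coeffs=dist_coeffs)

To undistort an entire video, pass video file names instead and optionally set frames= to cap how many frames are processed.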

Variable Documentation

◆ oldstdout

lib.tools_obj.oldstdout = NullWriter()

Definition at line 17 of file tools_obj.py.

◆ stdout

lib.tools_obj.stdout

Definition at line 34 of file tools_obj.py.
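Taken together, NullWriter, oldstdout and stdout point to the usual trick of silencing console output by temporarily swapping sys.stdout for a null writer. A hedged sketch of the general pattern follows; note that the module documents oldstdout = NullWriter(), so the exact statements in tools_obj.py may differ:

 import sys

 class NullWriter(object):
     def write(self, text):
         pass  # discard everything written to this stream

 oldstdout = sys.stdout       # remember the real stream
 sys.stdout = NullWriter()    # writes are now silently discarded
 print('this line is swallowed')
 sys.stdout = oldstdout       # restore normal output
 print('this line is visible again')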