+
+
+
\ No newline at end of file
diff --git a/trackeval/.idea/misc.xml b/trackeval/.idea/misc.xml
new file mode 100644
index 0000000..835a6a6
--- /dev/null
+++ b/trackeval/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/trackeval/.idea/modules.xml b/trackeval/.idea/modules.xml
new file mode 100644
index 0000000..7dc71b9
--- /dev/null
+++ b/trackeval/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/trackeval/.idea/trackeval.iml b/trackeval/.idea/trackeval.iml
new file mode 100644
index 0000000..07cf51c
--- /dev/null
+++ b/trackeval/.idea/trackeval.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/trackeval/Readme.md b/trackeval/Readme.md
new file mode 100644
index 0000000..ff8e198
--- /dev/null
+++ b/trackeval/Readme.md
@@ -0,0 +1,363 @@
+# Modified official MOT metric evaluation code
+
+These modifications use MOT17 evaluation as the baseline. The remaining unrelated scripts will be removed, and the related scripts that were troublesome to use have already been modified.
+
+
+## Data format for metric evaluation
+### MOT17 gt.txt format:
+frame, id, bb_left, bb_top, bb_width, bb_height, active, label_id, visibility ratio
+
+Field 0 is the frame index;
+Field 1 is the ID of the target's trajectory;
+Fields 2 to 5 are the top-left corner coordinates and the width and height of the bounding box;
+Field 6 flags whether the trajectory is considered for evaluation: 0 means ignored, 1 means active;
+Field 7 is the object class of the trajectory (see the label-ID mapping below);
+Field 8 is the visibility ratio of the box, i.e. how much the target is contained/covered by other targets' boxes or clipped at the box edges while moving.
+
+Notes:
+If field 6 (active) is 0, the target in that row is excluded from the evaluation.
+For field 7 (object class), the class names and IDs must match exactly; the class to evaluate is selected by its ID, as follows:
+"class_name_to_class_id":
+{'pedestrian': 1, 'person_on_vehicle': 2, 'car': 3, 'bicycle': 4, 'motorbike': 5, 'non_mot_vehicle': 6,
+'static_person': 7, 'distractor': 8, 'occluder': 9, 'occluder_on_ground': 10, 'occluder_full': 11,
+'reflection': 12, 'crowd': 13}
+See the figure for details:
+![](data/images/mot-format.png)
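+
+For illustration, a hypothetical gt.txt row (values invented) could look like:
+
+```
+1,2,794,247,71,174,1,1,0.86
+```
+
+i.e. frame 1, trajectory ID 2, a 71x174 box with top-left corner (794, 247), active, class 1 (pedestrian), visibility ratio 0.86.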
+
+### Prediction data format
+
+frame_id, id, bbox_left, bbox_top, bbox_w, bbox_h, -1, -1, -1, i
+Field 0 (frame_id) is the frame index; there is no frame 0, frames start at 1 and must match the frames in gt.txt;
+Field 1 (id) is the tracking ID assigned by the tracking algorithm, i.e. what is usually called track_id;
+Fields 2 to 5 are the top-left corner coordinates and the width and height of the bounding box;
+Field 9 (i) is the index of the target within frame frame_idx+1; it can also be fixed to -1, as this parameter is not used.
+
+Notes:
+The tracker is expected to process the images in order, one by one, from the first frame to the last;
+the output is saved as a prediction txt file whose name must match the sequence name, e.g. MOT17-02-FRCNN.txt.
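+
+A hypothetical prediction row in this format (values invented) could look like:
+
+```
+1,3,912,484,97,109,-1,-1,-1,-1
+```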
+
+
+![](data/images/mot-predect-format.png)
+
+## File structure
+### gt folder:
+The root directory contains the list of sequence folders to evaluate. Each sequence folder (e.g. MOT17-02-FRCNN) contains a gt.txt file and a seqinfo.ini file;
+this path is controlled by the config['GT_LOC_FORMAT'] = '{gt_folder}/{seq}/gt/gt.txt' parameter.
+The seqinfo.ini file records information about the sequence folder (name=MOT17-02-FRCNN); mainly the seqLength field is used.
+seqinfo.ini looks like this:
+
+```
+
+ [Sequence]
+ name=MOT17-02-FRCNN
+ imDir=img1
+ frameRate=30
+ seqLength=600
+ imWidth=1920
+ imHeight=1080
+ imExt=.jpg
+
+```
+It can also be reduced to the following:
+```
+
+ [Sequence]
+ seqLength=600
+
+```
+
+
+### predect folder:
+The root directory contains only the corresponding txt files, each named after its gt sequence folder, e.g. MOT17-02-FRCNN.txt.
+The concrete layout is shown in the figure below:
+![](data/images/file-format.png)
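+
+Putting both folders together, the layout assumed by the config examples below would be (paths illustrative):
+
+```
+data/
+├── mot17_gt/
+│   ├── MOT17-02-FRCNN/
+│   │   ├── gt/
+│   │   │   └── gt.txt
+│   │   └── seqinfo.ini
+│   └── ...
+└── predect_mot/
+    ├── MOT17-02-FRCNN.txt
+    └── ...
+```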
+
+
+
+## Runtime parameters
+The file mot_challenge_2d_box.py contains the class MotChallenge2DBox(_BaseDataset), which handles the data.
+This class has a default_config dictionary, which I have modified; its usage is explained below.
+```
+
+    default_config = {
+        'GT_FOLDER': os.path.join(code_path, 'data/'),  # Location of GT data
+        'TRACKERS_FOLDER': os.path.join(code_path, 'data/predect_mot/'),  # Trackers (predictions) location
+        'OUTPUT_FOLDER': None,  # Where to save eval results; if None, saved under TRACKERS_FOLDER
+        # 'TRACKERS_TO_EVAL': None,  # Filenames of predect_mot to eval (if None, all in folder)
+        'CLASSES_TO_EVAL': ['pedestrian'],  # Which classes to evaluate. Valid: ['pedestrian']
+        'BENCHMARK': 'MOT17',  # Display name only; any name works. MOT17 by default, since MOT17 was the baseline for these changes
+
+        'PRINT_CONFIG': True,  # Whether to print current config
+        'DO_PREPROC': True,  # Whether to perform preprocessing (never done for MOT15)
+        'TRACKER_SUB_FOLDER': 'data',  # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
+        'OUTPUT_SUB_FOLDER': '',  # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
+
+        'TRACKER_DISPLAY_NAMES': None,  # Names of predect_mot to display, if None: TRACKERS_TO_EVAL
+        'SEQMAP_FOLDER': None,  # Where seqmaps are found (if None, GT_FOLDER/seqmaps)
+        'SEQMAP_FILE': None,  # Directly specify seqmap file (if none use seqmap_folder/benchmark-split_to_eval)
+        'SEQ_INFO': None,  # Sequences to evaluate; if None, every folder in GT_FOLDER is treated as a seq
+
+        'GT_LOC_FORMAT': '{gt_folder}/{seq}/gt/gt.txt',  # Pattern locating the GT txt; gt_folder is GT_FOLDER, seq iterates over seq_list
+        'SKIP_SPLIT_FOL': False,  # If False, data is in GT_FOLDER/BENCHMARK-SPLIT_TO_EVAL/ and in
+                                  # TRACKERS_FOLDER/BENCHMARK-SPLIT_TO_EVAL/tracker/
+                                  # If True, then the middle 'benchmark-split' folder is skipped for both.
+        "class_name_to_class_id": {'pedestrian': 1, 'person_on_vehicle': 2, 'car': 3, 'bicycle': 4, 'motorbike': 5,
+                                   'non_mot_vehicle': 6, 'static_person': 7, 'distractor': 8, 'occluder': 9,
+                                   'occluder_on_ground': 10, 'occluder_full': 11, 'reflection': 12, 'crowd': 13},
+        'use_super_categories': False,  # Whether to additionally evaluate merged classes
+        'super_categories': {"FF": ['pedestrian', 'car']}  # Maps each value class onto its key and reports metrics for the merged class
+    }
+
+
+```
+### Key parameters
+1. Evaluating merged classes: setting use_super_categories to True additionally evaluates the classes specified in super_categories together as one merged class (see the sketch after this list); 'super_categories' defines the merge, e.g. merging 'pedestrian' and 'car' into 'FF': {"FF": ['pedestrian', 'car']}.
+2. Selecting the classes to evaluate: CLASSES_TO_EVAL is a list of the gt.txt classes to evaluate; note that together with field 6 (active) of gt.txt it determines the valid GT targets.
+3. Specifying the GT path: GT_LOC_FORMAT, e.g. {gt_folder}/{seq}/gt/gt.txt, determines where gt.txt is found.
+4. Selecting the sequences to evaluate: SEQ_INFO specifies which sequences within gt_folder are evaluated.
+5. Mapping the GT classes: class_name_to_class_id is a dictionary mapping class names to class IDs; the label_id in gt.txt is the corresponding number, which provides the basis for item 2.
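+
+A minimal sketch of enabling merged-class evaluation via the config dictionary (the merged class name "FF" is just an example):
+
+```
+config['use_super_categories'] = True
+config['super_categories'] = {"FF": ['pedestrian', 'car']}  # report metrics for pedestrian and car combined as 'FF'
+```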
+
+
+## Custom parameters
+
+The defaults of MotChallenge2DBox(_BaseDataset) can be overridden via a config dictionary, as follows:
+
+```
+
+    config = {}
+    config['TRACKERS_FOLDER'] = ROOT + '/data/predect_mot'  # prediction path
+    config['GT_FOLDER'] = ROOT + '/data/mot17_gt'  # GT path
+    config['OUTPUT_FOLDER'] = ROOT + '/data/out_dir'
+    # pattern locating gt.txt inside each sequence; gt_folder = config['GT_FOLDER'], seq iterates over os.listdir(gt_folder)
+    config['GT_LOC_FORMAT'] = '{gt_folder}/{seq}/gt/gt.txt'
+    config['CLASSES_TO_EVAL'] = ['pedestrian']  # classes to evaluate
+    dataset = MotChallenge2DBox(config)  # dataset holds the data information
+
+
+```
+
+
+
+## Running
+
+After setting the parameters as above, simply run the run_mot_challenge.py file.
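+
+Alternatively, the evaluation can be driven directly from Python. A minimal sketch following scripts/run_mot_challenge.py, reusing the dataset built above (the metric choice is illustrative):
+
+```
+import trackeval
+
+# evaluator with the library's default evaluation settings
+evaluator = trackeval.Evaluator(trackeval.Evaluator.get_default_eval_config())
+# metrics to compute; any subset of the implemented metric classes works
+metrics_list = [trackeval.metrics.HOTA(), trackeval.metrics.CLEAR(), trackeval.metrics.Identity()]
+# runs the evaluation, prints the results and writes them to OUTPUT_FOLDER
+evaluator.evaluate([dataset], metrics_list)
+```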
+
+## tools utilities
+mot_vision.py visualizes the gt.txt annotations on the images; example output:
+
+![](tools/img.png)
+
+
+## Evaluation results
+
+![](data/images/result.png)
+
+
+# TrackEval
+*Code for evaluating object tracking.*
+
+This codebase provides code for a number of different tracking evaluation metrics (including the [HOTA metrics](https://link.springer.com/article/10.1007/s11263-020-01375-2)), as well as supporting running all of these metrics on a number of different tracking benchmarks. It also supports plotting results and other things one may want to do for tracking evaluation.
+
+## **NEW**: RobMOTS Challenge 2021
+
+Call for submission to our [RobMOTS Challenge](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110) (Robust Multi-Object Tracking and Segmentation) held in conjunction with our [RVSU CVPR'21 Workshop](https://eval.vision.rwth-aachen.de/rvsu-workshop21/). Robust tracking evaluation against 8 tracking benchmarks. Challenge submission deadline June 15th. Also check out our workshop [call for papers](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=74).
+
+## Official Evaluation Code
+
+The following benchmarks use TrackEval as their official evaluation code; check out the links to see TrackEval in action:
+
+ - **[RobMOTS](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110)** ([Official Readme](docs/RobMOTS-Official/Readme.md))
+ - **[KITTI Tracking](http://www.cvlibs.net/datasets/kitti/eval_tracking.php)**
+ - **[KITTI MOTS](http://www.cvlibs.net/datasets/kitti/eval_mots.php)**
+ - **[MOTChallenge](https://motchallenge.net/)** ([Official Readme](docs/MOTChallenge-Official/Readme.md))
+ - **[Open World Tracking](https://openworldtracking.github.io)** ([Official Readme](docs/OpenWorldTracking-Official))
+ - **[PersonPath22](https://amazon-research.github.io/tracking-dataset/personpath22.html)**
+
+
+If you run a tracking benchmark and want to use TrackEval as your official evaluation code, please contact Jonathon (contact details below).
+
+## Currently implemented metrics
+
+The following metrics are currently implemented:
+
+Metric Family | Sub metrics | Paper | Code | Notes |
+|----- | ----------- |----- | ----------- | ----- |
+| | | | | |
+|**HOTA metrics**|HOTA, DetA, AssA, LocA, DetPr, DetRe, AssPr, AssRe|[paper](https://link.springer.com/article/10.1007/s11263-020-01375-2)|[code](trackeval/metrics/hota.py)|**Recommended tracking metric**|
+|**CLEARMOT metrics**|MOTA, MOTP, MT, ML, Frag, etc.|[paper](https://link.springer.com/article/10.1155/2008/246309)|[code](trackeval/metrics/clear.py)| |
+|**Identity metrics**|IDF1, IDP, IDR|[paper](https://arxiv.org/abs/1609.01775)|[code](trackeval/metrics/identity.py)| |
+|**VACE metrics**|ATA, SFDA|[paper](https://link.springer.com/chapter/10.1007/11612704_16)|[code](trackeval/metrics/vace.py)| |
+|**Track mAP metrics**|Track mAP|[paper](https://arxiv.org/abs/1905.04804)|[code](trackeval/metrics/track_map.py)|Requires confidence scores|
+|**J & F metrics**|J&F, J, F|[paper](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf)|[code](trackeval/metrics/j_and_f.py)|Only for Seg Masks|
+|**ID Euclidean**|ID Euclidean|[paper](https://arxiv.org/pdf/2103.13516.pdf)|[code](trackeval/metrics/ideucl.py)| |
+
+
+## Currently implemented benchmarks
+
+The following benchmarks are currently implemented:
+
+Benchmark | Sub-benchmarks | Type | Website | Code | Data Format |
+|----- | ----------- |----- | ----------- | ----- | ----- |
+| | | | | | |
+|**RobMOTS**|Combination of 8 benchmarks|Seg Masks|[website](https://eval.vision.rwth-aachen.de/rvsu-workshop21/?page_id=110)|[code](trackeval/datasets/rob_mots.py)|[format](docs/RobMOTS-Official/Readme.md)|
+|**Open World Tracking**|TAO-OW|OpenWorld / Seg Masks|[website](https://openworldtracking.github.io)|[code](trackeval/datasets/tao_ow.py)|[format](docs/OpenWorldTracking-Official/Readme.md)|
+|**MOTChallenge**|MOT15/16/17/20|2D BBox|[website](https://motchallenge.net/)|[code](trackeval/datasets/mot_challenge_2d_box.py)|[format](docs/MOTChallenge-format.txt)|
+|**KITTI Tracking**| |2D BBox|[website](http://www.cvlibs.net/datasets/kitti/eval_tracking.php)|[code](trackeval/datasets/kitti_2d_box.py)|[format](docs/KITTI-format.txt)|
+|**BDD-100k**| |2D BBox|[website](https://bdd-data.berkeley.edu/)|[code](trackeval/datasets/bdd100k.py)|[format](docs/BDD100k-format.txt)|
+|**TAO**| |2D BBox|[website](https://taodataset.org/)|[code](trackeval/datasets/tao.py)|[format](docs/TAO-format.txt)|
+|**MOTS**|KITTI-MOTS, MOTS-Challenge|Seg Mask|[website](https://www.vision.rwth-aachen.de/page/mots)|[code](trackeval/datasets/mots_challenge.py) and [code](trackeval/datasets/kitti_mots.py)|[format](docs/MOTS-format.txt)|
+|**DAVIS**|Unsupervised|Seg Mask|[website](https://davischallenge.org/)|[code](trackeval/datasets/davis.py)|[format](docs/DAVIS-format.txt)|
+|**YouTube-VIS**| |Seg Mask|[website](https://youtube-vos.org/dataset/vis/)|[code](trackeval/datasets/youtube_vis.py)|[format](docs/YouTube-VIS-format.txt)|
+|**Head Tracking Challenge**| |2D BBox|[website](https://arxiv.org/pdf/2103.13516.pdf)|[code](trackeval/datasets/head_tracking_challenge.py)|[format](docs/MOTChallenge-format.txt)|
+|**PersonPath22**| |2D BBox|[website](https://github.com/amazon-research/tracking-dataset)|[code](trackeval/datasets/person_path_22.py)|[format](docs/MOTChallenge-format.txt)|
+|**BURST**| {Common, Long-tail, Open-world} Class-guided, {Point, Box, Mask} Exemplar-guided |Seg Mask|[website](https://github.com/Ali2500/BURST-benchmark)| |[format](https://github.com/Ali2500/BURST-benchmark/blob/main/ANNOTATION_FORMAT.md)|
+
+## HOTA metrics
+
+This code is also the official reference implementation for the HOTA metrics:
+
+*[HOTA: A Higher Order Metric for Evaluating Multi-Object Tracking](https://link.springer.com/article/10.1007/s11263-020-01375-2). IJCV 2020. Jonathon Luiten, Aljosa Osep, Patrick Dendorfer, Philip Torr, Andreas Geiger, Laura Leal-Taixe and Bastian Leibe.*
+
+HOTA is a novel set of MOT evaluation metrics which enable better understanding of tracking behavior than previous metrics.
+
+For more information check out the following links:
+ - [Short blog post on HOTA](https://jonathonluiten.medium.com/how-to-evaluate-tracking-with-the-hota-metrics-754036d183e1) - **HIGHLY RECOMMENDED READING**
+ - [IJCV version of paper](https://link.springer.com/article/10.1007/s11263-020-01375-2) (Open Access)
+ - [ArXiv version of paper](https://arxiv.org/abs/2009.07736)
+ - [Code](trackeval/metrics/hota.py)
+
+## Properties of this codebase
+
+The code is written 100% in python with only numpy and scipy as minimum requirements.
+
+The code is designed to be easily understandable and easily extendable.
+
+The code is also extremely fast, running at more than 10x the speed of both the [MOTChallengeEvalKit](https://github.com/dendorferpatrick/MOTChallengeEvalKit) and [py-motmetrics](https://github.com/cheind/py-motmetrics) (see the detailed speed comparison below).
+
+The implementation of CLEARMOT and ID metrics aligns perfectly with the [MOTChallengeEvalKit](https://github.com/dendorferpatrick/MOTChallengeEvalKit).
+
+By default the code prints results to the screen, saves results out as both a summary txt file and a detailed results csv file, and outputs plots of the results. All outputs are by default saved to the 'tracker' folder for each tracker.
+
+## Running the code
+
+The code can be run in one of two ways:
+
+ - From the terminal via one of the scripts [here](scripts/). See each script for instructions and arguments; these should be self-explanatory.
+ - Directly by importing this package into your code; see the same scripts above for how.
+
+## Quickly evaluate on supported benchmarks
+
+To enable you to use TrackEval for evaluation as quickly and easily as possible, we provide ground-truth data, meta-data and example trackers for all currently supported benchmarks.
+You can download this here: [data.zip](https://omnomnom.vision.rwth-aachen.de/data/TrackEval/data.zip) (~150mb).
+
+The data for RobMOTS is separate and can be found here: [rob_mots_train_data.zip](https://omnomnom.vision.rwth-aachen.de/data/RobMOTS/train_data.zip) (~750mb).
+
+The data for PersonPath22 is separate and can be found here: [person_path_22_data.zip](https://tracking-dataset-eccv-2022.s3.us-east-2.amazonaws.com/person_path_22_data.zip) (~3mb).
+
+The easiest way to begin is to extract this zip into the repository root folder such that the file paths look like: TrackEval/data/gt/...
+
+This then corresponds to the default paths in the code. You can now run each of the scripts [here](scripts/) without providing any arguments and they will by default evaluate all trackers present in the supplied file structure. To evaluate your own tracking results, simply copy your files as a new tracker folder into the file structure at the same level as the example trackers (MPNTrack, CIWT, track_rcnn, qdtrack, ags, Tracktor++, STEm_Seg), ensuring the same file structure for your trackers as in the example.
+
+Of course, if your ground-truth and tracker files are located somewhere else you can simply use the script arguments to point the code toward your data.
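+
+For example, a hypothetical call pointing the MOTChallenge script at custom locations could look like this (paths and tracker name invented):
+
+```
+python scripts/run_mot_challenge.py --GT_FOLDER /path/to/gt --TRACKERS_FOLDER /path/to/trackers --TRACKERS_TO_EVAL my_tracker
+```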
+
+To ensure your tracker outputs data in the correct format, check out our format guides for each of the supported benchmarks [here](docs), or check out the example trackers provided.
+
+## Evaluate on your own custom benchmark
+
+To evaluate on your own data, you have two options:
+ - Write custom dataset code (more effort, rarely worth it).
+ - Convert your current dataset and trackers to the same format of an already implemented benchmark.
+
+To convert formats, check out the format specifications defined [here](docs).
+
+By default, we would recommend the MOTChallenge format, although any implemented format should work. Note that in many cases you will want to use the argument ```--DO_PREPROC False```, unless you specifically want to run the preprocessing that removes distractor objects. An example is shown below.
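+
+For example (benchmark choice illustrative):
+
+```
+python scripts/run_mot_challenge.py --BENCHMARK MOT17 --DO_PREPROC False
+```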
+
+## Requirements
+ Code tested on Python 3.7.
+
+ - Minimum requirements: numpy, scipy
+ - For plotting: matplotlib
+ - For segmentation datasets (KITTI MOTS, MOTS-Challenge, DAVIS, YouTube-VIS): pycocotools
+ - For DAVIS dataset: Pillow
+ - For J & F metric: opencv_python, scikit_image
+ - For simple test cases for the metrics: pytest
+
+use ```pip3 install -r requirements.txt``` to install all possible requirements.
+
+use ```pip3 install -r minimum_requirements.txt``` to install only the minimum requirements if you don't need the extra functionality listed above.
+
+## Timing analysis
+
+Evaluating CLEAR + ID metrics on the Lif_T tracker on MOT17-train (seconds) on an i7-9700K CPU with 8 physical cores (median of 3 runs):
+Num Cores|TrackEval|MOTChallenge|Speedup vs MOTChallenge|py-motmetrics|Speedup vs py-motmetrics
+:---|:---|:---|:---|:---|:---
+1|9.64|66.23|6.87x|99.65|10.34x
+4|3.01|29.42|9.77x| |33.11x*
+8|1.62|29.51|18.22x| |61.51x*
+
+*using a different number of cores as py-motmetrics doesn't allow multiprocessing.
+
+```
+python scripts/run_mot_challenge.py --BENCHMARK MOT17 --TRACKERS_TO_EVAL Lif_T --METRICS CLEAR Identity --USE_PARALLEL False --NUM_PARALLEL_CORES 1
+```
+
+Evaluating CLEAR + ID metrics on the LPC_MOT tracker on MOT20-train (seconds) on an i7-9700K CPU with 8 physical cores (median of 3 runs):
+Num Cores|TrackEval|MOTChallenge|Speedup vs MOTChallenge|py-motmetrics|Speedup vs py-motmetrics
+:---|:---|:---|:---|:---|:---
+1|18.63|105.3|5.65x|175.17|9.40x
+
+```
+python scripts/run_mot_challenge.py --BENCHMARK MOT20 --TRACKERS_TO_EVAL LPC_MOT --METRICS CLEAR Identity --USE_PARALLEL False --NUM_PARALLEL_CORES 1
+```
+
+## License
+
+TrackEval is released under the [MIT License](LICENSE).
+
+## Contact
+
+If you encounter any problems with the code, please contact [Jonathon Luiten](https://www.vision.rwth-aachen.de/person/216/) ([luiten@vision.rwth-aachen.de](mailto:luiten@vision.rwth-aachen.de)).
+If anything is unclear, or hard to use, please leave a comment either via email or as an issue and I would love to help.
+
+## Dedication
+
+This codebase was built for you, in order to make your life easier! For anyone doing research on tracking or using trackers, please don't hesitate to reach out with any comments or suggestions on how things could be improved.
+
+## Contributing
+
+We welcome contributions of new metrics and new supported benchmarks, as well as any other new features or code improvements. Send a PR, an email, or open an issue detailing what you'd like to add/change to begin a conversation.
+
+## Citing TrackEval
+
+If you use this code in your research, please use the following BibTeX entry:
+
+```BibTeX
+@misc{luiten2020trackeval,
+ author = {Jonathon Luiten and Arne Hoffhues},
+ title = {TrackEval},
+ howpublished = {\url{https://github.com/JonathonLuiten/TrackEval}},
+ year = {2020}
+}
+```
+
+Furthermore, if you use the HOTA metrics, please cite the following paper:
+
+```BibTeX
+@article{luiten2020IJCV,
+ title={HOTA: A Higher Order Metric for Evaluating Multi-Object Tracking},
+ author={Luiten, Jonathon and Osep, Aljosa and Dendorfer, Patrick and Torr, Philip and Geiger, Andreas and Leal-Taix{\'e}, Laura and Leibe, Bastian},
+ journal={International Journal of Computer Vision},
+ pages={1--31},
+ year={2020},
+ publisher={Springer}
+}
+```
+
+If you use any other metrics please also cite the relevant papers, and don't forget to cite each of the benchmarks you evaluate on.
diff --git a/trackeval/__init__.py b/trackeval/__init__.py
new file mode 100644
index 0000000..dce62da
--- /dev/null
+++ b/trackeval/__init__.py
@@ -0,0 +1,5 @@
+from .eval import Evaluator
+from . import datasets
+from . import metrics
+from . import plotting
+from . import utils
diff --git a/trackeval/__pycache__/__init__.cpython-311.pyc b/trackeval/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f1fe1a5bb943bb05e3c5591efe2dd2adf6d2a28
GIT binary patch
literal 387
zcmZvWzfQw25XR53^9N8S1WR9_S?U+47?2o&0j8)@WEzWF*h%F$NaaO%1jO59jIuJZ
zA$9A-If5ZlPoKW~o#gM%A8C3DSgYkiI_zJ5_^;p(_SXpSP(n#G7?Km>InaTjIUSiI
zvz(30ki$G2IYf@~2swIyEdD0AC?mtk&8yI@aJ9uDDTNb_bPaNBq_b-6G1=78uWX7z|G`G0{ZK+k53_En9xl77dbsGOI`-zggCnwMcWPQ>csdjQvcpydG0rl`%A@^T%kRp{Hn+chni
JDvT$7KmSj1K`j6P
literal 0
HcmV?d00001
diff --git a/trackeval/__pycache__/__init__.cpython-38.pyc b/trackeval/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33ef48e859e0d201774daff01fcf1e296116b7e9
GIT binary patch
literal 291
zcmYk0v2MaJ5QgoE0HLIm3EiEiGXGq3~E4h7JBiXj#iW3tV$fp7|`~I-fj87g|g%(504>OY}k)
z=*5B66((q_ajA
zb8Jx%B%wfjvp6Fs{5`94AFR5ahpRcdPVHV^6-0CTq$R3pT<4%^mY3^TtwP@p-mdAu
KR3V=D{rv-QuR%Zn
literal 0
HcmV?d00001
diff --git a/trackeval/__pycache__/__init__.cpython-39.pyc b/trackeval/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef9bf9c50daa5612bad7791d05d6f40bb5b0572b
GIT binary patch
literal 293
zcmYk0u};G<5Qgomu$f~W@!j4<}Qp6*5;~992tPF@(VB#(;
zoaOJnf4I|`&1TK8zCPY9VE?4>zbXt{{Dokd<$*=6IVZ0|6>D8DUZV>w78mGJ%f%&n
zqZ{<*iS_L_$2C2lSG$V|lYx{mbk=|=4hni39b~^J?M@*9`0;=)C-9*>_U$YjPG&l5
zX1T_e1wj%T#Fgb0IpOD49aD5F`JkT7csP4AzD^3V*}u>lH4J|AFbsF6x4FBI$xgvN
M(21o(-0;8m3)pZ&>;M1&
literal 0
HcmV?d00001
diff --git a/trackeval/__pycache__/_timing.cpython-311.pyc b/trackeval/__pycache__/_timing.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a07c505d7cd59a860948c0e5083438c2708d377
GIT binary patch
literal 2612
zcmaJ@O-vg{6rNrG+Zfwm6NAAqRw;3QZ0uO10z?%`f=B}ikR~Zg<2c?m82kgf>m;~q
zg-VeMQdKPyQQat2tSCx|(gT-DIrPwDj=RWht&xzBsvL526jh2ib!IWxhNRCkZ{NIm
z^S+t)=Eu*BMm<1utHzo$H+G3E3-9`n@Drc_MZ`gZm?Q`+lW{Vkm{iEJBCecN
zQp!mcs_Gb+)KDO-rBwI8BuObIfJglXyLwJQ)oms`$9f1@H%BwkP$ZR3ax^n~HbRuK
z@JCUuGVb>XnXBqu22WAz#hzvfLtCrhN`c5-gi|5Cc5%g!qxjD*ne-$t!s>g94jXajIQ^0L<)pUpCb)
zf+Cvt%(7};UEQY0Sve7mj??ZL0fyvJU_WB{DQBoy@+fT;@3puyI3hApiVXTLn<^UI
z&T5jHYF$>tYqnGEM(!41nySa}nmgnjvN9uHl`#SkQA9|*&60XvRcnp4
zyoOSiqcF}jR*)!F?Rp4M)2lwvTRnGq<5zs`NVRhx1m84YL($J_v$`738eYrmqGSfe
zZ$HKlEy?vMKxrQnS^X5q8n{Ch41V8?kpE9)|8-d-Z{+nTRu0~fud9rxNIc-{Dp5Ot
z+Ck|keI7;fAn}G;Yn)liAm_LKBRj}Zv)Ve!$ZN1YUoUgjhgY5q?(G2zH8%|Hc9&qGBd&TbP&
zGURdBHsO)&gH`q!r3(>Max}}m!534ep(Ezn@6-f!gBL=VhDU}+hZuY-Kus*k&e0JL
z)=$w~G#!tJnJKJ*w!z_vi|0>&9XfwzVj^^L{KC*U5)i<`91XRhP%@mLLm{Z=VhNfF
zQ8DB~IZH3FP&LQIk{ndUI6A>X9i2=ggD^)!b$D)$PEt@glZqvwrhM;u%utCjAvt>+
zk{k=w49%wF9Fl2NIjBRuG8zjF37VTuQF03^5E@kBn9@*>ZMYa0r&*}QGE%dUh(RK}
z)8^GxtCJ}hi6%KZC9@Y-N~3j#X#YfIaTbJe*tP_#7(V>zX8Jx>bPh3iXa
zH}`rhjl#f+ZE4_NHE4Df?B2C`$?h-M{p;-odvIyEXtL)lKk&I*8>SA?)bZ!h&k-*k
zY7>k_OH0nb+L~|Luyl%+&P}H~A6z4zoccNOFi~&@*ICKg^K$I>Ny#}R7zD$X^YFtN
z!BA2Iv*VYUN2v!X$>}RNeWD4MO>;}GS2TB`QZRR}1tjx{ot7l)wvHaZug~?OhCE($
z1~yCq(G=J;+k~4dx>cQ^L!#JpL^68{W{+t0Y`TyC?G9|X1Cl#fa0g3(u=L4n<(%+E
z&bDc77F&+3vFm5n;!;bGWbG|jdqugF6xdV?oTtRLZd3$)(a|PoH(NXMF{!ny(Arf3
zq{T0@l|kXOFkd`m&z%$Q3U`Zk*CW#d)0%Q^>{pFs?<&~4M0?l!qQjZDK5Ks5y!M&Y
z9xSv6B?k_Vqesx9i^k0@*E=$%c
z1?v^jdS$Dzd1(YA+XTNr3H`a7Lch>2*0sMwdk8T2dG?Tu56gbUSbUw_a4}uzgAN})
zY$uV3-&Qt=7}{`~D*clGyQ}xEmdKMTcL~(6w+f)H-|dbQCn80jZq0)LMt6PTX~rdzouc?kP`t7ycz0x%{n!m4g~cIa^Ri
z7iGS3jI4{4M{S7kk1SE=3>pcuYv$9x!Mc2jw$YCpMTd53H(rP1RHxGI*~l7geab+JO4N_WeslE3|r1QH(`XfWJ10(2`uWH
zN%I4jtrcC`KIk9rKHJ@Y%FU(IUXqC^i)9!LnH23jo@d^C{DQzn?{8*6-`GOva4&X%b6qkJwyjXUdaIYA2CwEOd!wGf;>m({41K
zvW#l`WRhgsyM7IjlWk&ZGJ36v6x!uX%u)%M0m-lgyG1miD`O^)C$v~;3!-Vqbd-f<
zqazcUG7_cp@qUXH0sQWGLT4#^WNwZyrd6oUCpOdq
GpYD1@Lb90vvxJBXY`2PuTa#cfx~CwSG%vC|mk;2vIH-Z#m4UawaWj2FEQTMZEU$1H9S
z0*fDjL=S)>iZe995yplm8JSp-Rq|G5M>e%12gprPPfiU_s(sO`N259lq{
z&TAgeL>uecjPc`_`zO+jl#_^acv+ANUQ@$bdFB(?`ViArsT{RXOv{8|SI
zZ2vb_fOCL82NGxCQldAQzmoVA733T#r0^fOu*$WqY(>^l;Ye~}ouf-|y@3i>xn`zh
zO!%I(`JRIGZ^<(0C}_k6q;uj@YYh@Rao14c<*wlhPkGY2@GsHWac9JGOyvjesC
zQ0^cAr%5SA0p#s=cFi%3mW`uylIEjUoX6Qhq~iPK%GNhK--_1Ocb)BF
zORz+HlE=xIacv3qQd>zTuJHXO`MPz5+oksQE$yYuxxInt^c`fujpX+
zaB#H$WdGn1H4fnCO%ti@F|R6OnIt
z5DSe{jpOUJWWPyxka+kOUES**PbaLqJf1J&U(>w%-q@*5tq2Qf?e!eA#PwB*YGBBtr~6;-}15f8u(MI0y~Y8Z-U+?4l#k?Q&htc0r94hGN|O|+_!6iQ*a$_6gscis+A38^tGWVVfvlBh>~6eXJ2T^^
zv1U)HM8pAc;K;#2TzcaV;KW~n=E?z8>R*r(Z=94y%<#euf1`?=hx9Iz`~h7m_=$*TiX1~uPe4Ngn*2t7B)PMax!Toq9db|6rE>zO
z=zaroU;B1ubWFuP<%)Y6OSmMPDNkcFHZY}qpE+xo+`hjCd64;*%L5U>TmmQxHEZ
zTzRko+S|r@I*^6hv$788fVmjqFI8aPF*X%ve>Dfo;wkLFckAEMJj@Fy4|J#tLwXG(
z5;*uH`(*?y{FUUz5#%LR(rAbJH&DNvS9C=eG4d;2T9!5F@&?w0RX7MXb6FafB8%73ttY-#O3EcI$Gl
zyLeo1tFG(`yT`_rJRKU+TGVepjnjForlQ5qIc{K4dz?&?Y}AOeI6aby{B%*>{Al}Q
z+1UJ~wKZ%=K4>~7P$C|TxiF682gVts@{HVE&<`7Dq`A`Hx}?8w8#s)sxKuA~3XA$?
z()hpAswwO|=s({5a(C~pu!qkCNhW7}pnB1WtKmFNV==M<^Y+ee_u>7|zvu
z@r)UFf0|?_xVRO~s&Q>hL&t|bs-*D+m-AF1&ykcV;NMj=X-X5Wo=n+lWgLuVJlj*=
zD_R|usFcfIXn7P%LlQ&c^R?p76kbd+d;w?8_LJ#^x2H+k{yNS^2T7bA&fDkBM!PmT
zvYXSI9U0eNt5K0ph?AhIEki(_bqH%xQT!*N1GCI(nkOcA8m;bkZTr9
zVK~nQYC27&u&=bn4+OFy;QEEqh~{XyzK{22D%D3gD5xx|YsBtFJSjKmYz
zocfE;;YwaJry(_@Hlbb8&FOFq(z=9hNk6Ao(t5xf<_t>OkT5Qp=1h=Ae9UuZ4K{+2
z#t$&k^r;5JK7+6N%vmVIVm^bB<`1w>A@^AUGgm?4(^$arYlI2^8V4A_Wqq2BCm2Xn
z&(P7>!_cE>BJ_TYT4v*^Bw&pZHog>3E+ERmR65DV3j83U&1GIpLE>YK!jN{L89t|_
zbfo4b3G}3q)O~=@87SkTDbI*Bk%kZOW3rk_Ey!w?%}AhhlugmABZVXpOGFulm`~9J
zr51yP1R0;7r)Vn45=#_I$73o%5lcqdC_}MKnFjWp7B4cca?fHRT`pS&`T_MF
ziL?2hh8l}`TVPuy*gP)`<1kWJ-d~6Vd`mz2PbxDwhkD7d+A~G5X*$Ue>;sA*srhI+
z!4k1laz4I5Af>5t1Ei9}zeXmVS+!9#ouVU&)IyMbpAA&X*4gP$Wb*3N)v>YAm|Pv6
z9WUe}gA-GsX}R*+ROsrB$V502nwpxJlI>HWnc1mu5n&yh7%Ap#lT)MNnF!RJ9h;e!
zYt-yScLqk6)d-BRg~X(D>vGupYZQJq^OzbJ(HK%+=8`XsTjzY+@!dI6HMKG#${>
z$o#Txfnp=E6iG#vqwE7&lVW6@lF}`v;>mzUHWv#kbJhiVWL*Z0D1!K1Tla(164kx>
zApJOcKc4I!pP1=p(HKEK*Ns4M`LXOm4dodiWRA;fO9=H6^4#DnNF2}(^DclV*l!G2
zZN2C`EjRS(Zrh?UqBmJI97-hkki8#dF+e-0q`+)vfpz!
zhi#SC0$TMc9}y(}De(HNFakI%Qj#u?3{vCCGr;I`T*;UrwZ#%9K2qb+U>$u*&r}-2x@DL3JS-2_Tz?CL-YXqu~*y8aSPfp%U}-DJaZ2
z8BHKUQzW8rimX-_ohFZKN7Ua1@C5s^u0gCjy=~ah?e~oB^^6HUVX-I7Pu>#hZi{ud
z*K`{dhZXjXOWE&;6{n9dt@-?p?iUriKy*XNNkrRUkBHVr$zHSZExzH5Xg_;|;kNTq
z&$;g#MMoE}yhlc;sL+=H9gr!sbSsE594n1+X278SesLXa^kLJ7%^wmUwtNf`uQ(fJ
zbex7Q_yl%rW?H~kp?XHc7J9lgo^qN`ArgO9h{hQ`ho2l{KLswu`7UFKV;LiSO>vAf
za)y2=g_KE6ark1PwTu~ZsLPZmV`0&(kb5A?SLM?X#m(iil$BY^%Qyq6*+RB?>EZO{
zl=He(XuTW<^Sw4xVaCd2(Qy?gQL@M@bI+nBpE_m@D_FwDy7Hwj>yoKti$e-qMEtJ&kA03=PbbE!ADIh
zrr79u6DnmJOE@E*5P^!T9H@Ey9l&H99}#O!Ys9<;Ru3l5wpG$+#rowo7&_*6nJUh)
zg`)6F4_8&LGm^Yi&XP2tXn12I4n9=y6#B>O%?q$f`!T+1!wR|=Ymp{Y>$v`;`5Woa
z7W8jc^tT}WOXd5cY*7uisD8;1fZ(cspDjpJQLA;u7Qm^dK{pD)Pb;Dok6RzeP^~q$RPl&Y^@X3n*mp*a6(w5G0
zxy$>Xt9i+mH#a`ybfmS2r`yXSi;b)JCQ*d5u0nj7*PIlEAS#zcm!g%+$6%%w`q7;)
z&D51kzEu=3PXfQ?9EPwge)p4)nijB(3%>4h4(m-t7Gz~f+Fnd&JYWk)G0zw|4_Wn4
znbWd3o%ll$2-dQU9JcnbzzpjkG`<48?MXtrvH{kCo;qM0ROXjOmK1h46
z_EYex&kCz09DH|!IA(*ovUN{Wo(Jbr>cgt3xUzmpzw)}xg}UyO>O$N90CO^|g^C(*
z6)3)uZnQ2sj?Lo9J|D(j$#cc9Hjtn0$aqQ5NnF^$A|>Z7?-_QeSoToaQ_voEq=-9C
zf08`O3bOtrd|4!Gc*Pp2WPNbFEgWMuNB!9ioUR0Y^7PYQAu`lS&qG`IDX1dlo|7q0{Y5fB+JI9O}K7rjjuV
zVbF;Ypp@gIb}C205V`t9nnCAC=)7e$;*j|3tm5vf{`m!
zB_he_62%}P2!eR>MA>`(&eBiDV{9z7v^=nSs<`{4)Lk(Z>`$a((F8LPEYjTv4$Nf$
zUq8WMPe@!U0^!EpXRq@!cO~NV_P`#|#}j==TCAlVjxNrNfxg`iF)%DCXed#(k^!x3
z2N_`(y>uT58;L$DL6C5`0tjt1k)Y2*i}VEmC)>0KN(wfuF4}Zm`w0IWHh5sqavRo6
zV{{kdi2$4%b;FNrL?S3wLh})K+ETZogTUfXvDROq`fmVG9VO5Q#>xRR-z1%PDbx1;Os0#+^6#A??6YRjkJtM;if=`&dye~
z7K??hL@l9dEzqu6oJEP)YFfWc
z$CIqAO;AZ1t-NF%+)z-mfl1LUMMCa9a!IzK_!Y^Aep$O3UzYW8mRe%u{AezeGIAaB
zAeBy_;SJXclp2d*yvt3wfnO{@1Q3uYI=TRDd5@Q?;VM_^Ofyu3NiS108Z*>FijGr^
z?05-dh%PTvNm8yvil@@-a+;06{3Dz1!?g8aDM~+-btve_6^iB&7MWkRGwCHLdmK@x
zAz%!RN5Sr70n@OYNU;$#rzNOJZq_Ok)~?WC50I~%OPz{-iI=%>DFhaN3_77{MAkAa
zEt?T*>OLHd%Vt$V>iw9kOMz0d1!ml(C>sGxpp`~PnX)lA+sf9mZg)T*tVEXS)B-5P
z+>g?VyX1+~sY|xz>Z<->$r%eO$ocdVvp|QS8G2Z0E2p(=K|T^;*eDI2gXryf--_h?
z#)RA*`Gjmx<&sqlB332^l+Z#)LJkxvZ4-5tGjCrq&MYUQk5viHs(Der8svMHHf
zf(rwd0mGr5R9TU!Bn#Rro(%&~#^>oXNR3Mfpx{fR*@9^W0F(Fcd9J{t%{@0?>fEbIxV;YqALK1TGyII0Y@gR?bMnj+b@_1$>-0d4y)W^Rm+-5s;L)i0QsfK
z264&WbkOSGdhnyJpLG38&Cc8x&A(`V+9|d8f9mnh3am>+9O(fcx%srr|}cd-!**Fuy*~h
z+POCPWmDVM+d@-NYznST9C(ROMz>Oe_k!rX032=o3douch_3AiyT+&W&)$)O&@|rC
z{}l!xtIfXkJzLgxREf3tcsM_AZCe}6RtnbULyzwW)7zpjhas5NOI_VS9TpwFt&re&
z?VuyL&F#J;bX*rZuETM96{18?aGuT%Z{9f4lr5jYGP%_hA0_vri+j;UA({}Q38mmU
zDg}XvOWC&$2Zr_sZtV@+;%`TUfp^7$ce8_=m2iW!trwla?QaRrOT6^Hx$*ZhXid{Jz^2we+QpbtP$2w-*uT+q|J?|yU7{pRk1;2sv;!!YyLx)rVX
z_CC>harcto%+p=kb6?sW5!^$fd+06h68!Zgp>9cVBt=J(cO(xz#J;C{&(ke;nTM5bI-MlAyI`=)jd!Amwb5``gRZ)}eCWwh3IJ&nfYnJN0S){GCO9Y;hAz
z0^>&0IEhQdseR(i9&u(T{B-nLQXIO=pS>rY9eGxZ0>vciIe;Jvz^@M*eOnV^<2lGe
zBa*ju-y7WX2Dcyc7va8cR`A{uy|;MpEvcb}Z#^S6pnEtEwBhyddpq~Mo!htey9W2V
z2A_@!U16~+EO;kG?*#9iP%4}i8x+nHP{H@Z@DIY<7xp`@?R8vx+9!03iyh;FH!OO?
zyf+NwTU}y952)a|p~^9^=N;I6%wNC9ze`D%M?v_;8%TJ7AOyhQ9=aNN2$imG$k$Hc
z2ku7EeRgMbH~n-`aNiW&H+lC>3DILI}z60ITL8UkF_w|;%&
z`r7paYprN)7OX9zwS~8~NcP%nQndSdd+$N5XYw^OLKCpO?clHTR2w#1WK;-S)
zRlKu9aCV5!4oDoCfw$6t0-mF$zCxggJ1GUz_DN_I>+ATsQ=%PR0Na96od#m5X-=
z#H!bzvjF94*}BNPa<{$eop0{uR|~}o%`t!$2}eHYAoSagIqEkr@?a{HpFwM_e`}u{
zm~O{@*llGTquX*0~X#dlCweEIMzu#(w%zjXJ`;2-2j25NO<0yT>4QZ(ohZ>Sc
z4={j-1Nd;jf-b|+>OT^ZEs@AlicG^2DI!}Vk@wQkM6SdbiIAxnLc@}djmKcc_8>(n
zSMYQIBvS6*X>_JXKS4m*=A*TfvNEF4rK|#MkYG{(o)kWk?k2Chd8n^C($wmkjxYev
zwV0v$gLhO!(5Q!t@N!U%8EmM0M5BigT!e*+MU+~sZpx4tF|>}(KPZjPe_s3-0JuQM
zx8V2XhdXvvoYBM8u7T6Q)#y!h;SGn2#(t;<^-E8m3XC{CXDmP5%a}@!pgApf?5|3|
z*{5uO%^3@4-crt}3hZ(T&QyNji=-%?S#cI5(MH?G6bFCF2;i%M;dC;F{$zIiZ(LDA
z-LVvQ^Z}jhKwBd^x`Ni{5wr)8JqWW*Q?Ow`+gI52Ufl
z73`Nq`{gy=0pZ&k+vyaD%OY_Z2BR5OL{PAL*M_riN&b#~f8UUl_OI^gTtUZ89rC=UzL{L)58|x1MiW>g4e);7_Wgrs;MkRC|Jd8{|is+
B5-tD$
literal 0
HcmV?d00001
diff --git a/trackeval/__pycache__/eval.cpython-37.pyc b/trackeval/__pycache__/eval.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ec5e8530db739ebd710dd8dcbb83d1014e6c32b
GIT binary patch
literal 5324
zcma)AO>7&-6`t8${w*m=*1u#~p`F+<(MC@H(jXAr$QErVl_iIg;&>5fv*NBKS|pd6
zT}CpoY=O!^